/*
 * bzip2 is written by Julian Seward <jseward@bzip.org>.
 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
 * See README and LICENSE files in this directory for more information.
 */

/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*---                                            compress.c ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
This file is part of bzip2/libbzip2, a program and library for
lossless, block-sorting data compression.

bzip2/libbzip2 version 1.0.4 of 20 December 2006
Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>

Please read the WARNING, DISCLAIMER and PATENTS sections in the
README file.

This program is released under the terms of the license contained
in the file LICENSE.
------------------------------------------------------------------ */

/* CHANGES
 * 0.9.0    -- original version.
 * 0.9.0a/b -- no changes in this file.
 * 0.9.0c   -- changed setting of nGroups in sendMTFValues()
 *             so as to do a bit better on small files
 */

/* #include "bzlib_private.h" */
/*---------------------------------------------------*/
/*--- Bit stream I/O                              ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void BZ2_bsInitWrite(EState* s)
{
	s->bsLive = 0;
	s->bsBuff = 0;
}
/*---------------------------------------------------*/
static NOINLINE
void bsFinishWrite(EState* s)
{
	while (s->bsLive > 0) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
}
/*---------------------------------------------------*/
static
/* Helps only on level 5, on other levels hurts. ? */
#if CONFIG_BZIP2_FAST >= 5
ALWAYS_INLINE
#endif
void bsW(EState* s, int32_t n, uint32_t v)
{
	while (s->bsLive >= 8) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
	s->bsBuff |= (v << (32 - s->bsLive - n));
	s->bsLive += n;
}
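/*
 * Illustrative sketch, not part of the original source: how the bit
 * buffer behaves.  bsBuff collects up to 32 bits, most-significant
 * bit first; bsLive counts how many of them are valid; whole bytes
 * are flushed from the top into zbits[].  Compiled out via #if 0;
 * the hypothetical trace assumes a freshly initialised EState with
 * s->zbits already pointing at scratch space.
 */
#if 0
static void bsW_demo(EState* s)
{
	BZ2_bsInitWrite(s);
	bsW(s, 3, 0x5);   /* bsBuff = 101....., bsLive = 3 */
	bsW(s, 5, 0x11);  /* bsBuff = 10110001, bsLive = 8 */
	bsW(s, 8, 0xff);  /* flushes byte 0xb1 to zbits[], then buffers 0xff */
}
#endif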
/*---------------------------------------------------*/
static
void bsPutU32(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 24) & 0xff);
	bsW(s, 8, (u >> 16) & 0xff);
	bsW(s, 8, (u >> 8) & 0xff);
	bsW(s, 8, u & 0xff);
}
/*---------------------------------------------------*/
static
void bsPutU16(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 8) & 0xff);
	bsW(s, 8, u & 0xff);
}
/*---------------------------------------------------*/
/*--- The back end proper                         ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void makeMaps_e(EState* s)
{
	int i;
	s->nInUse = 0;
	for (i = 0; i < 256; i++) {
		if (s->inUse[i]) {
			s->unseqToSeq[i] = s->nInUse;
			s->nInUse++;
		}
	}
}
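/*
 * Illustration, not part of the original source: if a block contains
 * only the bytes 'a', 'b' and 'z', nInUse becomes 3 and unseqToSeq
 * maps 'a'->0, 'b'->1, 'z'->2.  All later stages work in this dense
 * 0..nInUse-1 alphabet; sendMTFValues() then adds the two run
 * symbols and EOB, giving alphaSize = nInUse + 2.
 */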
/*---------------------------------------------------*/
static NOINLINE
void generateMTFValues(EState* s)
{
	uint8_t yy[256];
	int32_t i, j;
	int32_t zPend;
	int32_t wr;
	int32_t EOB;

	/*
	 * After sorting (eg, here),
	 * s->arr1[0 .. s->nblock-1] holds sorted order,
	 * and
	 * ((uint8_t*)s->arr2)[0 .. s->nblock-1]
	 * holds the original block data.
	 *
	 * The first thing to do is generate the MTF values,
	 * and put them in ((uint16_t*)s->arr1)[0 .. s->nblock-1].
	 *
	 * Because there are strictly fewer or equal MTF values
	 * than block values, ptr values in this area are overwritten
	 * with MTF values only when they are no longer needed.
	 *
	 * The final compressed bitstream is generated into the
	 * area starting at &((uint8_t*)s->arr2)[s->nblock]
	 *
	 * These storage aliases are set up in bzCompressInit(),
	 * except for the last one, which is arranged in
	 * compressBlock().
	 */
	uint32_t* ptr = s->ptr;
	uint8_t* block = s->block;
	uint16_t* mtfv = s->mtfv;

	makeMaps_e(s);
	EOB = s->nInUse + 1;

	for (i = 0; i <= EOB; i++)
		s->mtfFreq[i] = 0;

	wr = 0;
	zPend = 0;
	for (i = 0; i < s->nInUse; i++)
		yy[i] = (uint8_t) i;

	for (i = 0; i < s->nblock; i++) {
		uint8_t ll_i;
		AssertD(wr <= i, "generateMTFValues(1)");
		j = ptr[i] - 1;
		if (j < 0)
			j += s->nblock;
		ll_i = s->unseqToSeq[block[j]];
		AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");

		if (yy[0] == ll_i) {
			zPend++;
		} else {
			if (zPend > 0) {
				zPend--;
				while (1) {
					if (zPend & 1) {
						mtfv[wr] = BZ_RUNB; wr++;
						s->mtfFreq[BZ_RUNB]++;
					} else {
						mtfv[wr] = BZ_RUNA; wr++;
						s->mtfFreq[BZ_RUNA]++;
					}
					if (zPend < 2) break;
					zPend = (uint32_t)(zPend - 2) / 2;
					/* bbox: unsigned div is easier */
				}
				zPend = 0;
			}
			{
				register uint8_t rtmp;
				register uint8_t* ryy_j;
				register uint8_t rll_i;
				rtmp = yy[1];
				yy[1] = yy[0];
				ryy_j = &(yy[1]);
				rll_i = ll_i;
				while (rll_i != rtmp) {
					register uint8_t rtmp2;
					ryy_j++;
					rtmp2 = rtmp;
					rtmp = *ryy_j;
					*ryy_j = rtmp2;
				}
				yy[0] = rtmp;
				j = ryy_j - &(yy[0]);
				mtfv[wr] = j + 1;
				wr++;
				s->mtfFreq[j + 1]++;
			}
		}
	}

	if (zPend > 0) {
		zPend--;
		while (1) {
			if (zPend & 1) {
				mtfv[wr] = BZ_RUNB;
				wr++;
				s->mtfFreq[BZ_RUNB]++;
			} else {
				mtfv[wr] = BZ_RUNA;
				wr++;
				s->mtfFreq[BZ_RUNA]++;
			}
			if (zPend < 2)
				break;
			zPend = (uint32_t)(zPend - 2) / 2;
			/* bbox: unsigned div is easier */
		}
		zPend = 0;
	}

	mtfv[wr] = EOB;
	wr++;
	s->mtfFreq[EOB]++;

	s->nMTF = wr;
}
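/*
 * Worked example, not part of the original source: runs of MTF zeroes
 * are encoded in a bijective base-2 scheme using RUNA (digit value 1)
 * and RUNB (digit value 2), least-significant digit first.  Tracing
 * the zPend loop above for a run of 5:
 *
 *   zPend = 5 -> 4 after the initial decrement
 *   4 & 1 == 0  -> emit RUNA, zPend = (4-2)/2 = 1
 *   1 & 1 == 1  -> emit RUNB, zPend < 2 -> stop
 *
 * RUNA,RUNB decodes back to 1*1 + 2*2 = 5.
 */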
/*---------------------------------------------------*/
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15

static NOINLINE
void sendMTFValues(EState* s)
{
	int32_t v, t, i, j, gs, ge, totc, bt, bc, iter;
	int32_t nSelectors, alphaSize, minLen, maxLen, selCtr;
	int32_t nGroups;

	/*
	 * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * is a global since the decoder also needs it.
	 *
	 * int32_t code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * int32_t rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * are also globals only used in this proc.
	 * Made global to keep stack frame size small.
	 */
#define code     sendMTFValues__code
#define rfreq    sendMTFValues__rfreq
#define len_pack sendMTFValues__len_pack

	uint16_t cost[BZ_N_GROUPS];
	int32_t fave[BZ_N_GROUPS];

	uint16_t* mtfv = s->mtfv;

	alphaSize = s->nInUse + 2;
	for (t = 0; t < BZ_N_GROUPS; t++)
		for (v = 0; v < alphaSize; v++)
			s->len[t][v] = BZ_GREATER_ICOST;

	/*--- Decide how many coding tables to use ---*/
	AssertH(s->nMTF > 0, 3001);
	if (s->nMTF < 200)  nGroups = 2; else
	if (s->nMTF < 600)  nGroups = 3; else
	if (s->nMTF < 1200) nGroups = 4; else
	if (s->nMTF < 2400) nGroups = 5; else
	nGroups = 6;

	/*--- Generate an initial set of coding tables ---*/
	{
		int32_t nPart, remF, tFreq, aFreq;

		nPart = nGroups;
		remF = s->nMTF;
		gs = 0;
		while (nPart > 0) {
			tFreq = remF / nPart;
			ge = gs - 1;
			aFreq = 0;
			while (aFreq < tFreq && ge < alphaSize-1) {
				ge++;
				aFreq += s->mtfFreq[ge];
			}
			if (ge > gs
			 && nPart != nGroups && nPart != 1
			 && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */
			) {
				aFreq -= s->mtfFreq[ge];
				ge--;
			}
			for (v = 0; v < alphaSize; v++)
				if (v >= gs && v <= ge)
					s->len[nPart-1][v] = BZ_LESSER_ICOST;
				else
					s->len[nPart-1][v] = BZ_GREATER_ICOST;
			nPart--;
			gs = ge + 1;
			remF -= aFreq;
		}
	}
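	/*
	 * Illustration, not part of the original source: the loop above
	 * splits the alphabet into nGroups contiguous ranges of roughly
	 * equal total frequency, remF/nPart each.  E.g. with nMTF == 300
	 * (so nGroups == 3), each initial table "owns" symbols adding up
	 * to about 100 occurrences and gets cheap costs (BZ_LESSER_ICOST)
	 * inside its range and expensive ones (BZ_GREATER_ICOST) outside,
	 * so the first selector pass below can already tell the tables
	 * apart.
	 */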
	/*
	 * Iterate up to BZ_N_ITERS times to improve the tables.
	 */
	for (iter = 0; iter < BZ_N_ITERS; iter++) {
		for (t = 0; t < nGroups; t++)
			fave[t] = 0;

		for (t = 0; t < nGroups; t++)
			for (v = 0; v < alphaSize; v++)
				s->rfreq[t][v] = 0;

#if CONFIG_BZIP2_FAST >= 5
		/*
		 * Set up an auxiliary length table which is used to fast-track
		 * the common case (nGroups == 6).
		 */
		if (nGroups == 6) {
			for (v = 0; v < alphaSize; v++) {
				s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
				s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
				s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
			}
		}
#endif
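		/*
		 * Illustration, not part of the original source: len_pack
		 * stores the code lengths of two tables in one 32-bit word
		 * (table 0 in the low half, table 1 in the high half, and so
		 * on).  A length is at most 17 and a group holds 50 symbols,
		 * so each half-sum stays below 2^16 and never carries into
		 * the other half; the running sums cost01/cost23/cost45
		 * below therefore total six tables' costs with three
		 * additions per symbol instead of six.
		 */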
		nSelectors = 0;
		totc = 0;
		gs = 0;
		while (1) {
			/*--- Set group start & end marks. --*/
			if (gs >= s->nMTF)
				break;
			ge = gs + BZ_G_SIZE - 1;
			if (ge >= s->nMTF)
				ge = s->nMTF-1;

			/*
			 * Calculate the cost of this group as coded
			 * by each of the coding tables.
			 */
			for (t = 0; t < nGroups; t++)
				cost[t] = 0;
#if CONFIG_BZIP2_FAST >= 5
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
				register uint32_t cost01, cost23, cost45;
				register uint16_t icv;
				cost01 = cost23 = cost45 = 0;
#define BZ_ITER(nn) \
	icv = mtfv[gs+(nn)]; \
	cost01 += s->len_pack[icv][0]; \
	cost23 += s->len_pack[icv][1]; \
	cost45 += s->len_pack[icv][2];
				BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
				BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
				BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
				BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
				BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
				BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
				BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
				BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
				BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
				BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
#undef BZ_ITER
				cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
				cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
				cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				for (i = gs; i <= ge; i++) {
					uint16_t icv = mtfv[i];
					for (t = 0; t < nGroups; t++)
						cost[t] += s->len[t][icv];
				}
			}

			/*
			 * Find the coding table which is best for this group,
			 * and record its identity in the selector table.
			 */
			/*bc = 999999999;*/
			/*bt = -1;*/
			bc = cost[0];
			bt = 0;
			for (t = 1 /*0*/; t < nGroups; t++) {
				if (cost[t] < bc) {
					bc = cost[t];
					bt = t;
				}
			}
			totc += bc;
			fave[bt]++;
			s->selector[nSelectors] = bt;
			nSelectors++;

			/*
			 * Increment the symbol frequencies for the selected table.
			 */
/* 1% faster compress. +800 bytes */
#if CONFIG_BZIP2_FAST >= 4
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
				BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
				BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
				BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
				BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
				BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
				BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
				BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
				BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
				BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
				BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
#undef BZ_ITUR
				gs = ge + 1;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				while (gs <= ge) {
					s->rfreq[bt][mtfv[gs]]++;
					gs++;
				}
				/* already is: gs = ge + 1; */
			}
		}

		/*
		 * Recompute the tables based on the accumulated frequencies.
		 */
		/* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
		 * comment in huffman.c for details. */
		for (t = 0; t < nGroups; t++)
			BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
	}

	AssertH(nGroups < 8, 3002);
	AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);
	/*--- Compute MTF values for the selectors. ---*/
	{
		uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp;

		for (i = 0; i < nGroups; i++)
			pos[i] = i;
		for (i = 0; i < nSelectors; i++) {
			ll_i = s->selector[i];
			j = 0;
			tmp = pos[j];
			while (ll_i != tmp) {
				j++;
				tmp2 = tmp;
				tmp = pos[j];
				pos[j] = tmp2;
			}
			pos[0] = tmp;
			s->selectorMtf[i] = j;
		}
	}
	/*--- Assign actual codes for the tables. --*/
	for (t = 0; t < nGroups; t++) {
		minLen = 32;
		maxLen = 0;
		for (i = 0; i < alphaSize; i++) {
			if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
			if (s->len[t][i] < minLen) minLen = s->len[t][i];
		}
		AssertH(!(maxLen > 17 /*20*/), 3004);
		AssertH(!(minLen < 1), 3005);
		BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
	}
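	/*
	 * Sketch, not part of the original source (assumption:
	 * BZ2_hbAssignCodes, defined in huffman.c, assigns canonical
	 * codes): codes are handed out in order of increasing length,
	 * counting upward, e.g. lengths {2,2,3,3} would yield
	 * 00, 01, 100, 101.  Only the lengths are transmitted below;
	 * the decoder regenerates identical codes from them.
	 */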
	/*--- Transmit the mapping table. ---*/
	{
		/* bbox: optimized a bit more than in bzip2 */
		int inUse16 = 0;
		for (i = 0; i < 16; i++) {
			if (sizeof(long) <= 4) {
				inUse16 = inUse16*2 +
					((*(uint32_t*)&(s->inUse[i * 16 + 0])
					| *(uint32_t*)&(s->inUse[i * 16 + 4])
					| *(uint32_t*)&(s->inUse[i * 16 + 8])
					| *(uint32_t*)&(s->inUse[i * 16 + 12])) != 0);
			} else { /* Our CPU can do better */
				inUse16 = inUse16*2 +
					((*(uint64_t*)&(s->inUse[i * 16 + 0])
					| *(uint64_t*)&(s->inUse[i * 16 + 8])) != 0);
			}
		}
		bsW(s, 16, inUse16);

		inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */
		for (i = 0; i < 16; i++) {
			if (inUse16 < 0) {
				unsigned v16 = 0;
				for (j = 0; j < 16; j++)
					v16 = v16*2 + s->inUse[i * 16 + j];
				bsW(s, 16, v16);
			}
			inUse16 <<= 1;
		}
	}
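	/*
	 * Worked example, not part of the original source: the symbol map
	 * is sent in two levels.  First a 16-bit word with one bit per
	 * 16-byte range of the 256 possible byte values, then one 16-bit
	 * word for each used range.  If a block contains only the ASCII
	 * digits '0'-'9' (0x30-0x39), only range 3 is marked, so the map
	 * costs 16 + 16 = 32 bits instead of 256.
	 */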
	/*--- Now the selectors. ---*/
	bsW(s, 3, nGroups);
	bsW(s, 15, nSelectors);
	for (i = 0; i < nSelectors; i++) {
		for (j = 0; j < s->selectorMtf[i]; j++)
			bsW(s, 1, 1);
		bsW(s, 1, 0);
	}
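	/*
	 * Illustration, not part of the original source: each MTF-coded
	 * selector value j is sent in unary as j '1' bits followed by a
	 * '0', so the frequent value 0 ("same table as last group")
	 * costs a single bit.
	 */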
	/*--- Now the coding tables. ---*/
	for (t = 0; t < nGroups; t++) {
		int32_t curr = s->len[t][0];
		bsW(s, 5, curr);
		for (i = 0; i < alphaSize; i++) {
			while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ }
			while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ }
			bsW(s, 1, 0);
		}
	}
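	/*
	 * Worked example, not part of the original source: lengths are
	 * delta-coded.  The first length is sent in 5 bits; after that,
	 * '10' means +1, '11' means -1 and '0' terminates a symbol.  For
	 * lengths 4,5,5,3 the stream is:
	 *
	 *   00100  0  10 0  0  11 11 0
	 *   (4)  keep  +1  keep  -1 -1
	 */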
	/*--- And finally, the block data proper ---*/
	selCtr = 0;
	gs = 0;
	while (1) {
		if (gs >= s->nMTF)
			break;
		ge = gs + BZ_G_SIZE - 1;
		if (ge >= s->nMTF)
			ge = s->nMTF-1;
		AssertH(s->selector[selCtr] < nGroups, 3006);

/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
#if 0
		if (nGroups == 6 && 50 == ge-gs+1) {
			/*--- fast track the common case ---*/
			uint16_t mtfv_i;
			uint8_t* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
#define BZ_ITAH(nn) \
	mtfv_i = mtfv[gs+(nn)]; \
	bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
			BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
			BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
			BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
			BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
			BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
			BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
			BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
			BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
			BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
			BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
#undef BZ_ITAH
			gs = ge+1;
		} else
#endif
		{
			/*--- slow version which correctly handles all situations ---*/
			/* code is bit bigger, but moves multiply out of the loop */
			uint8_t* s_len_sel_selCtr = &(s->len [s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
			while (gs <= ge) {
				bsW(s,
					s_len_sel_selCtr[mtfv[gs]],
					s_code_sel_selCtr[mtfv[gs]]
				);
				gs++;
			}
			/* already is: gs = ge+1; */
		}
		selCtr++;
	}
	AssertH(selCtr == nSelectors, 3007);

#undef code
#undef rfreq
#undef len_pack
}
/*---------------------------------------------------*/
static
void BZ2_compressBlock(EState* s, int is_last_block)
{
	if (s->nblock > 0) {
		BZ_FINALISE_CRC(s->blockCRC);
		s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
		s->combinedCRC ^= s->blockCRC;
		if (s->blockNo > 1)
			s->numZ = 0;

		BZ2_blockSort(s);
	}

	s->zbits = &((uint8_t*)s->arr2)[s->nblock];

	/*-- If this is the first block, create the stream header. --*/
	if (s->blockNo == 1) {
		BZ2_bsInitWrite(s);
		/*bsPutU8(s, BZ_HDR_B);*/
		/*bsPutU8(s, BZ_HDR_Z);*/
		/*bsPutU8(s, BZ_HDR_h);*/
		/*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/
		bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k);
	}

	if (s->nblock > 0) {
		/*bsPutU8(s, 0x31);*/
		/*bsPutU8(s, 0x41);*/
		/*bsPutU8(s, 0x59);*/
		/*bsPutU8(s, 0x26);*/
		bsPutU32(s, 0x31415926);
		/*bsPutU8(s, 0x53);*/
		/*bsPutU8(s, 0x59);*/
		bsPutU16(s, 0x5359);
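		/* (Added note: the six magic bytes 0x31 41 59 26 53 59 that
		 * open every block are the leading digits of pi,
		 * 3.14159265359.) */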
		/*-- Now the block's CRC, so it is in a known place. --*/
		bsPutU32(s, s->blockCRC);

		/*
		 * Now a single bit indicating (non-)randomisation.
		 * As of version 0.9.5, we use a better sorting algorithm
		 * which makes randomisation unnecessary. So always set
		 * the randomised bit to 'no'. Of course, the decoder
		 * still needs to be able to handle randomised blocks
		 * so as to maintain backwards compatibility with
		 * older versions of bzip2.
		 */
		bsW(s, 1, 0);

		bsW(s, 24, s->origPtr);
		generateMTFValues(s);
		sendMTFValues(s);
	}

	/*-- If this is the last block, add the stream trailer. --*/
	if (is_last_block) {
		/*bsPutU8(s, 0x17);*/
		/*bsPutU8(s, 0x72);*/
		/*bsPutU8(s, 0x45);*/
		/*bsPutU8(s, 0x38);*/
		bsPutU32(s, 0x17724538);
		/*bsPutU8(s, 0x50);*/
		/*bsPutU8(s, 0x90);*/
		bsPutU16(s, 0x5090);
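		/* (Added note: 0x17 72 45 38 50 90 are the digits of
		 * sqrt(pi), 1.77245385090, marking end-of-stream.) */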
		bsPutU32(s, s->combinedCRC);
		bsFinishWrite(s);
	}
}
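/*
 * Summary of the stream shape produced above (added note, not part of
 * the original source):
 *
 *   "BZh" + level digit                     stream header, once
 *   per block: pi magic, blockCRC, randomised bit, origPtr,
 *              symbol map, selectors, coding tables, Huffman data
 *   sqrt(pi) magic + combinedCRC            stream trailer, once
 */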
/*-------------------------------------------------------------*/
/*--- end                                        compress.c ---*/
/*-------------------------------------------------------------*/