/*
 * huffyuv codec for libavcodec
 *
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 */

/**
 * @file huffyuv.c
 * huffyuv codec for libavcodec.
 */
#include "common.h"
#include "bitstream.h"
#include "avcodec.h"
#include "dsputil.h"

#define VLC_BITS 11

#ifdef WORDS_BIGENDIAN
#define B 3
#define G 2
#define R 1
#else
#define B 0
#define G 1
#define R 2
#endif
typedef enum Predictor{
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;
typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;                               //use yuy2 instead of 422P
    int bgr32;                              //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    VLC vlc[3];
    AVFrame picture;
    uint8_t *bitstream_buffer;
    int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;
static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};
static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};
static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};
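/* Reconstruct a line from left-predicted residuals: each output sample is
 * the running sum (modulo 256) of the decoded differences; the first loop
 * is unrolled to handle two samples per iteration. */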
static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
    int i;

    for(i=0; i<w-1; i++){
        acc+= src[i];
        dst[i]= acc;
        i++;
        acc+= src[i];
        dst[i]= acc;
    }

    for(; i<w; i++){
        acc+= src[i];
        dst[i]= acc;
    }

    return acc;
}
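/* Reconstruct a line from median-predicted residuals: the predictor is the
 * median of the left neighbour, the sample above (src1[i]) and the gradient
 * left + above - above-left, to which the decoded difference is added. */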
static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
    int i;
    uint8_t l, lt;

    l= *left;
    lt= *left_top;

    for(i=0; i<w; i++){
        l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
        lt= src1[i];
        dst[i]= l;
    }

    *left= l;
    *left_top= lt;
}
static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;

    for(i=0; i<w; i++){
        b+= src[4*i+B];
        g+= src[4*i+G];
        r+= src[4*i+R];

        dst[4*i+B]= b;
        dst[4*i+G]= g;
        dst[4*i+R]= r;
    }

    *red= r;
    *green= g;
    *blue= b;
}
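/* Encoder-side counterpart of add_left_prediction(): writes each sample's
 * difference to its left neighbour. Narrow lines are handled entirely in C;
 * otherwise the first 16 samples are done in C and the remainder through
 * dsp.diff_bytes. */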
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}
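/* Code-length tables are stored run-length coded: a 3-bit repeat count
 * followed by a 5-bit code length; a repeat count of 0 means an explicit
 * 8-bit repeat count follows. */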
static void read_len_table(uint8_t *dst, GetBitContext *gb){
    int i, val, repeat;

    for(i=0; i<256;){
        repeat= get_bits(gb, 3);
        val   = get_bits(gb, 5);
        if(repeat==0)
            repeat= get_bits(gb, 8);
//printf("%d %d\n", val, repeat);
        while (repeat--)
            dst[i++] = val;
    }
}
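/* Assign canonical codes from the longest code length down to the shortest.
 * After all symbols of a given length have been numbered, the running code
 * value must be even, otherwise the length table does not describe a valid
 * prefix code. */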
static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
    int len, index;
    uint32_t bits=0;

    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
                dst[index]= bits++;
        }
        if(bits & 1){
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}
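/* Build Huffman code lengths by repeatedly merging the two smallest counts.
 * If any symbol would need a code of 32 bits or more, the construction is
 * retried with a progressively larger offset added to every count, which
 * flattens the distribution until all lengths fit. */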
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    uint64_t counts[2*size];
    int up[2*size];
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            counts[i]= stats[i] + offset - 1;
        }

        for(next=size; next<size*2; next++){
            uint64_t min1, min2;
            int min1_i, min2_i;

            min1=min2= INT64_MAX;
            min1_i= min2_i=-1;

            for(i=0; i<next; i++){
                if(min2 > counts[i]){
                    if(min1 > counts[i]){
                        min2= min1;
                        min2_i= min1_i;
                        min1= counts[i];
                        min1_i= i;
                    }else{
                        min2= counts[i];
                        min2_i= i;
                    }
                }
            }

            if(min2==INT64_MAX) break;

            counts[next]= min1 + min2;
            counts[min1_i]=
            counts[min2_i]= INT64_MAX;
            up[min1_i]=
            up[min2_i]= next;
            up[next]= -1;
        }

        for(i=0; i<size; i++){
            int len;
            int index=i;

            for(len=0; up[index] != -1; len++)
                index= up[index];

            if(len >= 32) break;

            dst[i]= len;
        }
        if(i==size) break;
    }
}
static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length*8);

    for(i=0; i<3; i++){
        read_len_table(s->len[i], &gb);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
#if 0
for(j=0; j<256; j++){
printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
}
#endif
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    return (get_bits_count(&gb)+7)/8;
}
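/* Version-1 (classic) huffyuv streams carry no code tables of their own;
 * build the decoder VLCs from the fixed classic_* tables above instead. */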
static int read_old_huffman_tables(HYuvContext *s){
#if 1
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
    read_len_table(s->len[0], &gb);
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
    read_len_table(s->len[1], &gb);

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    return 0;
#else
    fprintf(stderr, "v1 huffyuv is not supported \n");
    return -1;
#endif
}
static void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        s->temp[0]= av_malloc(4*s->width + 16);
    }
}
static int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}
static int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;

    s->bgr32=1;
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        if((avctx->bits_per_sample&7) && avctx->bits_per_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        switch(avctx->bits_per_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGBA32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);

    return 0;
}
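/* Write a code-length table in the run-length format read_len_table()
 * expects: short runs as a single byte with the repeat count in the top
 * 3 bits, longer runs as a plain value byte followed by an 8-bit repeat
 * count. Returns the number of bytes written. */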
static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
    int i;
    int index= 0;

    for(i=0; i<256;){
        int val= len[i];
        int repeat=0;

        for(; i<256 && len[i]==val && repeat<255; i++)
            repeat++;

        assert(val < 32 && val >0 && repeat<256 && repeat>0);
        if(repeat>7){
            buf[index++]= val;
            buf[index++]= repeat;
        }else{
            buf[index++]= val | (repeat<<5);
        }
    }

    return index;
}
static int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }else if(avctx->strict_std_compliance>FF_COMPLIANCE_EXPERIMENTAL){
        av_log(avctx, AV_LOG_ERROR, "This codec is under development; files encoded with it may not be decodable with future versions!!! Set vstrict=-2 / -strict -2 to use it anyway.\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0]= s->predictor;
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
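/* The 4:2:2 bitstream interleaves the symbols of each pixel pair as
 * Y, U, Y, V, hence count is halved and four VLCs are read per iteration. */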
static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    for(i=0; i<count; i++){
        s->temp[0][2*i  ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
        s->temp[1][  i  ]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
        s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
        s->temp[2][  i  ]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
    }
}
static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    for(i=0; i<count; i++){
        s->temp[0][2*i  ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
        s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
    }
}
static int encode_422_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            s->stats[0][ s->temp[0][2*i  ] ]++;
            s->stats[1][ s->temp[1][  i  ] ]++;
            s->stats[0][ s->temp[0][2*i+1] ]++;
            s->stats[2][ s->temp[2][  i  ] ]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        for(i=0; i<count; i++){
            s->stats[0][ s->temp[0][2*i  ] ]++;
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i  ] ], s->bits[0][ s->temp[0][2*i  ] ]);
            s->stats[1][ s->temp[1][  i  ] ]++;
            put_bits(&s->pb, s->len[1][ s->temp[1][  i  ] ], s->bits[1][ s->temp[1][  i  ] ]);
            s->stats[0][ s->temp[0][2*i+1] ]++;
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
            s->stats[2][ s->temp[2][  i  ] ]++;
            put_bits(&s->pb, s->len[2][ s->temp[2][  i  ] ], s->bits[2][ s->temp[2][  i  ] ]);
        }
    }else{
        for(i=0; i<count; i++){
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i  ] ], s->bits[0][ s->temp[0][2*i  ] ]);
            put_bits(&s->pb, s->len[1][ s->temp[1][  i  ] ], s->bits[1][ s->temp[1][  i  ] ]);
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
            put_bits(&s->pb, s->len[2][ s->temp[2][  i  ] ], s->bits[2][ s->temp[2][  i  ] ]);
        }
    }
    return 0;
}
static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            s->stats[0][ s->temp[0][2*i  ] ]++;
            s->stats[0][ s->temp[0][2*i+1] ]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            s->stats[0][ s->temp[0][2*i  ] ]++;
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i  ] ], s->bits[0][ s->temp[0][2*i  ] ]);
            s->stats[0][ s->temp[0][2*i+1] ]++;
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
        }
    }else{
        for(i=0; i<count; i++){
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i  ] ], s->bits[0][ s->temp[0][2*i  ] ]);
            put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
        }
    }
    return 0;
}
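/* RGB streams: with decorrelation enabled the green sample is coded directly
 * and blue/red are coded as offsets from green. Non-24 bpp (32 bpp) streams
 * carry a fourth symbol per pixel (presumably alpha), which is read and
 * discarded here. */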
static void decode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->decorrelate){
        if(s->bitstream_bpp==24){
            for(i=0; i<count; i++){
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            }
        }else{
            for(i=0; i<count; i++){
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
                                   get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
            }
        }
    }else{
        if(s->bitstream_bpp==24){
            for(i=0; i<count; i++){
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
            }
        }else{
            for(i=0; i<count; i++){
                s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
                s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
                                   get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
            }
        }
    }
}
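/* Hand the rows decoded since the previous slice to the application's
 * draw_horiz_band() callback, if one is registered. */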
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}
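/* The packet is byte-swapped one 32-bit word at a time into bitstream_buffer
 * before bit parsing; when the per-frame context model is enabled, each
 * frame begins with updated Huffman tables. */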
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);

    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{

            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                add_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    add_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    add_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*p->linesize[0];

        if(s->bitstream_bpp==32){
            skip_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //yes, it's stored upside down
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4;
}
static int common_end(HYuvContext *s){
    int i;

    for(i=0; i<3; i++){
        av_freep(&s->temp[i]);
    }

    return 0;
}
static int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
    }

    return 0;
}
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    if(s->context){
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i], 256);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8,        p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);

        encode_422_bitstream(s, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, width);
                y++; cy++;
            }

            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, width);
            }
        }else{
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, width);
            }
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size+= (put_bits_count(&s->pb)+31)/8;
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%llu ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    }
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
        avctx->stats_out[0] = '\0';
    }

    s->picture_number++;

    return size*4;
}
static int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL
};

AVCodec ffvhuff_decoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL
};

#ifdef CONFIG_ENCODERS

AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
};

AVCodec ffvhuff_encoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
};

#endif //CONFIG_ENCODERS