/*
 * huffyuv codec for libavcodec
 *
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file huffyuv.c
 * huffyuv codec for libavcodec.
 */

#include "avcodec.h"
#include "bitstream.h"
#include "dsputil.h"

#define VLC_BITS 11

#ifdef WORDS_BIGENDIAN
#define B 3
#define G 2
#define R 1
#else
#define B 0
#define G 1
#define R 2
#endif
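
/* B, G and R above are the byte offsets of the colour channels inside a
 * packed 32-bit pixel, hence the dependence on host endianness. */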

typedef enum Predictor{
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;

typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;                               //use yuy2 instead of 422P
    int bgr32;                              //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    uint32_t pix_bgr_map[1<<VLC_BITS];
    VLC vlc[6];                             //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;

static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
  3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
  73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
  68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
  35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
  37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
  35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
  27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
  15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
  12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
  12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
  18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
  28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
  28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
  62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
  54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
  46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
};

static const unsigned char classic_add_chroma[256] = {
  3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
  7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
  11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
  43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
  17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
  0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
  52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
  19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
  7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
  83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
  14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
  6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
};
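
/* Reconstruct a row from left-predicted residuals: each output byte is the
 * running sum of the residuals in src, seeded with the accumulator carried
 * over from the pixels to the left. */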
static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
    int i;

    for(i=0; i<w-1; i++){
        acc+= src[i];
        dst[i]= acc;
        i++;
        acc+= src[i];
        dst[i]= acc;
    }

    for(; i<w; i++){
        acc+= src[i];
        dst[i]= acc;
    }

    return acc;
}
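
/* Undo the median predictor: each pixel is predicted as the median of its
 * left neighbour, the pixel above, and left+above-aboveleft, and the coded
 * difference is then added. */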
static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
    int i;
    uint8_t l, lt;

    l= *left;
    lt= *left_top;

    for(i=0; i<w; i++){
        l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
        lt= src1[i];
        dst[i]= l;
    }

    *left= l;
    *left_top= lt;
}

static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;

    for(i=0; i<w; i++){
        b+= src[4*i+B];
        g+= src[4*i+G];
        r+= src[4*i+R];

        dst[4*i+B]= b;
        dst[4*i+G]= g;
        dst[4*i+R]= r;
    }

    *red= r;
    *green= g;
    *blue= b;
}
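
/* Encoder-side counterpart of add_left_prediction(): write each pixel's
 * difference to its left neighbour into dst; rows of 32 or more pixels hand
 * the bulk of the work to dsp.diff_bytes(). */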
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}

static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    *red= src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue= src[(w-1)*4+B];
}
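
/* Code-length tables are stored run-length coded: 3 bits of repeat count
 * (0 meaning an explicit 8-bit count follows) and 5 bits of code length. */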
static void read_len_table(uint8_t *dst, GetBitContext *gb){
    int i, val, repeat;

    for(i=0; i<256;){
        repeat= get_bits(gb, 3);
        val   = get_bits(gb, 5);
        if(repeat==0)
            repeat= get_bits(gb, 8);
//printf("%d %d\n", val, repeat);
        while (repeat--)
            dst[i++] = val;
    }
}
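
/* Assign canonical Huffman codes from the code lengths alone, handing out
 * codes for the longest lengths first; fails if the lengths do not form a
 * valid prefix code. */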
static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
    int len, index;
    uint32_t bits=0;

    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
                dst[index]= bits++;
        }
        if(bits & 1){
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}

#if defined(CONFIG_HUFFYUV_ENCODER) || defined(CONFIG_FFVHUFF_ENCODER)
typedef struct {
    uint64_t val;
    int name;
} heap_elem_t;
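
/* Sift-down step of a min-heap ordered by symbol frequency, used by
 * generate_len_table() below to build the Huffman tree. */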
static void heap_sift(heap_elem_t *h, int root, int size)
{
    while(root*2+1 < size) {
        int child = root*2+1;
        if(child < size-1 && h[child].val > h[child+1].val)
            child++;
        if(h[root].val > h[child].val) {
            FFSWAP(heap_elem_t, h[root], h[child]);
            root = child;
        } else
            break;
    }
}

static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    heap_elem_t h[size];
    int up[2*size];
    int len[2*size];
    int offset, i, next;

    // The offset added to every count flattens the distribution slightly; it
    // is doubled and the tree rebuilt until no code is longer than 31 bits.
    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries and put the result back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;
        }
        if(i==size) break;
    }
}
#endif /* defined(CONFIG_HUFFYUV_ENCODER) || defined(CONFIG_FFVHUFF_ENCODER) */
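
/* Build joint VLC tables that decode two (YUV) or three (RGB) symbols with a
 * single VLC_BITS-wide lookup whenever the combined code fits; longer
 * combinations fall back to the per-plane tables at decode time. */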
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}

static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length*8);

    for(i=0; i<3; i++){
        read_len_table(s->len[i], &gb);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
#if 0
for(j=0; j<256; j++){
printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
}
#endif
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return (get_bits_count(&gb)+7)/8;
}

static int read_old_huffman_tables(HYuvContext *s){
#if 1
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
    read_len_table(s->len[0], &gb);
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
    read_len_table(s->len[1], &gb);

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return 0;
#else
    av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
    return -1;
#endif
}

static void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        for(i=0; i<2; i++){
            s->temp[i]= av_malloc(4*s->width + 16);
        }
    }
}

static int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}

#if defined(CONFIG_HUFFYUV_DECODER) || defined(CONFIG_FFVHUFF_DECODER)
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;

    s->bgr32=1;
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    return 0;
}
#endif /* defined(CONFIG_HUFFYUV_DECODER) || defined(CONFIG_FFVHUFF_DECODER) */

#if defined(CONFIG_HUFFYUV_ENCODER) || defined(CONFIG_FFVHUFF_ENCODER)
static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
    int i;
    int index= 0;

    for(i=0; i<256;){
        int val= len[i];
        int repeat=0;

        for(; i<256 && len[i]==val && repeat<255; i++)
            repeat++;

        assert(val < 32 && val >0 && repeat<256 && repeat>0);
        if(repeat>7){
            buf[index++]= val;
            buf[index++]= repeat;
        }else{
            buf[index++]= val | (repeat<<5);
        }
    }

    return index;
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* defined(CONFIG_HUFFYUV_ENCODER) || defined(CONFIG_FFVHUFF_ENCODER) */

/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    for(i=0; i<count; i++){
        READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
        READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
    }
}

static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    for(i=0; i<count; i++){
        READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
    }
}

#if defined(CONFIG_HUFFYUV_ENCODER) || defined(CONFIG_FFVHUFF_ENCODER)
static int encode_422_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];\
            int u0 = s->temp[1][i];\
            int v0 = s->temp[2][i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif /* defined(CONFIG_HUFFYUV_ENCODER) || defined(CONFIG_FFVHUFF_ENCODER) */
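
/* Decode one row of packed RGB(A) pixels: try the joint three-symbol table
 * first and fall back to the per-plane VLCs, undoing the green decorrelation
 * when the stream uses it; an alpha code, if present, is read and discarded. */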
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
    }
}

static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}

static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

#if defined(CONFIG_HUFFYUV_DECODER) || defined(CONFIG_FFVHUFF_DECODER)
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}
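
/* After any per-frame Huffman tables, the bitstream begins with the raw
 * sample values that seed the left predictor, followed by the VLC-coded
 * residuals; RGB frames are stored bottom-up. */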
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size){
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);

    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{

            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                add_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    add_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    add_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*p->linesize[0];

        if(s->bitstream_bpp==32){
            skip_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* defined(CONFIG_HUFFYUV_DECODER) || defined(CONFIG_FFVHUFF_DECODER) */

static int common_end(HYuvContext *s){
    int i;

    for(i=0; i<3; i++){
        av_freep(&s->temp[i]);
    }
    return 0;
}

#if defined(CONFIG_HUFFYUV_DECODER) || defined(CONFIG_FFVHUFF_DECODER)
static av_cold int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for(i=0; i<6; i++){
        free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif /* defined(CONFIG_HUFFYUV_DECODER) || defined(CONFIG_FFVHUFF_DECODER) */

#if defined(CONFIG_HUFFYUV_ENCODER) || defined(CONFIG_FFVHUFF_ENCODER)
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    if(s->context){
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i], 256);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8,        p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);

        encode_422_bitstream(s, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, width);
                y++; cy++;
            }

            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, width);
            }
        }else{
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size+= (put_bits_count(&s->pb)+31)/8;
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif /* defined(CONFIG_HUFFYUV_ENCODER) || defined(CONFIG_FFVHUFF_ENCODER) */

#ifdef CONFIG_HUFFYUV_DECODER
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#ifdef CONFIG_FFVHUFF_DECODER
AVCodec ffvhuff_decoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif

#ifdef CONFIG_HUFFYUV_ENCODER
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#ifdef CONFIG_FFVHUFF_ENCODER
AVCodec ffvhuff_encoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif