10l: don't check against current layout until after validating ch_mode.
[FFMpeg-mirror/lagarith.git] / libavcodec / huffyuv.c
blob51acf0570d5085601a6c84baa28003f3effd60eb
1 /*
2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7 * the algorithm used
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 /**
27 * @file libavcodec/huffyuv.c
28 * huffyuv codec for libavcodec.
31 #include "avcodec.h"
32 #include "bitstream.h"
33 #include "dsputil.h"
#define VLC_BITS 11  // size in bits of the first-level VLC lookup tables

/* Byte offsets of the colour channels inside a native-endian 32-bit BGRA
 * pixel; swapped on big-endian machines so s->temp[0][4*i+R] etc. always
 * address the same logical channel. */
#ifdef WORDS_BIGENDIAN
#define B 3
#define G 2
#define R 1
#else
#define B 0
#define G 1
#define R 2
#endif
/** spatial prediction modes stored in the stream header */
typedef enum Predictor{
    LEFT= 0,  ///< predict from the previous pixel of the same line
    PLANE,    ///< left prediction plus the residual of the line above
    MEDIAN,   ///< median of left, top and left+top-topleft
} Predictor;
/** codec private context shared by the huffyuv/ffvhuff encoder and decoder */
typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;        ///< spatial prediction mode in use
    GetBitContext gb;           ///< bit reader for the current frame (decoder)
    PutBitContext pb;           ///< bit writer for the current frame (encoder)
    int interlaced;             ///< prediction wraps over two lines when set
    int decorrelate;            ///< RGB: code G, B-G, R-G instead of raw channels
    int bitstream_bpp;          ///< bits per pixel as coded in the stream (12/16/24/32)
    int version;                ///< 1: classic tables, 2: tables in extradata
    int yuy2;                   //use yuy2 instead of 422P
    int bgr32;                  //use bgr32 instead of bgr24
    int width, height;
    int flags;                  ///< copy of avctx->flags
    int context;                ///< adaptive (per-frame) huffman tables when set
    int picture_number;
    int last_slice_end;         ///< last row passed to draw_horiz_band()
    uint8_t *temp[3];           ///< per-plane scratch line buffers, see alloc_temp()
    uint64_t stats[3][256];     ///< symbol frequencies used to build the tables
    uint8_t len[3][256];        ///< huffman code lengths per plane/symbol
    uint32_t bits[3][256];      ///< huffman code bits per plane/symbol
    uint32_t pix_bgr_map[1<<VLC_BITS];  ///< joint-code -> packed BGR pixel map
    VLC vlc[6];                 //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;  ///< byte-swapped copy of the input packet
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;
/* Fixed ("classic") huffman tables used by version-1 huffyuv streams, which
 * carry no tables of their own.  The *_shift_* arrays are run-length coded
 * code-length tables in the same format read_len_table() parses; the
 * *_add_* arrays are the matching code bits. */
static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};
/**
 * Undo left prediction on one line: dst[i] = acc + sum(src[0..i]).
 * Only the low 8 bits matter for the stored pixels; the running
 * accumulator is returned un-masked for the caller to carry over.
 */
static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
    int i= 0;

    /* main loop, manually unrolled by two */
    while(i + 1 < w){
        acc+= src[i];
        dst[i]= acc;
        acc+= src[i+1];
        dst[i+1]= acc;
        i+= 2;
    }

    /* odd trailing pixel, if any */
    if(i < w){
        acc+= src[i];
        dst[i]= acc;
    }

    return acc;
}
150 static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
151 int i;
152 int r,g,b;
153 r= *red;
154 g= *green;
155 b= *blue;
157 for(i=0; i<w; i++){
158 b+= src[4*i+B];
159 g+= src[4*i+G];
160 r+= src[4*i+R];
162 dst[4*i+B]= b;
163 dst[4*i+G]= g;
164 dst[4*i+R]= r;
167 *red= r;
168 *green= g;
169 *blue= b;
/**
 * Apply left prediction to one line: dst[i] = src[i] - src[i-1].
 * Returns the value to seed the next call with (the last source pixel).
 */
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        /* short lines: plain scalar differencing */
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        /* do the first 16 pixels scalar so the (possibly SIMD) DSP routine
         * below starts at a 16-byte offset */
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}
/**
 * Apply left prediction to one BGR32 line, per channel.
 * NOTE(review): the scalar prologue handles FFMIN(w,4) pixels but the DSP
 * call below assumes at least 4 pixels (w*4-16 would go negative for w<4);
 * presumably callers always pass full picture widths — verify.
 */
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    /* first 4 pixels scalar, remainder via the DSP helper at offset 16 */
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    /* carry the last pixel of the line into the next call */
    *red=   src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue=  src[(w-1)*4+B];
}
215 static void read_len_table(uint8_t *dst, GetBitContext *gb){
216 int i, val, repeat;
218 for(i=0; i<256;){
219 repeat= get_bits(gb, 3);
220 val = get_bits(gb, 5);
221 if(repeat==0)
222 repeat= get_bits(gb, 8);
223 //printf("%d %d\n", val, repeat);
224 while (repeat--)
225 dst[i++] = val;
229 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
230 int len, index;
231 uint32_t bits=0;
233 for(len=32; len>0; len--){
234 for(index=0; index<256; index++){
235 if(len_table[index]==len)
236 dst[index]= bits++;
238 if(bits & 1){
239 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
240 return -1;
242 bits >>= 1;
244 return 0;
#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/** node of the min-heap used while building the huffman tree */
typedef struct {
    uint64_t val;  ///< accumulated weight; smaller floats toward the root
    int name;      ///< symbol (or merged-node) index
} HeapElem;

/** restore the min-heap property by sifting h[root] down within 'size' nodes */
static void heap_sift(HeapElem *h, int root, int size)
{
    while(root*2+1 < size) {
        int child = root*2+1;
        // pick the smaller of the two children
        if(child < size-1 && h[child].val > h[child+1].val)
            child++;
        if(h[root].val > h[child].val) {
            FFSWAP(HeapElem, h[root], h[child]);
            root = child;
        } else
            break;
    }
}
/**
 * Build huffman code lengths (< 32 bits) from symbol statistics.
 * Standard heap-based huffman construction; if any resulting length
 * reaches 32 bits the weights are flattened by doubling 'offset'
 * (added to every scaled count) and the tree is rebuilt, trading a
 * little optimality for a bounded code length.
 */
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    HeapElem h[size];
    int up[2*size];   // parent links of the huffman tree
    int len[2*size];  // depth of each internal node
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        // seed the min-heap with the (scaled) symbol frequencies
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        // walk down from the root: node depth == code length
        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;  // too long: flatten stats and retry
        }
        if(i==size) break;
    }
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/**
 * Build the joint VLC tables (vlc[3..5]) that decode two symbols per
 * lookup: (Y,Y), (Y,U), (Y,V) for planar modes, or a whole decorrelated
 * BGR pixel for RGB modes.  Only symbol pairs whose combined length fits
 * in VLC_BITS are entered; everything else falls back to the per-plane
 * tables at decode time (see READ_2PIX / decode_bgr_1).
 */
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 becaues that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        // stream carries G, B-G, R-G; reconstruct B and R here
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}
374 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
375 GetBitContext gb;
376 int i;
378 init_get_bits(&gb, src, length*8);
380 for(i=0; i<3; i++){
381 read_len_table(s->len[i], &gb);
383 if(generate_bits_table(s->bits[i], s->len[i])<0){
384 return -1;
386 #if 0
387 for(j=0; j<256; j++){
388 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
390 #endif
391 free_vlc(&s->vlc[i]);
392 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
395 generate_joint_tables(s);
397 return (get_bits_count(&gb)+7)/8;
400 static int read_old_huffman_tables(HYuvContext *s){
401 #if 1
402 GetBitContext gb;
403 int i;
405 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
406 read_len_table(s->len[0], &gb);
407 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
408 read_len_table(s->len[1], &gb);
410 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
411 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
413 if(s->bitstream_bpp >= 24){
414 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
415 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
417 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
418 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
420 for(i=0; i<3; i++){
421 free_vlc(&s->vlc[i]);
422 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
425 generate_joint_tables(s);
427 return 0;
428 #else
429 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
430 return -1;
431 #endif
/**
 * Allocate the per-plane scratch line buffers (one coded line + padding).
 * NOTE(review): av_malloc() results are not checked here; an OOM would be
 * caught only when the buffers are first written — confirm against the
 * project's allocation policy.
 */
static av_cold void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        // planar: one line each for Y, U, V
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        // packed RGB: 4 bytes per pixel, two buffers
        for(i=0; i<2; i++){
            s->temp[i]= av_malloc(4*s->width + 16);
        }
    }
}
/** initialisation shared by the encoder and the decoder */
static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}
#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/**
 * Decoder init: parse the extradata header (version 2) or derive the
 * parameters from bits_per_coded_sample (version 0/1), install the
 * huffman tables and pick the output pixel format.
 *
 * Fixes versus the previous revision:
 *  - all 6 VLC tables are zeroed, not just the first 3;
 *    generate_joint_tables() calls free_vlc() on vlc[3..5], which must
 *    not contain garbage.
 *  - read_huffman_tables() is given extradata_size-4: the first 4
 *    extradata bytes are the header parsed just above, not table data.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 6*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;   // PAL-field heuristic, may be overridden below

    s->bgr32=1;

    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        /* extradata layout: [0]=predictor|decorrelate<<6, [1]=bpp,
         * [2]=interlace/context flags, [3]=reserved, [4..]=tables */
        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
            return -1;
    }else{
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Run-length encode one huffman length table into 'buf' in the format
 * read_len_table() parses: runs <=7 as val|(repeat<<5), longer runs as
 * an escaped val byte followed by an 8-bit repeat count.
 *
 * @return number of bytes written
 */
static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
    int i;
    int index= 0;

    for(i=0; i<256;){
        int val= len[i];
        int repeat=0;

        // measure the run of equal lengths (capped at 255)
        for(; i<256 && len[i]==val && repeat<255; i++)
            repeat++;

        assert(val < 32 && val >0 && repeat<256 && repeat>0);
        if(repeat>7){
            buf[index++]= val;
            buf[index++]= repeat;
        }else{
            buf[index++]= val | (repeat<<5);
        }
    }

    return index;
}
/**
 * Encoder init: pick stream parameters from the pixel format and flags,
 * write the 4-byte extradata header plus the stored length tables, and
 * seed the symbol statistics (from 2-pass stats_in when present,
 * otherwise from a generic laplacian-ish model).
 */
static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        // adaptive tables change per frame, incompatible with global 2-pass stats
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    /* reject combinations the original win32 huffyuv decoder cannot handle */
    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    /* 4-byte extradata header, see decode_init() for the layout */
    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        /* 2nd pass: accumulate the stats written out during the 1st pass */
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        /* no stats: assume small residuals are most likely */
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        /* seed the adaptive stats with the same model, scaled to frame size */
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols at once via the joint table; on a miss (0xffff
 * sentinel) fall back to two single-symbol reads. */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}
/** decode 'count' pixels of one 4:2:2 line into s->temp[0..2] (Y,U,V) */
static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;  // two luma + one U + one V per iteration

    for(i=0; i<count; i++){
        READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
        READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
    }
}
/** decode 'count' luma-only pixels (chroma-less 4:2:0 lines) into s->temp[0] */
static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;  // luma pairs via the YY joint table

    for(i=0; i<count; i++){
        READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
    }
}
#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Entropy-code 'count' pixels of one 4:2:2 line from s->temp[0..2].
 * Updates s->stats in pass-1 and adaptive-context modes.
 * @return 0 on success, -1 if the output buffer would overflow
 */
static int encode_422_bitstream(HYuvContext *s, int count){
    int i;

    // worst case: 4 samples * up to 4 bytes each per iteration
    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];\
            int u0 = s->temp[1][i];\
            int v0 = s->temp[2][i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        // stats-only pass
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        // adaptive tables: keep counting while writing
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}
/**
 * Entropy-code 'count' luma-only pixels from s->temp[0].
 * @return 0 on success, -1 if the output buffer would overflow
 */
static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        // stats-only pass
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        // adaptive tables: keep counting while writing
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/**
 * Decode 'count' BGR pixels into s->temp[0] (4 bytes per pixel).
 * 'decorrelate' and 'alpha' are compile-time constants at each call site
 * (see decode_bgr_bitstream) so dead branches are eliminated.
 */
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        // try the joint table first: one lookup yields a whole pixel
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            // stream carries G, B-G, R-G
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
    }
}
/**
 * RGB row entry point: dispatch to decode_bgr_1() with constant
 * arguments so the av_always_inline expansion specializes each of the
 * four variants (decorrelate x alpha).
 */
static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}
/**
 * Entropy-code 'count' BGR32 pixels from s->temp[0] as decorrelated
 * G, B-G, R-G channels.  Updates stats for pass-1/adaptive modes.
 * @return 0 on success, -1 if the output buffer would overflow
 */
static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        // stats-only pass
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}
#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/** report the rows decoded since the last call via draw_horiz_band(), up to row 'y' */
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    // chroma row index: halved for 4:2:0
    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();  // leave MMX state clean before calling back into user code

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}
/**
 * Decode one frame.  Byte-swaps the packet into s->bitstream_buffer,
 * optionally reads per-frame tables (adaptive context mode), then decodes
 * per predictor/format:
 *  - planar YUV (bpp<24): rows top-down; LEFT/PLANE or MEDIAN prediction,
 *    with the luma-only inner loop handling the extra Y rows of 4:2:0.
 *  - BGR32: rows bottom-up (the format stores the image upside down).
 *
 * NOTE(review): the av_fast_realloc() result is not checked for NULL
 * before use — verify against the project's OOM policy.
 *
 * @return bytes consumed from 'buf', or -1 on error
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size){
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);

    // huffyuv bitstreams are stored as big-endian 32-bit words
    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        // adaptive mode: each frame starts with fresh tables
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    // with interlacing, prediction references the line 2 rows up
    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{
            // first pixel of each plane is stored raw and seeds the predictors
            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        // 4:2:0: every other row is luma-only
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        // catch up the extra luma rows of 4:2:0
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*p->linesize[0];

        // raw first pixel; channel order in the stream depends on bpp
        if(s->bitstream_bpp==32){
            skip_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{
            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1186 static int common_end(HYuvContext *s){
1187 int i;
1189 for(i=0; i<3; i++){
1190 av_freep(&s->temp[i]);
1192 return 0;
1195 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1196 static av_cold int decode_end(AVCodecContext *avctx)
1198 HYuvContext *s = avctx->priv_data;
1199 int i;
1201 common_end(s);
1202 av_freep(&s->bitstream_buffer);
1204 for(i=0; i<6; i++){
1205 free_vlc(&s->vlc[i]);
1208 return 0;
1210 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1212 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Encode one frame.
 *
 * @param avctx    codec context; avctx->pix_fmt selects the code path
 * @param buf      output buffer; in adaptive-context mode the Huffman
 *                 tables are stored at its start, before the bitstream
 * @param buf_size size of buf in bytes
 * @param data     input AVFrame
 * @return number of bytes written (a multiple of 4), or -1 if a bits
 *         table could not be generated
 */
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;   // chroma width (horizontally subsampled planes)
    const int height= s->height;
    /* In interlaced mode the "line above" belonging to the same field is
     * two memory lines up, hence the doubled strides. */
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;                // size: bytes now, 32-bit words after the final conversion below

    *p = *pict;
    p->pict_type= FF_I_TYPE;         // every huffyuv frame is intra coded
    p->key_frame= 1;

    if(s->context){
        /* Adaptive context: rebuild the Huffman tables from accumulated
         * symbol stats, store them in-band, then halve the stats so more
         * recent frames dominate. */
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i], 256);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        /* First samples of the frame are stored raw; they seed the left
         * predictors. */
        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);

        encode_422_bitstream(s, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            /* Interlaced: the first line of the second field has no "line
             * above" in its own field yet, so it is left-predicted too. */
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, width);
                y++; cy++;
            }

            /* First 4 luma / 2 chroma samples of the next line are still
             * left-predicted; median prediction starts after them. */
            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    /* 12 bpp (4:2:0): emit luma-only lines until the luma
                     * line counter catches up with 2x the chroma counter. */
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, width);
            }
        }else{
            /* LEFT / PLANE prediction paths. */
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        /* PLANE: vertical delta first, then left prediction
                         * on the residual. */
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    /* U and V residuals share s->temp[2] (V at offset width2). */
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        /* BGR32 is stored bottom-up in the stream: start from the last
         * picture line and walk backwards via a negative stride. */
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);          // 4th byte of the first pixel written as 0

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        /* NOTE(review): unlike the decoder's BGR24 branch this does not
         * return -1; it falls through and still emits a frame with an
         * empty bitstream — consider returning an error here. */
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size+= (put_bits_count(&s->pb)+31)/8;
    size/= 4;                            // size is now in 32-bit words

    /* Two-pass rate control: dump the symbol statistics as text every
     * 32nd frame into avctx->stats_out. */
    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;          // assumes stats_out holds at least 30KB — TODO confirm against allocator
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        /* Byteswap the written 32-bit words — presumably to match the
         * word layout the huffyuv decoder expects; verify against the
         * decoder side. */
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}
1406 static av_cold int encode_end(AVCodecContext *avctx)
1408 HYuvContext *s = avctx->priv_data;
1410 common_end(s);
1412 av_freep(&avctx->extradata);
1413 av_freep(&avctx->stats_out);
1415 return 0;
1417 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1419 #if CONFIG_HUFFYUV_DECODER
/* Registration entry for the huffyuv decoder.
 * Positional fields — presumably name, type, id, priv_data_size, init,
 * encode, close, decode, capabilities, next per this AVCodec layout;
 * verify against avcodec.h. */
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,                    // no encode callback: decoder-only entry
    decode_end,
    decode_frame,
    /* DRAW_HORIZ_BAND: decode_frame() emits slices via draw_slice(). */
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
1433 #endif
1435 #if CONFIG_FFVHUFF_DECODER
/* Registration entry for the FFmpeg-variant (ffvhuff) decoder; shares
 * all callbacks with the huffyuv decoder, differing only in codec id.
 * Positional field order assumed as in the huffyuv entry — verify
 * against avcodec.h. */
AVCodec ffvhuff_decoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,                    // no encode callback: decoder-only entry
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
1449 #endif
1451 #if CONFIG_HUFFYUV_ENCODER
/* Registration entry for the huffyuv encoder.
 * Positional fields — presumably name, type, id, priv_data_size, init,
 * encode, close per this AVCodec layout; verify against avcodec.h. */
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    /* Note: no YUV420P here, unlike the ffvhuff encoder below. */
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
1463 #endif
1465 #if CONFIG_FFVHUFF_ENCODER
/* Registration entry for the FFmpeg-variant (ffvhuff) encoder; shares
 * all callbacks with the huffyuv encoder but additionally accepts
 * YUV420P input. Positional field order assumed as in the huffyuv
 * entry — verify against avcodec.h. */
AVCodec ffvhuff_encoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
1477 #endif