Add channel layout support to the AC-3 encoder.
[FFMpeg-mirror/lagarith.git] / libavcodec / huffyuv.c
blob27de662804f80e2c6d6224e2de848411ae566210
1 /*
2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7 * the algorithm used
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 /**
27 * @file libavcodec/huffyuv.c
28 * huffyuv codec for libavcodec.
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
/* Number of bits resolved per first-level VLC table lookup. */
#define VLC_BITS 11

/* Byte offsets of the blue/green/red components inside a packed 32-bit
 * RGB32 pixel; they depend on host endianness. */
#ifdef WORDS_BIGENDIAN
#define B 3
#define G 2
#define R 1
#else
#define B 0
#define G 1
#define R 2
#endif
/* Spatial prediction modes stored in the stream header (extradata byte 0). */
typedef enum Predictor{
    LEFT= 0,    // predict each sample from the previous sample on the line
    PLANE,      // left prediction plus the delta to the previous line
    MEDIAN,     // median of left, top and (left+top-topleft)
} Predictor;
/* Per-instance state shared by the huffyuv/ffvhuff encoder and decoder. */
typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;        // spatial prediction mode in use
    GetBitContext gb;           // bitstream reader (decoder)
    PutBitContext pb;           // bitstream writer (encoder)
    int interlaced;             // fields are predicted independently (stride*2)
    int decorrelate;            // RGB: store G, B-G, R-G instead of raw B,G,R
    int bitstream_bpp;          // bits per pixel as coded: 12, 16, 24 or 32
    int version;                // 0: no extradata, 1: v1 tables, 2: v2 header
    int yuy2;                   //use yuy2 instead of 422P
    int bgr32;                  //use bgr32 instead of bgr24
    int width, height;
    int flags;                  // copy of avctx->flags
    int context;                // adaptive (per-frame) Huffman tables if set
    int picture_number;
    int last_slice_end;         // first row not yet passed to draw_horiz_band
    uint8_t *temp[3];           // per-plane scratch line buffers
    uint64_t stats[3][256];     // symbol occurrence counts (encoder/2-pass)
    uint8_t len[3][256];        // Huffman code lengths per plane
    uint32_t bits[3][256];      // Huffman codes per plane
    uint32_t pix_bgr_map[1<<VLC_BITS];  // joint-code index -> packed BGR pixel
    VLC vlc[6]; //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;  // byte-swapped copy of the input packet
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;
/* Built-in tables for version-1 (pre-extradata) huffyuv files: run-length
 * coded code-length tables plus the matching code values. */
static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

/* Huffman code values (indexed by symbol) for the classic luma table. */
static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

/* Huffman code values (indexed by symbol) for the classic chroma table. */
static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};
/**
 * Reverse left prediction over one line: dst[i] = acc += src[i].
 * dst stores the low 8 bits of the running sum; the full int accumulator
 * is returned so the caller can carry it to the next line.
 * The main loop is unrolled by two; the second loop handles an odd tail.
 */
static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
    int i = 0;

    while (i < w - 1) {           /* two samples per iteration */
        acc += src[i];
        dst[i++] = acc;
        acc += src[i];
        dst[i++] = acc;
    }
    while (i < w) {               /* at most one leftover sample */
        acc += src[i];
        dst[i++] = acc;
    }
    return acc;
}
/**
 * Reverse left prediction for packed 32-bit BGR: each component is a running
 * sum of the decoded residuals.  *red/*green/*blue carry the accumulators in
 * and out so prediction continues across lines.
 */
static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;

    for(i=0; i<w; i++){
        b+= src[4*i+B];
        g+= src[4*i+G];
        r+= src[4*i+R];

        // stores truncate to the low 8 bits; the int accumulators keep running
        dst[4*i+B]= b;
        dst[4*i+G]= g;
        dst[4*i+R]= r;
    }

    *red= r;
    *green= g;
    *blue= b;
}
/**
 * Forward left prediction: dst[i] = src[i] - src[i-1] (mod 256), seeded with
 * `left`.  Returns the last source sample, to seed the next call.
 * For wide lines the first 16 samples are done in C and the rest handed to
 * the (possibly SIMD) dsp.diff_bytes, which needs a 16-byte head start.
 */
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        // remaining samples: dst[i] = src[i] - src[i-1]
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}
/**
 * Forward left prediction for packed 32-bit BGR, per component.
 * The first up-to-4 pixels (16 bytes) are differenced in C, the rest via
 * dsp.diff_bytes against the previous pixel (offset -4 bytes = -1 pixel).
 * NOTE(review): for w < 4 the diff_bytes count w*4-16 is negative; this
 * appears to rely on diff_bytes treating a negative count as empty — confirm.
 */
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }

    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);

    // return the last pixel's components as the seed for the next line
    *red=   src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue=  src[(w-1)*4+B];
}
216 static void read_len_table(uint8_t *dst, GetBitContext *gb){
217 int i, val, repeat;
219 for(i=0; i<256;){
220 repeat= get_bits(gb, 3);
221 val = get_bits(gb, 5);
222 if(repeat==0)
223 repeat= get_bits(gb, 8);
224 //printf("%d %d\n", val, repeat);
225 while (repeat--)
226 dst[i++] = val;
/**
 * Build canonical Huffman codes from a table of 256 code lengths.
 * Codes are assigned longest-first; after each length level the running
 * code must be even (otherwise the lengths do not form a valid prefix
 * code) and is shifted down one bit for the next level.
 * Returns 0 on success, -1 if the length table is inconsistent.
 */
static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
    int len, index;
    uint32_t bits=0;

    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
                dst[index]= bits++;
        }
        if(bits & 1){
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}
#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Min-heap node used while building Huffman code lengths:
 * val = weighted occurrence count, name = symbol / internal node id. */
typedef struct {
    uint64_t val;
    int name;
} HeapElem;
254 static void heap_sift(HeapElem *h, int root, int size)
256 while(root*2+1 < size) {
257 int child = root*2+1;
258 if(child < size-1 && h[child].val > h[child+1].val)
259 child++;
260 if(h[root].val > h[child].val) {
261 FFSWAP(HeapElem, h[root], h[child]);
262 root = child;
263 } else
264 break;
/**
 * Compute Huffman code lengths (written to dst) for `size` symbols with the
 * given occurrence counts, limited to a maximum length of 31 bits.
 * Standard Huffman construction via a min-heap of merges; if any resulting
 * length reaches 32, the weights are flattened by doubling `offset` and the
 * whole construction is retried until all lengths fit.
 */
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    HeapElem h[size];
    int up[2*size];     // up[i]: parent of node i in the merge tree
    int len[2*size];    // len[i]: depth of internal node i
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        // seed the heap; offset biases zero/low counts so every symbol
        // gets a code and lengths shrink as offset grows
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        // heapify
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        // walk the tree top-down to turn parent links into depths
        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;   // too long: retry with larger offset
        }
        if(i==size) break;
    }
}
/**
 * Build joint VLC tables that decode two (YUV) or three (RGB) symbols in a
 * single lookup whenever the combined code fits in VLC_BITS, falling back
 * to the per-plane tables otherwise (see READ_2PIX / decode_bgr_1).
 */
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        // YUV: pair a luma symbol with a symbol of plane p (Y, U or V)
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;   // plane holding the first (G) symbol
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        // stream carries G, B-G, R-G: undo here in the map
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}
375 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
376 GetBitContext gb;
377 int i;
379 init_get_bits(&gb, src, length*8);
381 for(i=0; i<3; i++){
382 read_len_table(s->len[i], &gb);
384 if(generate_bits_table(s->bits[i], s->len[i])<0){
385 return -1;
387 #if 0
388 for(j=0; j<256; j++){
389 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
391 #endif
392 free_vlc(&s->vlc[i]);
393 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
396 generate_joint_tables(s);
398 return (get_bits_count(&gb)+7)/8;
401 static int read_old_huffman_tables(HYuvContext *s){
402 #if 1
403 GetBitContext gb;
404 int i;
406 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
407 read_len_table(s->len[0], &gb);
408 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
409 read_len_table(s->len[1], &gb);
411 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
412 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
414 if(s->bitstream_bpp >= 24){
415 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
416 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
418 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
419 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
421 for(i=0; i<3; i++){
422 free_vlc(&s->vlc[i]);
423 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
426 generate_joint_tables(s);
428 return 0;
429 #else
430 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
431 return -1;
432 #endif
/**
 * Allocate the per-plane scratch line buffers: three width-sized lines for
 * planar YUV, two 4*width lines for packed RGB.
 * NOTE(review): av_malloc results are not checked here; callers appear to
 * rely on downstream failures — confirm against the rest of the codebase.
 */
static av_cold void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        for(i=0; i<2; i++){
            s->temp[i]= av_malloc(4*s->width + 16);
        }
    }
}
/* Initialization shared by encoder and decoder: cache avctx fields and
 * set up the DSP function pointers.  Always returns 0. */
static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}
#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/**
 * Decoder init: parse the extradata header (version 2) or fall back to the
 * classic tables (version 0/1), then pick the output pixel format from the
 * coded bits per pixel.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;   // historical heuristic: >288 lines => interlaced

    s->bgr32=1;

    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        // extradata layout: [0] method/predictor, [1] bpp, [2] flags, [3] 0
        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        // pre-v2: mode is packed into the low bits of bits_per_coded_sample
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
562 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
563 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
564 int i;
565 int index= 0;
567 for(i=0; i<256;){
568 int val= len[i];
569 int repeat=0;
571 for(; i<256 && len[i]==val && repeat<255; i++)
572 repeat++;
574 assert(val < 32 && val >0 && repeat<256 && repeat>0);
575 if(repeat>7){
576 buf[index++]= val;
577 buf[index++]= repeat;
578 }else{
579 buf[index++]= val | (repeat<<5);
583 return index;
/**
 * Encoder init: validate the pixel format / option combination, write the
 * 4-byte version-2 extradata header plus the serialized code-length tables,
 * and seed the symbol statistics (from a 2-pass stats file if provided,
 * otherwise from a generic laplacian-ish model).
 */
static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    // restrictions of the original (non-FFV) huffyuv decoder
    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    // version-2 extradata header
    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        // 2-pass: accumulate symbol counts from the stats file
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        // no stats: assume small residuals are most likely
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        // seed adaptive statistics proportionally to the picture size
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols at once via the joint VLC; 0xffff marks "not in the
 * joint table", in which case the two per-plane VLCs are read separately. */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}
/* Decode `count` luma samples plus the interleaved chroma (Y U Y V order)
 * into the per-plane temp buffers. */
static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;   // two luma samples (and one U + one V) per iteration

    for(i=0; i<count; i++){
        READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
        READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
    }
}
/* Decode `count` luma-only samples (used for the extra luma lines of
 * 4:2:0 streams) into s->temp[0]. */
static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;   // the joint YY table yields two luma samples per read

    for(i=0; i<count; i++){
        READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
    }
}
#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Entropy-code `count` 4:2:2 samples from the temp buffers into s->pb.
 * Updates s->stats when gathering pass-1 or adaptive-context statistics.
 * Returns 0, or -1 if the output buffer would overflow (worst case
 * 4 bytes per sample pair).
 */
static int encode_422_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];\
            int u0 = s->temp[1][i];\
            int v0 = s->temp[2][i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        // statistics only; coding happens below (unless output is disabled)
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        // adaptive tables: count while coding
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}
/**
 * Entropy-code `count` luma-only samples (gray lines of 4:2:0 frames).
 * Same statistics/coding structure as encode_422_bitstream().
 * Returns 0, or -1 on potential output overflow.
 */
static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/**
 * Decode `count` packed BGR pixels into s->temp[0].  Tries the joint
 * 3-symbol table first (code != -1 yields a ready-made pixel from
 * pix_bgr_map); otherwise reads B, G, R from the per-plane tables,
 * undoing green decorrelation when requested.  Always inlined so the
 * constant decorrelate/alpha arguments specialize the four call sites.
 */
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?! alpha code read but discarded
    }
}
/* Dispatch to decode_bgr_1 with compile-time-constant flags so each of the
 * four (decorrelate x alpha) combinations gets a specialized inline copy. */
static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}
/**
 * Entropy-code `count` packed BGR pixels from s->temp[0], with green
 * decorrelation (B-G, R-G mod 256) applied on the fly.
 * Returns 0, or -1 on potential output overflow.
 */
static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        // pass 1 without output: statistics only
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}
#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Hand the rows decoded since the last call (up to, not including, row y)
 * to the application's draw_horiz_band callback, if one is installed. */
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    // chroma rows advance at half rate for 4:2:0
    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();   // leave MMX state clean before calling out

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}
/**
 * Decode one frame.  The packet is first byte-swapped into
 * s->bitstream_buffer (huffyuv stores the stream as little-endian 32-bit
 * words); with adaptive context enabled, per-frame Huffman tables precede
 * the picture data.  Each prediction mode then reconstructs the planes
 * line by line from the coded residuals.
 * Returns the number of consumed bytes or a negative error.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);

    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        // adaptive mode: tables are transmitted at the start of each frame
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    // in interlaced mode each field predicts from the line two rows up
    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{
            // first 4 raw bytes seed the predictors: V, Y1, U, Y0
            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        // 4:2:0: every other line is luma-only
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        // NOTE(review): linesize[2] used for plane 1 and vice
                        // versa — the planes share a stride here, but confirm.
                        leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        // catch the luma plane up to twice the chroma row
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*p->linesize[0];

        // RGB frames are stored bottom-up; seed predictors from the last line
        if(s->bitstream_bpp==32){
            skip_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1191 static int common_end(HYuvContext *s){
1192 int i;
1194 for(i=0; i<3; i++){
1195 av_freep(&s->temp[i]);
1197 return 0;
1200 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1201 static av_cold int decode_end(AVCodecContext *avctx)
1203 HYuvContext *s = avctx->priv_data;
1204 int i;
1206 common_end(s);
1207 av_freep(&s->bitstream_buffer);
1209 for(i=0; i<6; i++){
1210 free_vlc(&s->vlc[i]);
1213 return 0;
1215 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1217 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Encode one frame into buf.
 *
 * Output layout: optional per-context Huffman tables, then the entropy-coded
 * plane data (YUV 4:2:2 / 4:2:0 or BGR32 depending on avctx->pix_fmt),
 * padded with zero bits and emitted as byteswapped 32-bit words.
 *
 * @return number of bytes written to buf, or -1 on table-generation failure
 */
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;   // chroma width (horizontal 2:1 subsampling)
    const int height= s->height;
    // Interlaced prediction references the previous line of the same field,
    // i.e. two memory lines up.
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= FF_I_TYPE;   // every huffyuv frame is intra coded
    p->key_frame= 1;

    if(s->context){
        // Adaptive-context mode: rebuild the Huffman code from the gathered
        // symbol statistics and store the length tables in the frame.
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i], 256);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        // Halve the statistics so more recent frames weigh more.
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        // The first four samples are stored raw; they seed the left predictors.
        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);

        encode_422_bitstream(s, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            // Interlaced: line 1 belongs to the other field and still has no
            // same-field neighbour above, so it is left-predicted as well.
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, width);
                y++; cy++;
            }

            // The first few samples of the next line lack a top-left
            // neighbour; encode them with plain left prediction.
            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    // 4:2:0: luma has twice the line count of chroma, so emit
                    // luma-only lines until y catches up with 2*cy.
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, width);
            }
        }else{
            // LEFT or PLANE prediction.
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    // PLANE: subtract the line above first, then left-predict
                    // the residual. s->temp[2] holds u followed by v.
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        // RGB frames are stored bottom-up: start at the last line and walk
        // upwards with a negative stride.
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        // First pixel is stored raw and seeds the per-channel left predictors.
        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    // Pad with 31 zero bits and round the payload up to whole 32-bit words.
    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        // First-pass mode: dump the raw symbol statistics as text every 32nd
        // frame. NOTE(review): assumes avctx->stats_out is non-NULL and at
        // least 30 KiB -- presumably guaranteed by encode_init; verify.
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        // The huffyuv bitstream is stored as byteswapped 32-bit words.
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}
1413 static av_cold int encode_end(AVCodecContext *avctx)
1415 HYuvContext *s = avctx->priv_data;
1417 common_end(s);
1419 av_freep(&avctx->extradata);
1420 av_freep(&avctx->stats_out);
1422 return 0;
1424 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1426 #if CONFIG_HUFFYUV_DECODER
1427 AVCodec huffyuv_decoder = {
1428 "huffyuv",
1429 CODEC_TYPE_VIDEO,
1430 CODEC_ID_HUFFYUV,
1431 sizeof(HYuvContext),
1432 decode_init,
1433 NULL,
1434 decode_end,
1435 decode_frame,
1436 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1437 NULL,
1438 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1440 #endif
1442 #if CONFIG_FFVHUFF_DECODER
1443 AVCodec ffvhuff_decoder = {
1444 "ffvhuff",
1445 CODEC_TYPE_VIDEO,
1446 CODEC_ID_FFVHUFF,
1447 sizeof(HYuvContext),
1448 decode_init,
1449 NULL,
1450 decode_end,
1451 decode_frame,
1452 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1453 NULL,
1454 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1456 #endif
1458 #if CONFIG_HUFFYUV_ENCODER
1459 AVCodec huffyuv_encoder = {
1460 "huffyuv",
1461 CODEC_TYPE_VIDEO,
1462 CODEC_ID_HUFFYUV,
1463 sizeof(HYuvContext),
1464 encode_init,
1465 encode_frame,
1466 encode_end,
1467 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1468 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1470 #endif
1472 #if CONFIG_FFVHUFF_ENCODER
1473 AVCodec ffvhuff_encoder = {
1474 "ffvhuff",
1475 CODEC_TYPE_VIDEO,
1476 CODEC_ID_FFVHUFF,
1477 sizeof(HYuvContext),
1478 encode_init,
1479 encode_frame,
1480 encode_end,
1481 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1482 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1484 #endif