eac3dec: get rid of unnecessary left shifts in 16-bit * 24-bit
[FFMpeg-mirror/lagarith.git] / libavcodec / huffyuv.c
blob092a1096b62e46eb1d9e49692d22bcf696fdec51
1 /*
2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7 * the algorithm used
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 /**
27 * @file libavcodec/huffyuv.c
28 * huffyuv codec for libavcodec.
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
36 #define VLC_BITS 11
/* Byte offsets of the blue/green/red channels inside a 32-bit BGR32 pixel;
 * they depend on host endianness (the remaining byte holds alpha/padding). */
#ifdef WORDS_BIGENDIAN
#define B 3
#define G 2
#define R 1
#else
#define B 0
#define G 1
#define R 2
#endif
/** Spatial prediction methods applied before entropy coding. */
typedef enum Predictor{
    LEFT= 0,    ///< predict from the previous pixel in the row
    PLANE,      ///< LEFT plus an additional previous-line delta pass
    MEDIAN,     ///< median of left, top and (left + top - topleft)
} Predictor;
/** Shared state of the huffyuv/ffvhuff encoder and decoder. */
typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;            ///< LEFT, PLANE or MEDIAN
    GetBitContext gb;               ///< bitstream reader (decoder side)
    PutBitContext pb;               ///< bitstream writer (encoder side)
    int interlaced;                 ///< when set, PLANE/MEDIAN predict 2 lines up
    int decorrelate;                ///< RGB: code G, B-G, R-G instead of raw channels
    int bitstream_bpp;              ///< coded bits per pixel: 12, 16, 24 or 32
    int version;                    ///< 0 = classic, 1 = odd bpp, 2 = tables in extradata
    int yuy2; //use yuy2 instead of 422P
    int bgr32; //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;                    ///< adaptive (per-frame) Huffman tables when set
    int picture_number;
    int last_slice_end;             ///< last row already reported via draw_horiz_band
    uint8_t *temp[3];               ///< per-plane scratch rows (see alloc_temp)
    uint64_t stats[3][256];         ///< symbol occurrence counts per plane
    uint8_t len[3][256];            ///< Huffman code lengths per plane
    uint32_t bits[3][256];          ///< Huffman codes per plane
    uint32_t pix_bgr_map[1<<VLC_BITS]; ///< packed pixel for each joint-RGB VLC index
    VLC vlc[6]; //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;      ///< byte-swapped copy of the input packet
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;
/* Run-length coded luma code-length table used by "classic" (v0/v1) huffyuv
 * files that carry no tables in extradata; parsed by read_len_table(). */
static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};
/* Run-length coded chroma code-length table for classic huffyuv files. */
static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};
/* Canonical luma Huffman codes matching classic_shift_luma (one per symbol). */
static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};
/* Canonical chroma Huffman codes matching classic_shift_chroma. */
static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};
/**
 * Undo left prediction on one row: dst[i] = (acc += src[i]).
 * Only the low 8 bits are stored in dst, but the raw (unmasked) running
 * accumulator is returned, exactly as the original unrolled loop did.
 */
static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
    int i;

    for(i=0; i<w; i++){
        acc += src[i];
        dst[i] = acc;
    }

    return acc;
}
151 static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
152 int i;
153 int r,g,b;
154 r= *red;
155 g= *green;
156 b= *blue;
158 for(i=0; i<w; i++){
159 b+= src[4*i+B];
160 g+= src[4*i+G];
161 r+= src[4*i+R];
163 dst[4*i+B]= b;
164 dst[4*i+G]= g;
165 dst[4*i+R]= r;
168 *red= r;
169 *green= g;
170 *blue= b;
173 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
174 int i;
175 if(w<32){
176 for(i=0; i<w; i++){
177 const int temp= src[i];
178 dst[i]= temp - left;
179 left= temp;
181 return left;
182 }else{
183 for(i=0; i<16; i++){
184 const int temp= src[i];
185 dst[i]= temp - left;
186 left= temp;
188 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
189 return src[w-1];
193 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
194 int i;
195 int r,g,b;
196 r= *red;
197 g= *green;
198 b= *blue;
199 for(i=0; i<FFMIN(w,4); i++){
200 const int rt= src[i*4+R];
201 const int gt= src[i*4+G];
202 const int bt= src[i*4+B];
203 dst[i*4+R]= rt - r;
204 dst[i*4+G]= gt - g;
205 dst[i*4+B]= bt - b;
206 r = rt;
207 g = gt;
208 b = bt;
210 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
211 *red= src[(w-1)*4+R];
212 *green= src[(w-1)*4+G];
213 *blue= src[(w-1)*4+B];
216 static void read_len_table(uint8_t *dst, GetBitContext *gb){
217 int i, val, repeat;
219 for(i=0; i<256;){
220 repeat= get_bits(gb, 3);
221 val = get_bits(gb, 5);
222 if(repeat==0)
223 repeat= get_bits(gb, 8);
224 //printf("%d %d\n", val, repeat);
225 while (repeat--)
226 dst[i++] = val;
230 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
231 int len, index;
232 uint32_t bits=0;
234 for(len=32; len>0; len--){
235 for(index=0; index<256; index++){
236 if(len_table[index]==len)
237 dst[index]= bits++;
239 if(bits & 1){
240 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
241 return -1;
243 bits >>= 1;
245 return 0;
248 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/** Min-heap node used while building the Huffman tree. */
typedef struct {
    uint64_t val;   ///< sort key: (symbol count << 8) + length-limiting bias
    int name;       ///< symbol index, reused as merged-node id during the build
} HeapElem;
254 static void heap_sift(HeapElem *h, int root, int size)
256 while(root*2+1 < size) {
257 int child = root*2+1;
258 if(child < size-1 && h[child].val > h[child+1].val)
259 child++;
260 if(h[root].val > h[child].val) {
261 FFSWAP(HeapElem, h[root], h[child]);
262 root = child;
263 } else
264 break;
/**
 * Compute length-limited Huffman code lengths from symbol counts.
 * Builds the Huffman tree with a min-heap; if any resulting length
 * reaches 32 bits, the whole build is retried with a doubled 'offset'
 * bias added to every count, which flattens the tree until all code
 * lengths fit below 32.
 */
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    HeapElem h[size];
    int up[2*size];    // up[i]: parent node of (leaf or internal) node i
    int len[2*size];   // depth of each internal node
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        // (re)build the heap of biased symbol counts
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        // walk from the root (node 2*size-2) down, depth = parent depth + 1
        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;   // too deep: retry with a larger bias
        }
        if(i==size) break;
    }
}
/**
 * Build the joint VLC tables that decode two (YUV) or three (RGB) symbols
 * in a single table lookup, from the per-plane code tables.
 * YUV: vlc[3..5] map a combined Y+Y / Y+U / Y+V code to a 16-bit symbol
 * pair. RGB: vlc[3] maps a combined G+B+R code to an index into
 * pix_bgr_map, which holds the ready-made packed pixel.
 */
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;   // first code alone already fills the table depth
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;   // pair doesn't fit in one first-level lookup
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 becaues that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        // channels are stored as G, B-G, R-G: undo here
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}
375 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
376 GetBitContext gb;
377 int i;
379 init_get_bits(&gb, src, length*8);
381 for(i=0; i<3; i++){
382 read_len_table(s->len[i], &gb);
384 if(generate_bits_table(s->bits[i], s->len[i])<0){
385 return -1;
387 #if 0
388 for(j=0; j<256; j++){
389 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
391 #endif
392 free_vlc(&s->vlc[i]);
393 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
396 generate_joint_tables(s);
398 return (get_bits_count(&gb)+7)/8;
401 static int read_old_huffman_tables(HYuvContext *s){
402 #if 1
403 GetBitContext gb;
404 int i;
406 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
407 read_len_table(s->len[0], &gb);
408 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
409 read_len_table(s->len[1], &gb);
411 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
412 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
414 if(s->bitstream_bpp >= 24){
415 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
416 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
418 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
419 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
421 for(i=0; i<3; i++){
422 free_vlc(&s->vlc[i]);
423 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
426 generate_joint_tables(s);
428 return 0;
429 #else
430 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
431 return -1;
432 #endif
/* Allocate the per-plane scratch rows: three planar rows for planar YUV
 * (<24 bpp), otherwise two rows of 4 bytes per pixel for packed RGB.
 * NOTE(review): av_malloc() results are not checked here; an allocation
 * failure would only surface later when the buffers are dereferenced. */
static av_cold void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        for(i=0; i<2; i++){
            s->temp[i]= av_malloc(4*s->width + 16);
        }
    }
}
449 static av_cold int common_init(AVCodecContext *avctx){
450 HYuvContext *s = avctx->priv_data;
452 s->avctx= avctx;
453 s->flags= avctx->flags;
455 dsputil_init(&s->dsp, avctx);
457 s->width= avctx->width;
458 s->height= avctx->height;
459 assert(s->width>0 && s->height>0);
461 return 0;
#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/**
 * Decoder init: determine the stream version, parse the v2 extradata
 * header (method/bpp/flags + Huffman tables) or fall back to the classic
 * built-in tables, then pick the output pixel format.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;   // taller than one PAL field: assume interlaced

    s->bgr32=1;

    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        // extradata: [0] = predictor + decorrelate flag (bit 6), [1] = bpp,
        // [2] = interlace (bits 4-5) + context flag (bit 6), then the tables
        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        // v0/v1: prediction mode is encoded in the low bits of the bpp value
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
562 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
563 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
564 int i;
565 int index= 0;
567 for(i=0; i<256;){
568 int val= len[i];
569 int repeat=0;
571 for(; i<256 && len[i]==val && repeat<255; i++)
572 repeat++;
574 assert(val < 32 && val >0 && repeat<256 && repeat>0);
575 if(repeat>7){
576 buf[index++]= val;
577 buf[index++]= repeat;
578 }else{
579 buf[index++]= val | (repeat<<5);
583 return index;
/**
 * Encoder init: choose the coded bpp from the pixel format, write the
 * 4-byte extradata header plus the stored Huffman tables, and seed the
 * symbol statistics (from stats_in for 2-pass, otherwise heuristically).
 */
static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    // restrictions of the original (non-ffvhuff) huffyuv bitstream
    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    // extradata header, mirrored by decode_init()
    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        // 2-pass: accumulate the counts written out by the first pass
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        // no stats: assume small residuals are most likely
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        // adaptive mode: reset stats to a plausible per-plane prior
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
712 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols with one lookup in the joint table; the reserved
 * symbol 0xffff means "not in the joint table", in which case each symbol
 * is decoded separately from its per-plane VLC. */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}
727 static void decode_422_bitstream(HYuvContext *s, int count){
728 int i;
730 count/=2;
732 for(i=0; i<count; i++){
733 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
734 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
738 static void decode_gray_bitstream(HYuvContext *s, int count){
739 int i;
741 count/=2;
743 for(i=0; i<count; i++){
744 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
748 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Entropy-code one (partial) 422 row from s->temp.
 * Gathers symbol statistics for pass-1 and adaptive-context modes; bit
 * order per group is Y0 U Y1 V.
 * @return 0 on success, -1 if the output buffer could overflow.
 */
static int encode_422_bitstream(HYuvContext *s, int offset, int count){
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset/2;
    const uint8_t *v = s->temp[2] + offset/2;

    // worst case: 4 samples * up to 2 bytes of code each
    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = y[2*i];\
            int y1 = y[2*i+1];\
            int u0 = u[i];\
            int v0 = v[i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        // adaptive mode: keep counting while writing
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}
/**
 * Entropy-code one luma-only row from s->temp[0] (gray / 420 extra lines).
 * @return 0 on success, -1 if the output buffer could overflow.
 */
static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        // adaptive mode: keep counting while writing
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
844 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/**
 * Decode 'count' BGR32 pixels into s->temp[0].
 * Declared always_inline and only called with compile-time-constant
 * decorrelate/alpha, so the compiler emits a specialised version for each
 * combination (see decode_bgr_bitstream).
 */
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            // joint code: all three channels resolved by one table lookup
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            // stored as G, B-G, R-G: add G back to B and R
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
    }
}
/* Dispatch to one of the four constant-specialised decode_bgr_1 variants;
 * passing literal constants here is deliberate so each call inlines to a
 * branch-free specialisation. */
static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}
/**
 * Entropy-code one row of (decorrelated) BGR32 pixels from s->temp[0];
 * channels are coded in G, B-G, R-G order.
 * @return 0 on success, -1 if the output buffer could overflow.
 */
static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}
921 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/** Report the newly finished rows [last_slice_end, y) to the application
 *  via draw_horiz_band, if it requested slice callbacks. */
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;   // y now marks the start of the slice

    if(s->bitstream_bpp==12){
        cy= y>>1;   // 420: chroma planes have half the rows
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}
/**
 * Decode one huffyuv frame.
 * The packet is byte-swapped into bitstream_buffer, optional per-frame
 * tables are read (context mode), then the plane data is entropy-decoded
 * row by row and the spatial prediction (LEFT/PLANE/MEDIAN) is undone.
 * @return bytes consumed, or a negative value on error.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);

    // huffyuv streams are stored as 32-bit big-endian words
    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        // adaptive mode: each frame carries its own Huffman tables
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;   // would overflow the bit counter

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    // stride doubling makes PLANE/MEDIAN predict from 2 lines up when interlaced
    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{
            // first 4 raw bytes seed the left predictors
            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        // 420: every other line is luma only
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        // NOTE(review): linesize[2] is used for the U plane and
                        // linesize[1] for V here — looks swapped; harmless when
                        // both chroma strides are equal, but verify upstream.
                        leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        // 420: catch the luma lines up with the chroma line count
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*p->linesize[0];

        // RGB frames are stored bottom-up; seed predictors from the last line
        if(s->bitstream_bpp==32){
            skip_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{
            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    // bytes consumed, rounded up to whole 32-bit words
    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
1192 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1194 static int common_end(HYuvContext *s){
1195 int i;
1197 for(i=0; i<3; i++){
1198 av_freep(&s->temp[i]);
1200 return 0;
1203 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1204 static av_cold int decode_end(AVCodecContext *avctx)
1206 HYuvContext *s = avctx->priv_data;
1207 int i;
1209 common_end(s);
1210 av_freep(&s->bitstream_buffer);
1212 for(i=0; i<6; i++){
1213 free_vlc(&s->vlc[i]);
1216 return 0;
1218 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1220 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * encode_frame(): huffyuv/ffvhuff encoder callback. Huffman-codes one frame
 * into buf, using LEFT/PLANE/MEDIAN prediction, and (when adaptive context
 * modeling is enabled) re-derives and stores per-frame huffman tables.
 *
 * NOTE(review): this extracted blob carries embedded source line numbers
 * (e.g. "1221 ") fused into every line and has dropped the lines that held
 * only closing braces or blanks — it does not compile as-is. The code text
 * below is left byte-identical; only comments are added.
 *
 * @param avctx    codec context (pix_fmt selects the planar YUV vs RGB32 path)
 * @param buf      output buffer receiving the coded frame
 * @param buf_size size of buf in bytes
 * @param data     input picture, an AVFrame passed as void*
 * @return number of bytes written (size*4), or -1 if table generation fails
 */
1221 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1222 HYuvContext *s = avctx->priv_data;
1223 AVFrame *pict = data;
1224 const int width= s->width;
1225 const int width2= s->width>>1;
1226 const int height= s->height;
/* fake_*stride doubles the stride when interlaced, so prediction references
 * the previous line of the same field rather than the other field. */
1227 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1228 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1229 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1230 AVFrame * const p= &s->picture;
1231 int i, j, size=0;
1233 *p = *pict;
1234 p->pict_type= FF_I_TYPE;
1235 p->key_frame= 1;
/* adaptive context: rebuild huffman tables from accumulated symbol stats,
 * store them in the stream, then age the stats (halve every count). */
1237 if(s->context){
1238 for(i=0; i<3; i++){
1239 generate_len_table(s->len[i], s->stats[i], 256);
1240 if(generate_bits_table(s->bits[i], s->len[i])<0)
1241 return -1;
1242 size+= store_table(s, s->len[i], &buf[size]);
1245 for(i=0; i<3; i++)
1246 for(j=0; j<256; j++)
1247 s->stats[i][j] >>= 1;
1250 init_put_bits(&s->pb, buf+size, buf_size-size);
/* ---- planar 4:2:2 / 4:2:0 path ---- */
1252 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1253 int lefty, leftu, leftv, y, cy;
/* emit the first pixel values raw so the decoder can seed its left
 * predictors (mirrors the get_bits() seeding in decode_frame). */
1255 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1256 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1257 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1258 put_bits(&s->pb, 8, p->data[0][0]);
1260 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1261 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1262 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1264 encode_422_bitstream(s, 2, width-2);
/* MEDIAN prediction: the first line(s) have no line above, so they are
 * coded with left prediction before the median loop starts. */
1266 if(s->predictor==MEDIAN){
1267 int lefttopy, lefttopu, lefttopv;
1268 cy=y=1;
1269 if(s->interlaced){
1270 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1271 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1272 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1274 encode_422_bitstream(s, 0, width);
1275 y++; cy++;
/* first few samples of the second coded line still use left prediction */
1278 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1279 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1280 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1282 encode_422_bitstream(s, 0, 4);
1284 lefttopy= p->data[0][3];
1285 lefttopu= p->data[1][1];
1286 lefttopv= p->data[2][1];
1287 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1288 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1289 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1290 encode_422_bitstream(s, 0, width-4);
1291 y++; cy++;
1293 for(; y<height; y++,cy++){
1294 uint8_t *ydst, *udst, *vdst;
/* bitstream_bpp==12 is the 4:2:0 layout: code luma-only lines until the
 * luma row y catches up with chroma row cy (luma has twice the rows). */
1296 if(s->bitstream_bpp==12){
1297 while(2*cy > y){
1298 ydst= p->data[0] + p->linesize[0]*y;
1299 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1300 encode_gray_bitstream(s, width);
1301 y++;
1303 if(y>=height) break;
1305 ydst= p->data[0] + p->linesize[0]*y;
1306 udst= p->data[1] + p->linesize[1]*cy;
1307 vdst= p->data[2] + p->linesize[2]*cy;
1309 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1310 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1311 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1313 encode_422_bitstream(s, 0, width);
/* ---- LEFT/PLANE predictors ---- */
1315 }else{
1316 for(cy=y=1; y<height; y++,cy++){
1317 uint8_t *ydst, *udst, *vdst;
1319 /* encode a luma only line & y++ */
1320 if(s->bitstream_bpp==12){
1321 ydst= p->data[0] + p->linesize[0]*y;
/* PLANE: difference against the line above first, then left-predict */
1323 if(s->predictor == PLANE && s->interlaced < y){
1324 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1326 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1327 }else{
1328 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1330 encode_gray_bitstream(s, width);
1331 y++;
1332 if(y>=height) break;
1335 ydst= p->data[0] + p->linesize[0]*y;
1336 udst= p->data[1] + p->linesize[1]*cy;
1337 vdst= p->data[2] + p->linesize[2]*cy;
1339 if(s->predictor == PLANE && s->interlaced < cy){
1340 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1341 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1342 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1344 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1345 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1346 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1347 }else{
1348 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1349 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1350 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1353 encode_422_bitstream(s, 0, width);
/* ---- packed RGB32 path; rows are stored bottom-up (negative stride) ---- */
1356 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1357 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1358 const int stride = -p->linesize[0];
1359 const int fake_stride = -fake_ystride;
1360 int y;
1361 int leftr, leftg, leftb;
1363 put_bits(&s->pb, 8, leftr= data[R]);
1364 put_bits(&s->pb, 8, leftg= data[G]);
1365 put_bits(&s->pb, 8, leftb= data[B]);
1366 put_bits(&s->pb, 8, 0);
1368 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1369 encode_bgr_bitstream(s, width-1);
1371 for(y=1; y<s->height; y++){
1372 uint8_t *dst = data + y*stride;
1373 if(s->predictor == PLANE && s->interlaced < y){
1374 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1375 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1376 }else{
1377 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1379 encode_bgr_bitstream(s, width);
1381 }else{
1382 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1384 emms_c();
/* pad and round the coded size up to whole 32-bit words */
1386 size+= (put_bits_count(&s->pb)+31)/8;
1387 put_bits(&s->pb, 16, 0);
1388 put_bits(&s->pb, 15, 0);
1389 size/= 4;
/* two-pass rate control: dump per-symbol statistics every 32nd frame */
1391 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1392 int j;
1393 char *p= avctx->stats_out;
1394 char *end= p + 1024*30;
1395 for(i=0; i<3; i++){
1396 for(j=0; j<256; j++){
1397 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1398 p+= strlen(p);
1399 s->stats[i][j]= 0;
1401 snprintf(p, end-p, "\n");
1402 p++;
1404 } else
1405 avctx->stats_out[0] = '\0';
/* byteswap the output into little-endian 32-bit words unless output is
 * suppressed (CODEC_FLAG2_NO_OUTPUT, e.g. stats-only passes) */
1406 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1407 flush_put_bits(&s->pb);
1408 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1411 s->picture_number++;
1413 return size*4;
1416 static av_cold int encode_end(AVCodecContext *avctx)
1418 HYuvContext *s = avctx->priv_data;
1420 common_end(s);
1422 av_freep(&avctx->extradata);
1423 av_freep(&avctx->stats_out);
1425 return 0;
1427 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1429 #if CONFIG_HUFFYUV_DECODER
/* Decoder registration for native huffyuv streams.
 * NOTE(review): positional AVCodec initializer; the field labels below assume
 * the 2009-era layout (name, type, id, priv_data_size, init, encode, close,
 * decode, capabilities, next) — confirm against this tree's avcodec.h. The
 * closing "};" line was dropped by the blob extraction. */
1430 AVCodec huffyuv_decoder = {
1431 "huffyuv", /* name */
1432 CODEC_TYPE_VIDEO,
1433 CODEC_ID_HUFFYUV,
1434 sizeof(HYuvContext), /* priv_data_size */
1435 decode_init,
1436 NULL, /* no encode callback: decoder entry */
1437 decode_end, /* close */
1438 decode_frame,
1439 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND, /* capabilities */
1440 NULL, /* presumably the 'next' pointer — verify */
1441 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1443 #endif
1445 #if CONFIG_FFVHUFF_DECODER
/* Decoder registration for the FFmpeg variant (ffvhuff) — same callbacks as
 * huffyuv, distinguished only by codec id.
 * NOTE(review): positional initializer; field labels assume the 2009-era
 * AVCodec layout — confirm against avcodec.h. The closing "};" line was
 * dropped by the blob extraction. */
1446 AVCodec ffvhuff_decoder = {
1447 "ffvhuff", /* name */
1448 CODEC_TYPE_VIDEO,
1449 CODEC_ID_FFVHUFF,
1450 sizeof(HYuvContext), /* priv_data_size */
1451 decode_init,
1452 NULL, /* no encode callback: decoder entry */
1453 decode_end, /* close */
1454 decode_frame,
1455 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND, /* capabilities */
1456 NULL, /* presumably the 'next' pointer — verify */
1457 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1459 #endif
1461 #if CONFIG_HUFFYUV_ENCODER
/* Encoder registration for native huffyuv. Note the pix_fmts list excludes
 * YUV420P (only the ffvhuff encoder below advertises it).
 * NOTE(review): positional initializer; field labels assume the 2009-era
 * AVCodec layout (name, type, id, priv_data_size, init, encode, close) —
 * confirm against avcodec.h. The closing "};" line was dropped by the blob
 * extraction. */
1462 AVCodec huffyuv_encoder = {
1463 "huffyuv", /* name */
1464 CODEC_TYPE_VIDEO,
1465 CODEC_ID_HUFFYUV,
1466 sizeof(HYuvContext), /* priv_data_size */
1467 encode_init,
1468 encode_frame, /* encode */
1469 encode_end, /* close */
1470 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1471 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1473 #endif
1475 #if CONFIG_FFVHUFF_ENCODER
/* Encoder registration for the FFmpeg variant (ffvhuff) — additionally
 * advertises YUV420P compared with the huffyuv encoder above.
 * NOTE(review): positional initializer; field labels assume the 2009-era
 * AVCodec layout — confirm against avcodec.h. The closing "};" line was
 * dropped by the blob extraction. */
1476 AVCodec ffvhuff_encoder = {
1477 "ffvhuff", /* name */
1478 CODEC_TYPE_VIDEO,
1479 CODEC_ID_FFVHUFF,
1480 sizeof(HYuvContext), /* priv_data_size */
1481 encode_init,
1482 encode_frame, /* encode */
1483 encode_end, /* close */
1484 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1485 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1487 #endif