/*
 * huffyuv codec for libavcodec
 *
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/huffyuv.c
 * huffyuv codec for libavcodec.
 */
#include "avcodec.h"
#include "get_bits.h"
#include "put_bits.h"
#include "dsputil.h"

#define VLC_BITS 11

#if HAVE_BIGENDIAN
#define B 3
#define G 2
#define R 1
#else
#define B 0
#define G 1
#define R 2
#endif

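/* Prediction modes: LEFT predicts each sample from its left neighbour,
 * PLANE codes the difference to the previous line (which is then left
 * predicted), and MEDIAN predicts from the median of left, top and
 * left+top-topleft. */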
typedef enum Predictor{
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;

typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;                               //use yuy2 instead of 422P
    int bgr32;                              //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    uint32_t pix_bgr_map[1<<VLC_BITS];
    VLC vlc[6];                             //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;

static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};

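/* Reconstructs a run of left-predicted samples: each output byte is the
 * running sum (modulo 256) of the residuals in src, seeded with acc.
 * The first loop is manually unrolled two samples at a time. */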
static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
    int i;

    for(i=0; i<w-1; i++){
        acc+= src[i];
        dst[i]= acc;
        i++;
        acc+= src[i];
        dst[i]= acc;
    }

    for(; i<w; i++){
        acc+= src[i];
        dst[i]= acc;
    }

    return acc;
}

static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;

    for(i=0; i<w; i++){
        b+= src[4*i+B];
        g+= src[4*i+G];
        r+= src[4*i+R];

        dst[4*i+B]= b;
        dst[4*i+G]= g;
        dst[4*i+R]= r;
    }

    *red= r;
    *green= g;
    *blue= b;
}

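/* Encoder-side inverse of add_left_prediction(): writes the difference of
 * each sample to its left neighbour into dst and returns the new left value.
 * For wide lines the bulk of the work is delegated to dsp.diff_bytes(),
 * which may be SIMD-optimized. */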
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}

static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    *red=   src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue=  src[(w-1)*4+B];
}

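/* Reads a run-length coded table of Huffman code lengths from the bitstream:
 * 3 bits of repeat count (0 means an 8-bit count follows) and 5 bits of code
 * length per run, until all 256 symbols are covered. */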
static int read_len_table(uint8_t *dst, GetBitContext *gb){
    int i, val, repeat;

    for(i=0; i<256;){
        repeat= get_bits(gb, 3);
        val   = get_bits(gb, 5);
        if(repeat==0)
            repeat= get_bits(gb, 8);
//printf("%d %d\n", val, repeat);
        if(i+repeat > 256) {
            av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
            return -1;
        }
        while (repeat--)
            dst[i++] = val;
    }
    return 0;
}

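/* Assigns canonical Huffman codes from the code lengths, starting with the
 * longest codes; fails if the lengths do not describe a valid prefix code. */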
static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
    int len, index;
    uint32_t bits=0;

    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
                dst[index]= bits++;
        }
        if(bits & 1){
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
typedef struct {
    uint64_t val;
    int name;
} HeapElem;

static void heap_sift(HeapElem *h, int root, int size)
{
    while(root*2+1 < size) {
        int child = root*2+1;
        if(child < size-1 && h[child].val > h[child+1].val)
            child++;
        if(h[root].val > h[child].val) {
            FFSWAP(HeapElem, h[root], h[child]);
            root = child;
        } else
            break;
    }
}

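/* Builds Huffman code lengths from the symbol statistics using a min-heap
 * (standard Huffman tree construction). If any resulting length reaches the
 * 32-bit limit, the whole process is retried with a larger per-symbol bias
 * ("offset") so that the lengths flatten out. */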
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    HeapElem h[size];
    int up[2*size];
    int len[2*size];
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;
        }
        if(i==size) break;
    }
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

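/* Builds joint VLC tables so that two (YUV) or three (RGB) symbols can be
 * decoded with a single table lookup when their combined code length fits in
 * VLC_BITS. Codes that do not fit are left to the per-plane fallback tables. */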
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}

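/* Parses the three per-plane length tables from src, derives the canonical
 * codes and (re)builds all VLC decoding tables; returns the number of bytes
 * consumed. */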
static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length*8);

    for(i=0; i<3; i++){
        if(read_len_table(s->len[i], &gb)<0)
            return -1;
        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
#if 0
for(j=0; j<256; j++){
printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
}
#endif
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return (get_bits_count(&gb)+7)/8;
}

static int read_old_huffman_tables(HYuvContext *s){
#if 1
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
    if(read_len_table(s->len[0], &gb)<0)
        return -1;
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
    if(read_len_table(s->len[1], &gb)<0)
        return -1;

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return 0;
#else
    av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
    return -1;
#endif
}

static av_cold void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        for(i=0; i<2; i++){
            s->temp[i]= av_malloc(4*s->width + 16);
        }
    }
}

static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;

    s->bgr32=1;
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
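/* Run-length encodes one 256-entry code length table into buf (the inverse
 * of read_len_table()) and returns the number of bytes written. */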
static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
    int i;
    int index= 0;

    for(i=0; i<256;){
        int val= len[i];
        int repeat=0;

        for(; i<256 && len[i]==val && repeat<255; i++)
            repeat++;

        assert(val < 32 && val >0 && repeat<256 && repeat>0);
        if(repeat>7){
            buf[index++]= val;
            buf[index++]= repeat;
        }else{
            buf[index++]= val | (repeat<<5);
        }
    }

    return index;
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
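/* Decodes two symbols at once: first tries the joint table (a single lookup
 * yields both values packed in a 16-bit code), and falls back to two reads
 * from the individual per-plane tables when the joint code is the reserved
 * "invalid" value 0xffff. */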
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (s->gb.size_in_bits - get_bits_count(&s->gb))/(31*4)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }
}

static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (s->gb.size_in_bits - get_bits_count(&s->gb))/(31*2)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int encode_422_bitstream(HYuvContext *s, int offset, int count){
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset/2;
    const uint8_t *v = s->temp[2] + offset/2;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = y[2*i];\
            int y1 = y[2*i+1];\
            int u0 = u[i];\
            int v0 = v[i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

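/* Decodes count packed BGR(A) pixels: a hit in the joint table yields a whole
 * pixel from pix_bgr_map with one lookup; otherwise the channels are read
 * from the per-plane tables, optionally undoing the green decorrelation.
 * An alpha code, if present, is read and discarded. */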
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
    }
}

static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}

static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}

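/* Decodes one frame: byte-swaps the input into a padded buffer, optionally
 * reads per-frame Huffman tables (context mode), then reconstructs the
 * planes line by line, applying the left/plane/median predictor. RGB frames
 * are stored bottom-up, hence the reverse row loop in the BGR path. */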
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);

    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{

            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(p->data[1] + p->linesize[1], s->temp[1], width2, leftu);
                        leftv= add_left_prediction(p->data[2] + p->linesize[2], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*p->linesize[0];

        if(s->bitstream_bpp==32){
            skip_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

static int common_end(HYuvContext *s){
    int i;

    for(i=0; i<3; i++){
        av_freep(&s->temp[i]);
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for(i=0; i<6; i++){
        free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
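/* Encodes one frame: optionally stores per-frame Huffman tables (context
 * mode), writes the first pixel values raw, then emits the predicted
 * residuals plane by plane. In pass-1 mode the symbol statistics are dumped
 * to avctx->stats_out on every 32nd frame; the output is byte-swapped to
 * match the huffyuv bitstream ordering. */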
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    if(s->context){
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i], 256);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8,        p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        }else{
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

#if CONFIG_HUFFYUV_DECODER
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_DECODER
AVCodec ffvhuff_decoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif

#if CONFIG_HUFFYUV_ENCODER
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_ENCODER
AVCodec ffvhuff_encoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif