Use get_bits_left() instead of size_in_bits - get_bits_count().
[ffmpeg-lucabe.git] / libavcodec / huffyuv.c
/*
 * huffyuv codec for libavcodec
 *
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/huffyuv.c
 * huffyuv codec for libavcodec.
 */
#include "avcodec.h"
#include "get_bits.h"
#include "put_bits.h"
#include "dsputil.h"

#define VLC_BITS 11

#if HAVE_BIGENDIAN
#define B 3
#define G 2
#define R 1
#else
#define B 0
#define G 1
#define R 2
#endif

typedef enum Predictor{
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;
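
/* The three HuffYUV prediction modes: LEFT codes the difference to the
 * previous sample on the same line, PLANE additionally differences against
 * the line above (two lines above for interlaced material, see the
 * fake_*stride variables), and MEDIAN predicts each sample from the median
 * of the left, top and left+top-topleft neighbours. */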

typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;                               //use yuy2 instead of 422P
    int bgr32;                              //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    uint32_t pix_bgr_map[1<<VLC_BITS];
    VLC vlc[6];                             //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;

static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};
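
/* Built-in tables for version-1 ("classic") HuffYUV streams, which do not
 * carry Huffman tables of their own: classic_shift_luma/chroma hold
 * run-length coded code lengths in the same format as in-stream tables,
 * and classic_add_luma/chroma supply the corresponding code values
 * (see read_old_huffman_tables()). */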

static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}

static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    *red=   src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue=  src[(w-1)*4+B];
}
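
/* Code lengths are stored run-length coded: each group is a 3-bit repeat
 * count followed by a 5-bit length value; a repeat count of 0 escapes to an
 * explicit 8-bit repeat count. Exactly 256 lengths are read per table. */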
static int read_len_table(uint8_t *dst, GetBitContext *gb){
    int i, val, repeat;

    for(i=0; i<256;){
        repeat= get_bits(gb, 3);
        val   = get_bits(gb, 5);
        if(repeat==0)
            repeat= get_bits(gb, 8);
//printf("%d %d\n", val, repeat);
        if(i+repeat > 256) {
            av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
            return -1;
        }
        while (repeat--)
            dst[i++] = val;
    }
    return 0;
}
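
/* Assign canonical codes from the length table: codes are handed out from
 * the longest length to the shortest, halving the running code value at each
 * level; if the count at any level is odd, the lengths do not describe a
 * valid prefix code and we fail. */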
static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
    int len, index;
    uint32_t bits=0;

    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
                dst[index]= bits++;
        }
        if(bits & 1){
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
typedef struct {
    uint64_t val;
    int name;
} HeapElem;

static void heap_sift(HeapElem *h, int root, int size)
{
    while(root*2+1 < size) {
        int child = root*2+1;
        if(child < size-1 && h[child].val > h[child+1].val)
            child++;
        if(h[root].val > h[child].val) {
            FFSWAP(HeapElem, h[root], h[child]);
            root = child;
        } else
            break;
    }
}
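
/* Build Huffman code lengths from the symbol frequencies using a min-heap:
 * the two smallest nodes are repeatedly merged, and the depth of each leaf
 * in the resulting tree becomes its code length. If any length reaches
 * 32 bits, the whole pass is retried with a larger bias (offset) added to
 * every frequency, which flattens the tree until all lengths fit. */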
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    HeapElem h[size];
    int up[2*size];
    int len[2*size];
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;
        }
        if(i==size) break;
    }
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
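
/* Build joint VLC tables so that two symbols can usually be decoded with a
 * single lookup: for planar YUV, each entry combines a luma code with the
 * following luma or chroma code; for RGB, an entry covers the codes of a
 * whole (optionally decorrelated) pixel. Only combinations whose total
 * length fits in VLC_BITS are stored; everything else falls back to the
 * per-plane tables (see READ_2PIX() and decode_bgr_1()). */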
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}

static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length*8);

    for(i=0; i<3; i++){
        if(read_len_table(s->len[i], &gb)<0)
            return -1;
        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
#if 0
for(j=0; j<256; j++){
printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
}
#endif
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return (get_bits_count(&gb)+7)/8;
}

static int read_old_huffman_tables(HYuvContext *s){
#if 1
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
    if(read_len_table(s->len[0], &gb)<0)
        return -1;
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
    if(read_len_table(s->len[1], &gb)<0)
        return -1;

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return 0;
#else
    av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
    return -1;
#endif
}

static av_cold void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        s->temp[0]= av_malloc(4*s->width + 16);
    }
}

static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;

    s->bgr32=1;
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
    int i;
    int index= 0;

    for(i=0; i<256;){
        int val= len[i];
        int repeat=0;

        for(; i<256 && len[i]==val && repeat<255; i++)
            repeat++;

        assert(val < 32 && val >0 && repeat<256 && repeat>0);
        if(repeat>7){
            buf[index++]= val;
            buf[index++]= repeat;
        }else{
            buf[index++]= val | (repeat<<5);
        }
    }
    return index;
}

static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (get_bits_left(&s->gb))/(31*4)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }
}

static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (get_bits_left(&s->gb))/(31*2)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int encode_422_bitstream(HYuvContext *s, int offset, int count){
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset/2;
    const uint8_t *v = s->temp[2] + offset/2;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = y[2*i];\
            int y1 = y[2*i+1];\
            int u0 = u[i];\
            int v0 = v[i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
    }
}

static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}

static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
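/* Pass the rows decoded since the last call to the user's draw_horiz_band()
 * callback, so display can start before the whole frame is finished. */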
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}
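
/* A frame starts with optional per-frame Huffman tables (only when the
 * "context" flag is set), followed by the raw start values of the predictors
 * and then the VLC-coded, predicted samples. The payload is stored as
 * byte-swapped 32-bit words, hence the bswap_buf() below and the return
 * value rounded up to a whole number of words. */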
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);

    memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{

            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*p->linesize[0];

        if(s->bitstream_bpp==32){
            skip_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

static int common_end(HYuvContext *s){
    int i;

    for(i=0; i<3; i++){
        av_freep(&s->temp[i]);
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for(i=0; i<6; i++){
        free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
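/* Encoding mirrors decode_frame(): optional per-frame tables are written
 * first (context mode), then the first pixel values, then the residuals of
 * the selected predictor, VLC coded. The output is padded and byte-swapped
 * to whole 32-bit words at the end. */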
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    if(s->context){
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i], 256);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8,        p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        }else{
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

#if CONFIG_HUFFYUV_DECODER
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_DECODER
AVCodec ffvhuff_decoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif

#if CONFIG_HUFFYUV_ENCODER
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_ENCODER
AVCodec ffvhuff_encoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif