Add Speex support to the Ogg muxer.
[FFMpeg-mirror/lagarith.git] / libavcodec / huffyuv.c
blob32a6c0b29606816e371fcb61b4db5d83e809638d
1 /*
2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7 * the algorithm used
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 /**
27 * @file libavcodec/huffyuv.c
28 * huffyuv codec for libavcodec.
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
/* Number of bits consumed per lookup by the VLC reader; also sizes the
 * joint (pair/triple) code tables built in generate_joint_tables(). */
36 #define VLC_BITS 11

/* Byte offsets of the blue/green/red components inside a packed 32-bit
 * RGB32 pixel, selected to match the host endianness. */
38 #if HAVE_BIGENDIAN
39 #define B 3
40 #define G 2
41 #define R 1
42 #else
43 #define B 0
44 #define G 1
45 #define R 2
46 #endif
/** Spatial prediction modes used by the huffyuv bitstream. */
48 typedef enum Predictor{
49 LEFT= 0, /**< predict from the pixel to the left */
50 PLANE, /**< left prediction plus a per-line delta to the previous line */
51 MEDIAN, /**< median of left, top and (left+top-topleft) */
52 } Predictor;
/** Per-instance state shared by the huffyuv/ffvhuff encoder and decoder. */
54 typedef struct HYuvContext{
55 AVCodecContext *avctx;
56 Predictor predictor; /* spatial prediction mode, see enum Predictor */
57 GetBitContext gb; /* bitstream reader (decoder) */
58 PutBitContext pb; /* bitstream writer (encoder) */
59 int interlaced; /* treat the frame as two interleaved fields */
60 int decorrelate; /* RGB: store G, B-G, R-G instead of raw components */
61 int bitstream_bpp; /* bits per pixel as coded in the bitstream (12/16/24/32) */
62 int version; /* 0: ancient, 1: v1 with extradata, 2: current */
63 int yuy2; //use yuy2 instead of 422P
64 int bgr32; //use bgr32 instead of bgr24
65 int width, height;
66 int flags; /* copy of avctx->flags */
67 int context; /* adaptive (per-frame) huffman tables enabled */
68 int picture_number;
69 int last_slice_end; /* last row already handed to draw_horiz_band() */
70 uint8_t *temp[3]; /* per-plane scratch line buffers, see alloc_temp() */
71 uint64_t stats[3][256]; /* symbol frequency counters (encoder) */
72 uint8_t len[3][256]; /* huffman code lengths per plane/symbol */
73 uint32_t bits[3][256]; /* huffman codes per plane/symbol */
74 uint32_t pix_bgr_map[1<<VLC_BITS]; /* joint-code -> packed BGR pixel map */
75 VLC vlc[6]; //Y,U,V,YY,YU,YV
76 AVFrame picture;
77 uint8_t *bitstream_buffer; /* byteswapped copy of the input packet */
78 unsigned int bitstream_buffer_size;
79 DSPContext dsp;
80 }HYuvContext;
/* Run-length coded huffman code-length table for the luma plane of the
 * original ("classic") huffyuv v1 bitstream; parsed by read_len_table()
 * in read_old_huffman_tables(). Terminated by 0. */
82 static const unsigned char classic_shift_luma[] = {
83 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
84 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
85 69,68, 0
/* Run-length coded huffman code-length table for the chroma planes of the
 * "classic" huffyuv v1 bitstream; companion of classic_shift_luma. */
88 static const unsigned char classic_shift_chroma[] = {
89 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
90 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
91 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
/* Fixed huffman codes (one per 8-bit luma symbol) of the "classic" v1
 * bitstream; copied into s->bits[0] by read_old_huffman_tables(). */
94 static const unsigned char classic_add_luma[256] = {
95 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
96 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
97 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
98 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
99 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
100 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
101 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
102 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
103 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
104 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
105 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
106 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
107 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
108 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
109 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
110 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Fixed huffman codes (one per 8-bit chroma symbol) of the "classic" v1
 * bitstream; copied into s->bits[1] by read_old_huffman_tables(). */
113 static const unsigned char classic_add_chroma[256] = {
114 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
115 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
116 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
117 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
118 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
119 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
120 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
121 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
122 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
123 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
124 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
125 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
126 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
127 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
128 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
129 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/**
 * Left-predict one line: dst[i] = src[i] - src[i-1] (with 'left' seeding
 * the first difference). Returns the value to carry as 'left' into the
 * next line. For lines of 32+ pixels the first 16 are done by hand and
 * the rest via the (possibly SIMD) dsp.diff_bytes helper.
 */
132 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
133 int i;
134 if(w<32){
135 for(i=0; i<w; i++){
136 const int temp= src[i];
137 dst[i]= temp - left;
138 left= temp;
140 return left;
141 }else{
142 for(i=0; i<16; i++){
143 const int temp= src[i];
144 dst[i]= temp - left;
145 left= temp;
147 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
148 return src[w-1];
/**
 * Left-predict one line of packed 32-bit BGR pixels, per component.
 * *red/*green/*blue carry the prediction state across lines and are
 * updated to the last pixel's components on return.
 * NOTE(review): the first 4 pixels (16 bytes) are done by hand and
 * dsp.diff_bytes starts at byte 16 — this assumes w >= 4; TODO confirm
 * callers never pass a narrower line.
 */
152 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
153 int i;
154 int r,g,b;
155 r= *red;
156 g= *green;
157 b= *blue;
158 for(i=0; i<FFMIN(w,4); i++){
159 const int rt= src[i*4+R];
160 const int gt= src[i*4+G];
161 const int bt= src[i*4+B];
162 dst[i*4+R]= rt - r;
163 dst[i*4+G]= gt - g;
164 dst[i*4+B]= bt - b;
165 r = rt;
166 g = gt;
167 b = bt;
169 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
170 *red= src[(w-1)*4+R];
171 *green= src[(w-1)*4+G];
172 *blue= src[(w-1)*4+B];
175 static int read_len_table(uint8_t *dst, GetBitContext *gb){
176 int i, val, repeat;
178 for(i=0; i<256;){
179 repeat= get_bits(gb, 3);
180 val = get_bits(gb, 5);
181 if(repeat==0)
182 repeat= get_bits(gb, 8);
183 //printf("%d %d\n", val, repeat);
184 if(i+repeat > 256) {
185 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
186 return -1;
188 while (repeat--)
189 dst[i++] = val;
191 return 0;
194 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
195 int len, index;
196 uint32_t bits=0;
198 for(len=32; len>0; len--){
199 for(index=0; index<256; index++){
200 if(len_table[index]==len)
201 dst[index]= bits++;
203 if(bits & 1){
204 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
205 return -1;
207 bits >>= 1;
209 return 0;
212 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/** Min-heap node used while building huffman code lengths:
 *  val is the (weighted) frequency, name the symbol / merged-node id. */
213 typedef struct {
214 uint64_t val;
215 int name;
216 } HeapElem;
218 static void heap_sift(HeapElem *h, int root, int size)
220 while(root*2+1 < size) {
221 int child = root*2+1;
222 if(child < size-1 && h[child].val > h[child+1].val)
223 child++;
224 if(h[root].val > h[child].val) {
225 FFSWAP(HeapElem, h[root], h[child]);
226 root = child;
227 } else
228 break;
/**
 * Build huffman code lengths from symbol frequencies (heap-based tree
 * construction). If any resulting length reaches 32 bits the whole build
 * is retried with a doubled 'offset' added to every weight, which
 * flattens the distribution until all lengths fit in 31 bits.
 */
232 static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
233 HeapElem h[size];
234 int up[2*size];
235 int len[2*size];
236 int offset, i, next;
238 for(offset=1; ; offset<<=1){
239 for(i=0; i<size; i++){
240 h[i].name = i;
241 h[i].val = (stats[i] << 8) + offset;
243 for(i=size/2-1; i>=0; i--)
244 heap_sift(h, i, size);
246 for(next=size; next<size*2-1; next++){
247 // merge the two smallest entries, and put it back in the heap
248 uint64_t min1v = h[0].val;
249 up[h[0].name] = next;
250 h[0].val = INT64_MAX;
251 heap_sift(h, 0, size);
252 up[h[0].name] = next;
253 h[0].name = next;
254 h[0].val += min1v;
255 heap_sift(h, 0, size);
258 len[2*size-2] = 0;
259 for(i=2*size-3; i>=size; i--)
260 len[i] = len[up[i]] + 1;
261 for(i=0; i<size; i++) {
262 dst[i] = len[up[i]] + 1;
263 if(dst[i] >= 32) break;
265 if(i==size) break;
268 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/**
 * Build the joint VLC tables that decode two (YUV) or three (RGB) symbols
 * with a single table lookup, for all symbol combinations whose combined
 * code length fits in VLC_BITS. Combinations that do not fit fall back to
 * the per-plane tables at decode time (see READ_2PIX / decode_bgr_1).
 */
270 static void generate_joint_tables(HYuvContext *s){
271 uint16_t symbols[1<<VLC_BITS];
272 uint16_t bits[1<<VLC_BITS];
273 uint8_t len[1<<VLC_BITS];
274 if(s->bitstream_bpp < 24){
275 int p, i, y, u;
/* YUV: one joint table per chroma-ish plane pairing (YY, YU, YV). */
276 for(p=0; p<3; p++){
277 for(i=y=0; y<256; y++){
278 int len0 = s->len[0][y];
279 int limit = VLC_BITS - len0;
280 if(limit <= 0)
281 continue;
282 for(u=0; u<256; u++){
283 int len1 = s->len[p][u];
284 if(len1 > limit)
285 continue;
286 len[i] = len0 + len1;
287 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
288 symbols[i] = (y<<8) + u;
289 if(symbols[i] != 0xffff) // reserved to mean "invalid"
290 i++;
293 free_vlc(&s->vlc[3+p]);
294 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
296 }else{
/* RGB: one joint table mapping a code directly to a packed pixel. */
297 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
298 int i, b, g, r, code;
299 int p0 = s->decorrelate;
300 int p1 = !s->decorrelate;
301 // restrict the range to +/-16 becaues that's pretty much guaranteed to
302 // cover all the combinations that fit in 11 bits total, and it doesn't
303 // matter if we miss a few rare codes.
304 for(i=0, g=-16; g<16; g++){
305 int len0 = s->len[p0][g&255];
306 int limit0 = VLC_BITS - len0;
307 if(limit0 < 2)
308 continue;
309 for(b=-16; b<16; b++){
310 int len1 = s->len[p1][b&255];
311 int limit1 = limit0 - len1;
312 if(limit1 < 1)
313 continue;
314 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
315 for(r=-16; r<16; r++){
316 int len2 = s->len[2][r&255];
317 if(len2 > limit1)
318 continue;
319 len[i] = len0 + len1 + len2;
320 bits[i] = (code << len2) + s->bits[2][r&255];
321 if(s->decorrelate){
322 map[i][G] = g;
323 map[i][B] = g+b;
324 map[i][R] = g+r;
325 }else{
326 map[i][B] = g;
327 map[i][G] = b;
328 map[i][R] = r;
330 i++;
334 free_vlc(&s->vlc[3]);
335 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/**
 * Parse the three per-plane huffman length tables from 'src' (length in
 * bytes), derive the codes, (re)build the per-plane and joint VLCs.
 * @return number of bytes consumed, or -1 on error.
 */
339 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
340 GetBitContext gb;
341 int i;
343 init_get_bits(&gb, src, length*8);
345 for(i=0; i<3; i++){
346 if(read_len_table(s->len[i], &gb)<0)
347 return -1;
348 if(generate_bits_table(s->bits[i], s->len[i])<0){
349 return -1;
351 #if 0
352 for(j=0; j<256; j++){
353 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
355 #endif
356 free_vlc(&s->vlc[i]);
357 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
360 generate_joint_tables(s);
/* round the consumed bit count up to whole bytes */
362 return (get_bits_count(&gb)+7)/8;
/**
 * Install the fixed huffman tables of the original ("classic") huffyuv v1
 * bitstream: lengths come from the run-length coded classic_shift_* blobs,
 * codes from the classic_add_* arrays; plane 2 duplicates plane 1 (and for
 * >=24 bpp, plane 1 duplicates plane 0).
 */
365 static int read_old_huffman_tables(HYuvContext *s){
366 #if 1
367 GetBitContext gb;
368 int i;
370 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
371 if(read_len_table(s->len[0], &gb)<0)
372 return -1;
373 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
374 if(read_len_table(s->len[1], &gb)<0)
375 return -1;
377 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
378 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
380 if(s->bitstream_bpp >= 24){
381 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
382 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
384 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
385 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
387 for(i=0; i<3; i++){
388 free_vlc(&s->vlc[i]);
389 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
392 generate_joint_tables(s);
394 return 0;
395 #else
396 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
397 return -1;
398 #endif
401 static av_cold void alloc_temp(HYuvContext *s){
402 int i;
404 if(s->bitstream_bpp<24){
405 for(i=0; i<3; i++){
406 s->temp[i]= av_malloc(s->width + 16);
408 }else{
409 for(i=0; i<2; i++){
410 s->temp[i]= av_malloc(4*s->width + 16);
/** Initialization shared by encoder and decoder: cache avctx fields and
 *  set up the DSP function pointers. Always returns 0. */
415 static av_cold int common_init(AVCodecContext *avctx){
416 HYuvContext *s = avctx->priv_data;
418 s->avctx= avctx;
419 s->flags= avctx->flags;
421 dsputil_init(&s->dsp, avctx);
423 s->width= avctx->width;
424 s->height= avctx->height;
425 assert(s->width>0 && s->height>0);
427 return 0;
430 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/**
 * Decoder init: determine bitstream version from extradata, parse the
 * method/bpp/interlace header (v2) or infer settings from
 * bits_per_coded_sample (v0/v1), load the huffman tables and pick the
 * output pixel format.
 */
431 static av_cold int decode_init(AVCodecContext *avctx)
433 HYuvContext *s = avctx->priv_data;
435 common_init(avctx);
/* NOTE(review): only vlc[0..2] are zeroed although s->vlc has 6 entries
 * and generate_joint_tables() calls free_vlc() on vlc[3..5] — confirm
 * those entries cannot contain garbage at first use. */
436 memset(s->vlc, 0, 3*sizeof(VLC));
438 avctx->coded_frame= &s->picture;
439 s->interlaced= s->height > 288;
441 s->bgr32=1;
442 //if(avctx->extradata)
443 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
444 if(avctx->extradata_size){
445 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
446 s->version=1; // do such files exist at all?
447 else
448 s->version=2;
449 }else
450 s->version=0;
452 if(s->version==2){
453 int method, interlace;
/* extradata layout: [0]=predictor|decorrelate, [1]=bpp, [2]=flags, [3]=0 */
455 method= ((uint8_t*)avctx->extradata)[0];
456 s->decorrelate= method&64 ? 1 : 0;
457 s->predictor= method&63;
458 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
459 if(s->bitstream_bpp==0)
460 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
461 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
462 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
463 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
/* NOTE(review): the table pointer skips the 4-byte header but the full
 * extradata_size is passed as the length — looks like it should be
 * extradata_size-4; confirm against the demuxer's extradata. */
465 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
466 return -1;
467 }else{
468 switch(avctx->bits_per_coded_sample&7){
469 case 1:
470 s->predictor= LEFT;
471 s->decorrelate= 0;
472 break;
473 case 2:
474 s->predictor= LEFT;
475 s->decorrelate= 1;
476 break;
477 case 3:
478 s->predictor= PLANE;
479 s->decorrelate= avctx->bits_per_coded_sample >= 24;
480 break;
481 case 4:
482 s->predictor= MEDIAN;
483 s->decorrelate= 0;
484 break;
485 default:
486 s->predictor= LEFT; //OLD
487 s->decorrelate= 0;
488 break;
490 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
491 s->context= 0;
493 if(read_old_huffman_tables(s) < 0)
494 return -1;
497 switch(s->bitstream_bpp){
498 case 12:
499 avctx->pix_fmt = PIX_FMT_YUV420P;
500 break;
501 case 16:
502 if(s->yuy2){
503 avctx->pix_fmt = PIX_FMT_YUYV422;
504 }else{
505 avctx->pix_fmt = PIX_FMT_YUV422P;
507 break;
508 case 24:
509 case 32:
510 if(s->bgr32){
511 avctx->pix_fmt = PIX_FMT_RGB32;
512 }else{
513 avctx->pix_fmt = PIX_FMT_BGR24;
515 break;
516 default:
517 assert(0);
520 alloc_temp(s);
522 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
524 return 0;
526 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
528 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
529 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
530 int i;
531 int index= 0;
533 for(i=0; i<256;){
534 int val= len[i];
535 int repeat=0;
537 for(; i<256 && len[i]==val && repeat<255; i++)
538 repeat++;
540 assert(val < 32 && val >0 && repeat<256 && repeat>0);
541 if(repeat>7){
542 buf[index++]= val;
543 buf[index++]= repeat;
544 }else{
545 buf[index++]= val | (repeat<<5);
549 return index;
/**
 * Encoder init: pick the bitstream bpp from the input pixel format, write
 * the 4-byte header plus the serialized huffman tables into extradata, and
 * seed the symbol statistics (from 2-pass stats_in if present, otherwise
 * from a generic Laplacian-ish model).
 */
552 static av_cold int encode_init(AVCodecContext *avctx)
554 HYuvContext *s = avctx->priv_data;
555 int i, j;
557 common_init(avctx);
559 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
560 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
561 s->version=2;
563 avctx->coded_frame= &s->picture;
565 switch(avctx->pix_fmt){
566 case PIX_FMT_YUV420P:
567 s->bitstream_bpp= 12;
568 break;
569 case PIX_FMT_YUV422P:
570 s->bitstream_bpp= 16;
571 break;
572 case PIX_FMT_RGB32:
573 s->bitstream_bpp= 24;
574 break;
575 default:
576 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
577 return -1;
579 avctx->bits_per_coded_sample= s->bitstream_bpp;
580 s->decorrelate= s->bitstream_bpp >= 24;
581 s->predictor= avctx->prediction_method;
582 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
583 if(avctx->context_model==1){
584 s->context= avctx->context_model;
585 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
586 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
587 return -1;
589 }else s->context= 0;
/* Reject combinations the original huffyuv decoder cannot handle. */
591 if(avctx->codec->id==CODEC_ID_HUFFYUV){
592 if(avctx->pix_fmt==PIX_FMT_YUV420P){
593 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
594 return -1;
596 if(avctx->context_model){
597 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
598 return -1;
600 if(s->interlaced != ( s->height > 288 ))
601 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
604 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
605 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
606 return -1;
/* 4-byte extradata header: method, bpp, interlace/context flags, zero. */
609 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
610 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
611 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
612 if(s->context)
613 ((uint8_t*)avctx->extradata)[2]|= 0x40;
614 ((uint8_t*)avctx->extradata)[3]= 0;
615 s->avctx->extradata_size= 4;
617 if(avctx->stats_in){
618 char *p= avctx->stats_in;
620 for(i=0; i<3; i++)
621 for(j=0; j<256; j++)
622 s->stats[i][j]= 1;
/* Accumulate the per-symbol counts from every pass line in stats_in. */
624 for(;;){
625 for(i=0; i<3; i++){
626 char *next;
628 for(j=0; j<256; j++){
629 s->stats[i][j]+= strtol(p, &next, 0);
630 if(next==p) return -1;
631 p=next;
634 if(p[0]==0 || p[1]==0 || p[2]==0) break;
636 }else{
/* No 2-pass stats: assume small residuals are the most frequent. */
637 for(i=0; i<3; i++)
638 for(j=0; j<256; j++){
639 int d= FFMIN(j, 256-j);
641 s->stats[i][j]= 100000000/(d+1);
645 for(i=0; i<3; i++){
646 generate_len_table(s->len[i], s->stats[i], 256);
648 if(generate_bits_table(s->bits[i], s->len[i])<0){
649 return -1;
652 s->avctx->extradata_size+=
653 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
656 if(s->context){
/* Adaptive mode: reinitialize the stats with a size-scaled prior. */
657 for(i=0; i<3; i++){
658 int pels = s->width*s->height / (i?40:10);
659 for(j=0; j<256; j++){
660 int d= FFMIN(j, 256-j);
661 s->stats[i][j]= pels/(d+1);
664 }else{
665 for(i=0; i<3; i++)
666 for(j=0; j<256; j++)
667 s->stats[i][j]= 0;
670 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
672 alloc_temp(s);
674 s->picture_number=0;
676 return 0;
678 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
680 /* TODO instead of restarting the read when the code isn't in the first level
681 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols at once: try the joint table first; the reserved
 * 0xffff code means "combination not in the joint table", in which case
 * each symbol is re-read from its individual per-plane table. */
682 #define READ_2PIX(dst0, dst1, plane1){\
683 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
684 if(code != 0xffff){\
685 dst0 = code>>8;\
686 dst1 = code;\
687 }else{\
688 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
689 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/**
 * Decode 'count' luma samples plus count/2 samples of each chroma plane
 * (4:2:2 ordering Y U Y V) into s->temp[0..2]. When the remaining input
 * could run out mid-line (worst case ~31 bits per symbol pair), the slow
 * loop rechecks the read position every iteration.
 */
693 static void decode_422_bitstream(HYuvContext *s, int count){
694 int i;
696 count/=2;
698 if(count >= (s->gb.size_in_bits - get_bits_count(&s->gb))/(31*4)){
699 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
700 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
701 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
703 }else{
704 for(i=0; i<count; i++){
705 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
706 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
/**
 * Decode 'count' luma-only samples (used for the chroma-less rows of
 * 4:2:0 content) into s->temp[0], with the same end-of-input guard
 * scheme as decode_422_bitstream().
 */
711 static void decode_gray_bitstream(HYuvContext *s, int count){
712 int i;
714 count/=2;
716 if(count >= (s->gb.size_in_bits - get_bits_count(&s->gb))/(31*2)){
717 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
718 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
720 }else{
721 for(i=0; i<count; i++){
722 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
727 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Entropy-code 'count' 4:2:2 samples (Y U Y V order) starting at 'offset'
 * in the temp buffers. Updates s->stats in pass-1 and adaptive-context
 * modes. @return 0 on success, -1 if the output buffer would overflow.
 */
728 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
729 int i;
730 const uint8_t *y = s->temp[0] + offset;
731 const uint8_t *u = s->temp[1] + offset/2;
732 const uint8_t *v = s->temp[2] + offset/2;
/* worst case: 4 samples x up to 2 bytes of code each */
734 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
735 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
736 return -1;
739 #define LOAD4\
740 int y0 = y[2*i];\
741 int y1 = y[2*i+1];\
742 int u0 = u[i];\
743 int v0 = v[i];
745 count/=2;
746 if(s->flags&CODEC_FLAG_PASS1){
747 for(i=0; i<count; i++){
748 LOAD4;
749 s->stats[0][y0]++;
750 s->stats[1][u0]++;
751 s->stats[0][y1]++;
752 s->stats[2][v0]++;
755 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
756 return 0;
757 if(s->context){
758 for(i=0; i<count; i++){
759 LOAD4;
760 s->stats[0][y0]++;
761 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
762 s->stats[1][u0]++;
763 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
764 s->stats[0][y1]++;
765 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
766 s->stats[2][v0]++;
767 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
769 }else{
770 for(i=0; i<count; i++){
771 LOAD4;
772 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
773 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
774 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
775 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
778 return 0;
/**
 * Entropy-code 'count' luma-only samples from s->temp[0] (chroma-less
 * rows of 4:2:0 content). Same stats/context handling as
 * encode_422_bitstream(). @return 0 on success, -1 on overflow.
 */
781 static int encode_gray_bitstream(HYuvContext *s, int count){
782 int i;
784 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
785 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
786 return -1;
789 #define LOAD2\
790 int y0 = s->temp[0][2*i];\
791 int y1 = s->temp[0][2*i+1];
792 #define STAT2\
793 s->stats[0][y0]++;\
794 s->stats[0][y1]++;
795 #define WRITE2\
796 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
797 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
799 count/=2;
800 if(s->flags&CODEC_FLAG_PASS1){
801 for(i=0; i<count; i++){
802 LOAD2;
803 STAT2;
806 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
807 return 0;
809 if(s->context){
810 for(i=0; i<count; i++){
811 LOAD2;
812 STAT2;
813 WRITE2;
815 }else{
816 for(i=0; i<count; i++){
817 LOAD2;
818 WRITE2;
821 return 0;
823 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/**
 * Decode 'count' packed BGR pixels into s->temp[0]. Tries the joint
 * 3-component table first (code maps straight to a packed pixel via
 * pix_bgr_map); on miss, reads the components individually, undoing the
 * green decorrelation if active. Always inlined so the constant
 * decorrelate/alpha arguments specialize the body.
 */
825 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
826 int i;
827 for(i=0; i<count; i++){
828 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
829 if(code != -1){
830 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
831 }else if(decorrelate){
832 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
833 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
834 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
835 }else{
836 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
837 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
838 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* alpha channel is coded but discarded; note it reuses the R-plane table */
840 if(alpha)
841 get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
845 static void decode_bgr_bitstream(HYuvContext *s, int count){
846 if(s->decorrelate){
847 if(s->bitstream_bpp==24)
848 decode_bgr_1(s, count, 1, 0);
849 else
850 decode_bgr_1(s, count, 1, 1);
851 }else{
852 if(s->bitstream_bpp==24)
853 decode_bgr_1(s, count, 0, 0);
854 else
855 decode_bgr_1(s, count, 0, 1);
/**
 * Entropy-code 'count' packed BGR pixels from s->temp[0], always with
 * green decorrelation (b and r are stored as differences from g).
 * @return 0 on success, -1 if the output buffer would overflow.
 */
859 static int encode_bgr_bitstream(HYuvContext *s, int count){
860 int i;
862 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
863 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
864 return -1;
867 #define LOAD3\
868 int g= s->temp[0][4*i+G];\
869 int b= (s->temp[0][4*i+B] - g) & 0xff;\
870 int r= (s->temp[0][4*i+R] - g) & 0xff;
871 #define STAT3\
872 s->stats[0][b]++;\
873 s->stats[1][g]++;\
874 s->stats[2][r]++;
875 #define WRITE3\
876 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
877 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
878 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
880 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
881 for(i=0; i<count; i++){
882 LOAD3;
883 STAT3;
885 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
886 for(i=0; i<count; i++){
887 LOAD3;
888 STAT3;
889 WRITE3;
891 }else{
892 for(i=0; i<count; i++){
893 LOAD3;
894 WRITE3;
897 return 0;
900 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/**
 * Hand the rows decoded since the last call (up to, not including, row y)
 * to the user's draw_horiz_band callback, if one is installed. Chroma
 * offsets are halved for 4:2:0 (12 bpp) content.
 */
901 static void draw_slice(HYuvContext *s, int y){
902 int h, cy;
903 int offset[4];
905 if(s->avctx->draw_horiz_band==NULL)
906 return;
908 h= y - s->last_slice_end;
909 y -= h;
911 if(s->bitstream_bpp==12){
912 cy= y>>1;
913 }else{
914 cy= y;
917 offset[0] = s->picture.linesize[0]*y;
918 offset[1] = s->picture.linesize[1]*cy;
919 offset[2] = s->picture.linesize[2]*cy;
920 offset[3] = 0;
921 emms_c();
923 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
925 s->last_slice_end= y + h;
/**
 * Decode one frame: byteswap the packet into bitstream_buffer, optionally
 * parse per-frame huffman tables (adaptive context mode), then undo the
 * entropy coding and the spatial prediction per row. Planar YUV is decoded
 * top-down; RGB is stored bottom-up in the bitstream and decoded in
 * reverse row order.
 * @return bytes consumed, or a negative error code.
 */
928 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
929 const uint8_t *buf = avpkt->data;
930 int buf_size = avpkt->size;
931 HYuvContext *s = avctx->priv_data;
932 const int width= s->width;
933 const int width2= s->width>>1;
934 const int height= s->height;
935 int fake_ystride, fake_ustride, fake_vstride;
936 AVFrame * const p= &s->picture;
937 int table_size= 0;
939 AVFrame *picture = data;
941 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
942 if (!s->bitstream_buffer)
943 return AVERROR(ENOMEM);
/* the bitstream is stored as big-endian 32-bit words; swap to native */
945 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
947 if(p->data[0])
948 avctx->release_buffer(avctx, p);
950 p->reference= 0;
951 if(avctx->get_buffer(avctx, p) < 0){
952 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
953 return -1;
956 if(s->context){
957 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
958 if(table_size < 0)
959 return -1;
962 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
963 return -1;
965 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
/* in interlaced mode prediction references the line two rows up */
967 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
968 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
969 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
971 s->last_slice_end= 0;
973 if(s->bitstream_bpp<24){
974 int y, cy;
975 int lefty, leftu, leftv;
976 int lefttopy, lefttopu, lefttopv;
978 if(s->yuy2){
979 p->data[0][3]= get_bits(&s->gb, 8);
980 p->data[0][2]= get_bits(&s->gb, 8);
981 p->data[0][1]= get_bits(&s->gb, 8);
982 p->data[0][0]= get_bits(&s->gb, 8);
984 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
985 return -1;
986 }else{
/* first 4 raw bytes seed the left-prediction state (V, Y1, U, Y0) */
988 leftv= p->data[2][0]= get_bits(&s->gb, 8);
989 lefty= p->data[0][1]= get_bits(&s->gb, 8);
990 leftu= p->data[1][0]= get_bits(&s->gb, 8);
991 p->data[0][0]= get_bits(&s->gb, 8);
993 switch(s->predictor){
994 case LEFT:
995 case PLANE:
996 decode_422_bitstream(s, width-2);
997 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
998 if(!(s->flags&CODEC_FLAG_GRAY)){
999 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1000 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1003 for(cy=y=1; y<s->height; y++,cy++){
1004 uint8_t *ydst, *udst, *vdst;
/* 4:2:0: every other row is luma-only */
1006 if(s->bitstream_bpp==12){
1007 decode_gray_bitstream(s, width);
1009 ydst= p->data[0] + p->linesize[0]*y;
1011 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1012 if(s->predictor == PLANE){
1013 if(y>s->interlaced)
1014 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1016 y++;
1017 if(y>=s->height) break;
1020 draw_slice(s, y);
1022 ydst= p->data[0] + p->linesize[0]*y;
1023 udst= p->data[1] + p->linesize[1]*cy;
1024 vdst= p->data[2] + p->linesize[2]*cy;
1026 decode_422_bitstream(s, width);
1027 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1028 if(!(s->flags&CODEC_FLAG_GRAY)){
1029 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1030 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1032 if(s->predictor == PLANE){
1033 if(cy>s->interlaced){
1034 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1035 if(!(s->flags&CODEC_FLAG_GRAY)){
1036 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1037 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1042 draw_slice(s, height);
1044 break;
1045 case MEDIAN:
1046 /* first line except first 2 pixels is left predicted */
1047 decode_422_bitstream(s, width-2);
1048 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1049 if(!(s->flags&CODEC_FLAG_GRAY)){
1050 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1051 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1054 cy=y=1;
1056 /* second line is left predicted for interlaced case */
1057 if(s->interlaced){
1058 decode_422_bitstream(s, width);
1059 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
/* FIXME(review): linesize indices look swapped below — data[1] is offset
 * by linesize[2] and data[2] by linesize[1], unlike every other use of
 * these planes in this function. Wrong whenever the chroma strides
 * differ; should almost certainly be linesize[1] / linesize[2]. */
1060 if(!(s->flags&CODEC_FLAG_GRAY)){
1061 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1062 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1064 y++; cy++;
1067 /* next 4 pixels are left predicted too */
1068 decode_422_bitstream(s, 4);
1069 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1070 if(!(s->flags&CODEC_FLAG_GRAY)){
1071 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1072 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1075 /* next line except the first 4 pixels is median predicted */
1076 lefttopy= p->data[0][3];
1077 decode_422_bitstream(s, width-4);
1078 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1079 if(!(s->flags&CODEC_FLAG_GRAY)){
1080 lefttopu= p->data[1][1];
1081 lefttopv= p->data[2][1];
1082 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1083 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1085 y++; cy++;
1087 for(; y<height; y++,cy++){
1088 uint8_t *ydst, *udst, *vdst;
1090 if(s->bitstream_bpp==12){
1091 while(2*cy > y){
1092 decode_gray_bitstream(s, width);
1093 ydst= p->data[0] + p->linesize[0]*y;
1094 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1095 y++;
1097 if(y>=height) break;
1099 draw_slice(s, y);
1101 decode_422_bitstream(s, width);
1103 ydst= p->data[0] + p->linesize[0]*y;
1104 udst= p->data[1] + p->linesize[1]*cy;
1105 vdst= p->data[2] + p->linesize[2]*cy;
1107 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1108 if(!(s->flags&CODEC_FLAG_GRAY)){
1109 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1110 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1114 draw_slice(s, height);
1115 break;
1118 }else{
1119 int y;
1120 int leftr, leftg, leftb;
1121 const int last_line= (height-1)*p->linesize[0];
/* first pixel is stored raw; a dummy/alpha byte is skipped around it */
1123 if(s->bitstream_bpp==32){
1124 skip_bits(&s->gb, 8);
1125 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1126 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1127 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1128 }else{
1129 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1130 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1131 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1132 skip_bits(&s->gb, 8);
1135 if(s->bgr32){
1136 switch(s->predictor){
1137 case LEFT:
1138 case PLANE:
1139 decode_bgr_bitstream(s, width-1);
1140 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
1142 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1143 decode_bgr_bitstream(s, width);
1145 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
1146 if(s->predictor == PLANE){
1147 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1148 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1149 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1153 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1154 break;
1155 default:
1156 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1158 }else{
1160 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1161 return -1;
1164 emms_c();
1166 *picture= *p;
1167 *data_size = sizeof(AVFrame);
/* report consumption rounded up to whole 32-bit words, plus the tables */
1169 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1171 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1173 static int common_end(HYuvContext *s){
1174 int i;
1176 for(i=0; i<3; i++){
1177 av_freep(&s->temp[i]);
1179 return 0;
1182 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1183 static av_cold int decode_end(AVCodecContext *avctx)
1185 HYuvContext *s = avctx->priv_data;
1186 int i;
1188 common_end(s);
1189 av_freep(&s->bitstream_buffer);
1191 for(i=0; i<6; i++){
1192 free_vlc(&s->vlc[i]);
1195 return 0;
1197 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1199 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
1200 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1201 HYuvContext *s = avctx->priv_data;
1202 AVFrame *pict = data;
1203 const int width= s->width;
1204 const int width2= s->width>>1;
1205 const int height= s->height;
1206 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1207 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1208 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1209 AVFrame * const p= &s->picture;
1210 int i, j, size=0;
1212 *p = *pict;
1213 p->pict_type= FF_I_TYPE;
1214 p->key_frame= 1;
1216 if(s->context){
1217 for(i=0; i<3; i++){
1218 generate_len_table(s->len[i], s->stats[i], 256);
1219 if(generate_bits_table(s->bits[i], s->len[i])<0)
1220 return -1;
1221 size+= store_table(s, s->len[i], &buf[size]);
1224 for(i=0; i<3; i++)
1225 for(j=0; j<256; j++)
1226 s->stats[i][j] >>= 1;
1229 init_put_bits(&s->pb, buf+size, buf_size-size);
1231 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1232 int lefty, leftu, leftv, y, cy;
1234 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1235 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1236 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1237 put_bits(&s->pb, 8, p->data[0][0]);
1239 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1240 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1241 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1243 encode_422_bitstream(s, 2, width-2);
1245 if(s->predictor==MEDIAN){
1246 int lefttopy, lefttopu, lefttopv;
1247 cy=y=1;
1248 if(s->interlaced){
1249 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1250 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1251 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1253 encode_422_bitstream(s, 0, width);
1254 y++; cy++;
1257 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1258 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1259 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1261 encode_422_bitstream(s, 0, 4);
1263 lefttopy= p->data[0][3];
1264 lefttopu= p->data[1][1];
1265 lefttopv= p->data[2][1];
1266 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1267 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1268 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1269 encode_422_bitstream(s, 0, width-4);
1270 y++; cy++;
1272 for(; y<height; y++,cy++){
1273 uint8_t *ydst, *udst, *vdst;
1275 if(s->bitstream_bpp==12){
1276 while(2*cy > y){
1277 ydst= p->data[0] + p->linesize[0]*y;
1278 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1279 encode_gray_bitstream(s, width);
1280 y++;
1282 if(y>=height) break;
1284 ydst= p->data[0] + p->linesize[0]*y;
1285 udst= p->data[1] + p->linesize[1]*cy;
1286 vdst= p->data[2] + p->linesize[2]*cy;
1288 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1289 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1290 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1292 encode_422_bitstream(s, 0, width);
1294 }else{
1295 for(cy=y=1; y<height; y++,cy++){
1296 uint8_t *ydst, *udst, *vdst;
1298 /* encode a luma only line & y++ */
1299 if(s->bitstream_bpp==12){
1300 ydst= p->data[0] + p->linesize[0]*y;
1302 if(s->predictor == PLANE && s->interlaced < y){
1303 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1305 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1306 }else{
1307 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1309 encode_gray_bitstream(s, width);
1310 y++;
1311 if(y>=height) break;
1314 ydst= p->data[0] + p->linesize[0]*y;
1315 udst= p->data[1] + p->linesize[1]*cy;
1316 vdst= p->data[2] + p->linesize[2]*cy;
1318 if(s->predictor == PLANE && s->interlaced < cy){
1319 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1320 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1321 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1323 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1324 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1325 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1326 }else{
1327 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1328 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1329 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1332 encode_422_bitstream(s, 0, width);
1335 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1336 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1337 const int stride = -p->linesize[0];
1338 const int fake_stride = -fake_ystride;
1339 int y;
1340 int leftr, leftg, leftb;
1342 put_bits(&s->pb, 8, leftr= data[R]);
1343 put_bits(&s->pb, 8, leftg= data[G]);
1344 put_bits(&s->pb, 8, leftb= data[B]);
1345 put_bits(&s->pb, 8, 0);
1347 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1348 encode_bgr_bitstream(s, width-1);
1350 for(y=1; y<s->height; y++){
1351 uint8_t *dst = data + y*stride;
1352 if(s->predictor == PLANE && s->interlaced < y){
1353 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1354 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1355 }else{
1356 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1358 encode_bgr_bitstream(s, width);
1360 }else{
1361 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1363 emms_c();
1365 size+= (put_bits_count(&s->pb)+31)/8;
1366 put_bits(&s->pb, 16, 0);
1367 put_bits(&s->pb, 15, 0);
1368 size/= 4;
1370 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1371 int j;
1372 char *p= avctx->stats_out;
1373 char *end= p + 1024*30;
1374 for(i=0; i<3; i++){
1375 for(j=0; j<256; j++){
1376 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1377 p+= strlen(p);
1378 s->stats[i][j]= 0;
1380 snprintf(p, end-p, "\n");
1381 p++;
1383 } else
1384 avctx->stats_out[0] = '\0';
1385 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1386 flush_put_bits(&s->pb);
1387 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1390 s->picture_number++;
1392 return size*4;
1395 static av_cold int encode_end(AVCodecContext *avctx)
1397 HYuvContext *s = avctx->priv_data;
1399 common_end(s);
1401 av_freep(&avctx->extradata);
1402 av_freep(&avctx->stats_out);
1404 return 0;
1406 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1408 #if CONFIG_HUFFYUV_DECODER
1409 AVCodec huffyuv_decoder = {
1410 "huffyuv",
1411 CODEC_TYPE_VIDEO,
1412 CODEC_ID_HUFFYUV,
1413 sizeof(HYuvContext),
1414 decode_init,
1415 NULL,
1416 decode_end,
1417 decode_frame,
1418 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1419 NULL,
1420 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1422 #endif
1424 #if CONFIG_FFVHUFF_DECODER
1425 AVCodec ffvhuff_decoder = {
1426 "ffvhuff",
1427 CODEC_TYPE_VIDEO,
1428 CODEC_ID_FFVHUFF,
1429 sizeof(HYuvContext),
1430 decode_init,
1431 NULL,
1432 decode_end,
1433 decode_frame,
1434 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1435 NULL,
1436 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1438 #endif
1440 #if CONFIG_HUFFYUV_ENCODER
1441 AVCodec huffyuv_encoder = {
1442 "huffyuv",
1443 CODEC_TYPE_VIDEO,
1444 CODEC_ID_HUFFYUV,
1445 sizeof(HYuvContext),
1446 encode_init,
1447 encode_frame,
1448 encode_end,
1449 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1450 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1452 #endif
1454 #if CONFIG_FFVHUFF_ENCODER
1455 AVCodec ffvhuff_encoder = {
1456 "ffvhuff",
1457 CODEC_TYPE_VIDEO,
1458 CODEC_ID_FFVHUFF,
1459 sizeof(HYuvContext),
1460 encode_init,
1461 encode_frame,
1462 encode_end,
1463 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1464 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1466 #endif