Merge branch 'mirror' into vdpau
[FFMpeg-mirror/ffmpeg-vdpau.git] / libavcodec / mpegvideo.c
blob5289b01c666d247c28c45e44e9139dc1c0e8f648
1 /*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 /**
26 * @file mpegvideo.c
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "avcodec.h"
31 #include "dsputil.h"
32 #include "mpegvideo.h"
33 #include "mpegvideo_common.h"
34 #include "mjpegenc.h"
35 #include "msmpeg4.h"
36 #include "faandct.h"
37 #include <limits.h>
39 //#undef NDEBUG
40 //#include <assert.h>
42 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
43 DCTELEM *block, int n, int qscale);
44 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
45 DCTELEM *block, int n, int qscale);
46 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
47 DCTELEM *block, int n, int qscale);
48 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
49 DCTELEM *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
51 DCTELEM *block, int n, int qscale);
52 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
53 DCTELEM *block, int n, int qscale);
54 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
55 DCTELEM *block, int n, int qscale);
57 extern int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
58 extern void XVMC_field_end(MpegEncContext *s);
59 extern void XVMC_decode_mb(MpegEncContext *s);
61 extern int VDPAU_mpeg_field_start(MpegEncContext *s);
63 /* enable all paranoid tests for rounding, overflows, etc... */
64 //#define PARANOID
66 //#define DEBUG
/* Default luma->chroma qscale mapping: the identity, i.e. chroma uses the
 * same quantizer as luma. Codecs with a real mapping (e.g. MPEG-4 with
 * chroma_elim) install their own table instead. */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
/* MPEG-1 DC coefficient scale: constant 8 for every qscale (MPEG-1 does not
 * scale the DC quantizer with qscale). Indexed by qscale, 128 entries so
 * out-of-range reads by other codecs' qscale values stay in bounds. */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
/**
 * Scans [p, end) for an MPEG start-code prefix (00 00 01), carrying partial
 * matches across calls in *state (the most recent bytes seen, big-endian,
 * so prefixes straddling buffer boundaries are found).
 * @return pointer just past the byte that completed the start code
 *         (so *state == 0x000001XX), or end if none was found.
 * *state is always updated so scanning can resume on the next buffer.
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    /* first, check whether bytes carried over in *state combine with the
       next up-to-3 input bytes into a start code crossing the border */
    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)
            return p;
    }

    /* fast scan: look at bytes behind the cursor and skip as many
       positions as the pattern provably cannot contain 00 00 01 */
    while(p<end){
        if     (p[-1] > 1      ) p+= 3; /* p[-1] can be neither the 00 00 nor the 01 of a prefix */
        else if(p[-2]          ) p+= 2;
        else if(p[-3]|(p[-1]-1)) p++;
        else{
            p++; /* p[-3..-1] == 00 00 01: found */
            break;
        }
    }

    p= FFMIN(p, end)-4;
    *state= AV_RB32(p); /* stash the last 4 bytes for the next call */

    return p+4;
}
/* init common dct for both encoder and decoder */
int ff_dct_common_init(MpegEncContext *s)
{
    /* install the portable C dequantizers first ... */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* ... then let the per-architecture init override them with SIMD
       versions where available (exactly one branch is compiled in) */
#if defined(HAVE_MMX)
    MPV_common_init_mmx(s);
#elif defined(ARCH_ALPHA)
    MPV_common_init_axp(s);
#elif defined(CONFIG_MLIB)
    MPV_common_init_mlib(s);
#elif defined(HAVE_MMI)
    MPV_common_init_mmi(s);
#elif defined(ARCH_ARMV4L)
    MPV_common_init_armv4l(s);
#elif defined(HAVE_ALTIVEC)
    MPV_common_init_altivec(s);
#elif defined(ARCH_BFIN)
    MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
       note: only wmv uses different ones
       (permutation depends on the IDCT chosen above, hence done here) */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
157 void ff_copy_picture(Picture *dst, Picture *src){
158 *dst = *src;
159 dst->type= FF_BUFFER_TYPE_COPY;
/**
 * allocates a Picture
 * The pixels are allocated/set by calling get_buffer() if shared=0
 * @return 0 on success, -1 on failure (allocated side data is freed via
 *         MPV_common_end()/free_picture(), see the fail label)
 */
int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
    const int mb_array_size= s->mb_stride*s->mb_height;
    const int b8_array_size= s->b8_stride*s->mb_height*2;
    const int b4_array_size= s->b4_stride*s->mb_height*4;
    int i;
    int r= -1;

    if(shared){
        /* caller supplied the pixel planes; just tag the picture */
        assert(pic->data[0]);
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
        pic->type= FF_BUFFER_TYPE_SHARED;
    }else{
        assert(!pic->data[0]);

        r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

        /* sanity-check what the (possibly user-supplied) get_buffer returned */
        if(r<0 || !pic->age || !pic->type || !pic->data[0]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
            return -1;
        }

        /* strides must stay constant over the whole sequence */
        if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
            s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
            return -1;
        }

        if(pic->linesize[1] != pic->linesize[2]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
            s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
            return -1;
        }

        s->linesize = pic->linesize[0];
        s->uvlinesize= pic->linesize[1];
    }

    /* allocate per-picture side data only once (qscale_table doubles as
       the "already initialized" marker) */
    if(pic->qscale_table==NULL){
        if (s->encoding) {
            CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
        }

        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t))
        pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1; /* skip the padding row/col */
        if(s->out_format == FMT_H264){
            /* H.264: motion vectors at 4x4 granularity */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
            /* others: 8x8 granularity; only needed for encoding / MV debug */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 3;
        }
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6)
        }
        pic->qstride= s->mb_stride;
        CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
    }

    /* It might be nicer if the application would keep track of these
     * but it would require an API change. */
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
        pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.

    return 0;
fail: //for the CHECKED_ALLOCZ macro
    if(r>=0)
        s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
    return -1;
}
/**
 * deallocates a picture: releases the pixel buffer (unless it is shared,
 * i.e. owned by the caller) and frees all per-picture side data.
 */
static void free_picture(MpegEncContext *s, Picture *pic){
    int i;

    if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
        s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->mbskip_table);
    av_freep(&pic->qscale_table);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->dct_coeff);
    av_freep(&pic->pan_scan);
    pic->mb_type= NULL; /* pointed into mb_type_base, freed above */
    for(i=0; i<2; i++){
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->ref_index[i]);
    }

    if(pic->type == FF_BUFFER_TYPE_SHARED){
        /* shared pictures do not own their planes; just drop the pointers */
        for(i=0; i<4; i++){
            pic->base[i]=
            pic->data[i]= NULL;
        }
        pic->type= 0;
    }
}
/**
 * Allocates the per-thread scratch buffers of a (possibly duplicated)
 * context. On failure the partially allocated buffers are NOT freed here;
 * MPV_common_end() is expected to clean up.
 * @return 0 on success, -1 on allocation failure
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int i;

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*21*2); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;

    //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
    CHECKED_ALLOCZ(s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t))
    /* the scratchpads alias the same allocation */
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
        CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
        }
    }
    CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM))
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = (short *)(&s->block[i]);
    }
    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
314 static void free_duplicate_context(MpegEncContext *s){
315 if(s==NULL) return;
317 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
318 av_freep(&s->me.scratchpad);
319 s->rd_scratchpad=
320 s->b_scratchpad=
321 s->obmc_scratchpad= NULL;
323 av_freep(&s->dct_error_sum);
324 av_freep(&s->me.map);
325 av_freep(&s->me.score_map);
326 av_freep(&s->blocks);
327 s->block= NULL;
/**
 * Copies the per-thread fields from src into bak. Used by
 * ff_update_duplicate_context() to let those fields survive the wholesale
 * memcpy of the context; everything not listed here is shared/overwritten.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(allocated_edge_emu_buffer);
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
#undef COPY
}
/**
 * Refreshes a per-thread duplicate context from the master context:
 * copies everything from src, then restores dst's own thread-local
 * buffers/state (saved via backup_duplicate_context) on top.
 */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
    MpegEncContext bak;
    int i;
    //FIXME copy only needed parts
//START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks point into dst's own block array, re-derive them */
    for(i=0;i<12;i++){
        dst->pblocks[i] = (short *)(&dst->block[i]);
    }
//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
}
367 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
368 * the changed fields will not depend upon the prior state of the MpegEncContext.
370 void MPV_common_defaults(MpegEncContext *s){
371 s->y_dc_scale_table=
372 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
373 s->chroma_qscale_table= ff_default_chroma_qscale_table;
374 s->progressive_frame= 1;
375 s->progressive_sequence= 1;
376 s->picture_structure= PICT_FRAME;
378 s->coded_picture_number = 0;
379 s->picture_number = 0;
380 s->input_picture_number = 0;
382 s->picture_in_gop_number = 0;
384 s->f_code = 1;
385 s->b_code = 1;
/**
 * sets the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 * (currently identical to the common defaults; decoder-specific defaults
 * would be added here)
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 * @return 0 on success, -1 on failure (everything allocated so far is
 *         freed through MPV_common_end())
 */
int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;

    s->mb_height = (s->height + 15) / 16;

    /* one slice thread per MB row at most */
    if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
        return -1;

    dsputil_init(&s->dsp, s->avctx);
    ff_dct_common_init(s);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    /* derived geometry; strides have +1 padding column */
    s->mb_width  = (s->width  + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                                    &(s->chroma_y_shift) );

    /* set default edge pos, will be overriden in decode_header if needed */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;

    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* convert fourcc to upper case */
    s->codec_tag=          toupper( s->avctx->codec_tag     &0xFF)
                        + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
                        + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
                        + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);

    s->stream_codec_tag=          toupper( s->avctx->stream_codec_tag     &0xFF)
                               + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
                               + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
                               + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    /* mapping from linear MB index to (padded) stride-based xy index */
    CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
        }
    }
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
        /* offset past the padding row/column */
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if(s->msmpeg4_version){
            CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
        }
        CHECKED_ALLOCZ(s->avctx->stats_out, 256);

        /* Allocate MB type table */
        CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint16_t)) //needed for encoding

        CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))

        CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
        CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))

        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
        }
    }
    CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))

    CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* interlaced direct mode decoding tables */
        for(i=0; i<2; i++){
            int j, k;
            for(j=0; j<2; j++){
                for(k=0; k<2; k++){
                    CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k]     , mv_table_size * 2 * sizeof(int16_t))
                    s->b_field_mv_table[i][j][k]    = s->b_field_mv_table_base[i][j][k]     + s->mb_stride + 1;
                }
                CHECKED_ALLOCZ(s->b_field_select_table[i][j], mb_array_size * 2 * sizeof(uint8_t))
                CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t))
                s->p_field_mv_table[i][j]    = s->p_field_mv_table_base[i][j]     + s->mb_stride + 1;
            }
            CHECKED_ALLOCZ(s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t))
        }
    }
    if (s->out_format == FMT_H263) {
        /* ac values */
        CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;

        /* cbp values */
        CHECKED_ALLOCZ(s->coded_block_base, y_size);
        s->coded_block= s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        //MN: we need these for error resilience of intra-frames
        CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t));
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for(i=0;i<yc_size;i++)
            s->dc_val_base[i] = 1024; /* neutral DC predictor value */
    }

    /* which mb is a intra block */
    CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
    //Note the +1 is for a quicker mpeg4 slice_end detection
    CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);

    s->parse_context.state= -1;
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
       s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
    }

    s->context_initialized = 1;

    /* duplicate the master context for each additional slice thread */
    s->thread_context[0]= s;
    threads = s->avctx->thread_count;

    for(i=1; i<threads; i++){
        s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
        memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
    }

    for(i=0; i<threads; i++){
        if(init_duplicate_context(s->thread_context[i], s) < 0)
           goto fail;
        /* split MB rows about evenly between the threads */
        s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
        s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}
/* free everything allocated by MPV_common_init() (and per-picture data);
   safe to call on a partially initialized context (all pointers are freed
   via av_freep and thus NULLed) */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    for(i=0; i<s->avctx->thread_count; i++){
        free_duplicate_context(s->thread_context[i]);
    }
    /* thread_context[0] is s itself, only the duplicates were malloced */
    for(i=1; i<s->avctx->thread_count; i++){
        av_freep(&s->thread_context[i]);
    }

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the non-base pointers were offsets into the freed bases */
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->ac_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if(s->picture){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    avcodec_default_free_buffers(s->avctx);
}
/**
 * Builds the run/level helper tables (max_level[], max_run[], index_run[])
 * of an RLTable from its raw run/level lists, once for non-last (last=0)
 * and once for last (last=1) coefficients.
 * @param static_store if non-NULL, a caller-provided static buffer laid out
 *        as [max_level | max_run | index_run] per 'last' value, in which
 *        case no heap allocation happens and re-initialization is skipped.
 */
void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if(static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        /* the raw table stores non-last entries first, then last entries */
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1); /* rl->n marks "no entry yet" */
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n) /* first code of this run */
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* NOTE(review): the av_malloc results below are not checked;
           callers in this tree appear to rely on allocation succeeding */
        if(static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if(static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if(static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
/**
 * Precomputes, for every qscale 0..31, an rl_vlc table that maps each VLC
 * table index directly to (len, run, dequantized level), so the decoder
 * can skip the run/level lookup and dequantization per coefficient.
 */
void init_vlc_rl(RLTable *rl)
{
    int i, q;

    for(q=0; q<32; q++){
        int qmul= q*2;
        int qadd= (q-1)|1;

        if(q==0){ /* q==0 means "no dequantization" */
            qmul=1;
            qadd=0;
        }
        for(i=0; i<rl->vlc.table_size; i++){
            int code= rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            int level, run;

            if(len==0){ // illegal code
                run= 66;
                level= MAX_LEVEL;
            }else if(len<0){ //more bits needed
                run= 0;
                level= code;
            }else{
                if(code==rl->n){ //esc
                    run= 66;
                    level= 0;
                }else{
                    run=   rl->table_run  [code] + 1;
                    level= rl->table_level[code] * qmul + qadd;
                    if(code >= rl->last) run+=192; /* mark "last" codes */
                }
            }
            rl->rl_vlc[q][i].len= len;
            rl->rl_vlc[q][i].level= level;
            rl->rl_vlc[q][i].run= run;
        }
    }
}
766 int ff_find_unused_picture(MpegEncContext *s, int shared){
767 int i;
769 if(shared){
770 for(i=0; i<MAX_PICTURE_COUNT; i++){
771 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
773 }else{
774 for(i=0; i<MAX_PICTURE_COUNT; i++){
775 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
777 for(i=0; i<MAX_PICTURE_COUNT; i++){
778 if(s->picture[i].data[0]==NULL) return i;
782 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
783 /* We could return -1, but the codec would crash trying to draw into a
784 * non-existing frame anyway. This is safer than waiting for a random crash.
785 * Also the return of this is never useful, an encoder must only allocate
786 * as much as allowed in the specification. This has no relationship to how
787 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
788 * enough for such valid streams).
789 * Plus, a decoder has to check stream validity and remove frames if too
790 * many reference frames are around. Waiting for "OOM" is not correct at
791 * all. Similarly, missing reference frames have to be replaced by
792 * interpolated/MC frames, anything else is a bug in the codec ...
794 abort();
795 return -1;
798 static void update_noise_reduction(MpegEncContext *s){
799 int intra, i;
801 for(intra=0; intra<2; intra++){
802 if(s->dct_count[intra] > (1<<16)){
803 for(i=0; i<64; i++){
804 s->dct_error_sum[intra][i] >>=1;
806 s->dct_count[intra] >>= 1;
809 for(i=0; i<64; i++){
810 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
/**
 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
 * Selects/allocates the current picture, rotates last/next reference
 * pointers and installs the dequantizer functions for this frame.
 * @return 0 on success, -1 if a picture could not be allocated
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    AVFrame *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
        if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
            avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);

            /* release forgotten pictures */
            /* if(mpeg124/h263) */
            if(!s->encoding){
                for(i=0; i<MAX_PICTURE_COUNT; i++){
                    if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                        av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                        avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
                    }
                }
            }
        }
    }
alloc:
    if(!s->encoding){
        /* release non reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }

        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= (AVFrame*)s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= (AVFrame*)&s->picture[i];
        }

        /* reference flag: 0 = discardable, 3 = both fields reference,
           for H.264 the picture structure itself encodes which field(s) */
        pic->reference= 0;
        if (!s->dropable){
            if (s->codec_id == CODEC_ID_H264)
                pic->reference = s->picture_structure;
            else if (s->pict_type != FF_B_TYPE)
                pic->reference = 3;
        }

        pic->coded_picture_number= s->coded_picture_number++;

        if( alloc_picture(s, (Picture*)pic, 0) < 0)
            return -1;

        s->current_picture_ptr= (Picture*)pic;
        s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
    }

    s->current_picture_ptr->pict_type= s->pict_type;
//    if(s->flags && CODEC_FLAG_QSCALE)
  //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
    s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* non-B frames rotate the reference pointers */
    if (s->pict_type != FF_B_TYPE) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }
/*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
        s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
        s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
        s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
        s->pict_type, s->dropable);*/

    if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    /* inter frame without a reference (stream starts on a non-keyframe):
       retry the allocation so a gray/garbage reference can be used */
    if(s->pict_type != FF_I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && !s->dropable && s->codec_id != CODEC_ID_H264){
        av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
        assert(s->pict_type != FF_B_TYPE); //these should have been dropped if we don't have a reference
        goto alloc;
    }

    assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

    /* field pictures: double the strides so each field is addressed as a
       half-height frame; bottom field additionally starts one line down */
    if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                s->current_picture.data[i] += s->current_picture.linesize[i];
            }
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i] *=2;
            s->next_picture.linesize[i] *=2;
        }
    }

    s->hurry_up= s->avctx->hurry_up;
    s->error_recognition= avctx->error_recognition;

    /* set dequantizer, we can't do it during init as it might change for mpeg4
       and we can't do it in the header decode as init is not called for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

#ifdef HAVE_XVMC
    if(s->avctx->xvmc_acceleration)
        return XVMC_field_start(s, avctx);
#endif
    return 0;
}
947 /* generic function for encode/decode called after a frame has been coded/decoded */
948 void MPV_frame_end(MpegEncContext *s)
950 int i;
951 /* draw edge for correct motion prediction if outside */
952 #ifdef HAVE_XVMC
953 //just to make sure that all data is rendered.
954 if(s->avctx->xvmc_acceleration){
955 XVMC_field_end(s);
956 }else
957 #endif
958 #ifdef HAVE_VDPAU
959 if(s->avctx->vdpau_acceleration){
960 }else
961 #endif
962 if(s->unrestricted_mv && s->current_picture.reference && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
963 s->dsp.draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
964 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
965 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
967 emms_c();
969 s->last_pict_type = s->pict_type;
970 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
971 if(s->pict_type!=FF_B_TYPE){
972 s->last_non_b_pict_type= s->pict_type;
974 #if 0
975 /* copy back current_picture variables */
976 for(i=0; i<MAX_PICTURE_COUNT; i++){
977 if(s->picture[i].data[0] == s->current_picture.data[0]){
978 s->picture[i]= s->current_picture;
979 break;
982 assert(i<MAX_PICTURE_COUNT);
983 #endif
985 if(s->encoding){
986 /* release non-reference frames */
987 for(i=0; i<MAX_PICTURE_COUNT; i++){
988 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
989 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
993 // clear copies, to avoid confusion
994 #if 0
995 memset(&s->last_picture, 0, sizeof(Picture));
996 memset(&s->next_picture, 0, sizeof(Picture));
997 memset(&s->current_picture, 0, sizeof(Picture));
998 #endif
999 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
 * Draws a line from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the line
1009 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1010 int x, y, fr, f;
1012 sx= av_clip(sx, 0, w-1);
1013 sy= av_clip(sy, 0, h-1);
1014 ex= av_clip(ex, 0, w-1);
1015 ey= av_clip(ey, 0, h-1);
1017 buf[sy*stride + sx]+= color;
1019 if(FFABS(ex - sx) > FFABS(ey - sy)){
1020 if(sx > ex){
1021 FFSWAP(int, sx, ex);
1022 FFSWAP(int, sy, ey);
1024 buf+= sx + sy*stride;
1025 ex-= sx;
1026 f= ((ey-sy)<<16)/ex;
1027 for(x= 0; x <= ex; x++){
1028 y = (x*f)>>16;
1029 fr= (x*f)&0xFFFF;
1030 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1031 buf[(y+1)*stride + x]+= (color* fr )>>16;
1033 }else{
1034 if(sy > ey){
1035 FFSWAP(int, sx, ex);
1036 FFSWAP(int, sy, ey);
1038 buf+= sx + sy*stride;
1039 ey-= sy;
1040 if(ey) f= ((ex-sx)<<16)/ey;
1041 else f= 0;
1042 for(y= 0; y <= ey; y++){
1043 x = (y*f)>>16;
1044 fr= (y*f)&0xFFFF;
1045 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1046 buf[y*stride + x+1]+= (color* fr )>>16;
1052 * draws an arrow from (ex, ey) -> (sx, sy).
1053 * @param w width of the image
1054 * @param h height of the image
1055 * @param stride stride/linesize of the image
1056 * @param color color of the arrow
1058 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1059 int dx,dy;
1061 sx= av_clip(sx, -100, w+100);
1062 sy= av_clip(sy, -100, h+100);
1063 ex= av_clip(ex, -100, w+100);
1064 ey= av_clip(ey, -100, h+100);
1066 dx= ex - sx;
1067 dy= ey - sy;
1069 if(dx*dx + dy*dy > 3*3){
1070 int rx= dx + dy;
1071 int ry= -dx + dy;
1072 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1074 //FIXME subpixel accuracy
1075 rx= ROUNDED_DIV(rx*3<<4, length);
1076 ry= ROUNDED_DIV(ry*3<<4, length);
1078 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1079 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1081 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
 * Prints debugging info for the given picture.
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
    /* Nothing to print/visualize without per-MB metadata. */
    if(!pict || !pict->mb_type) return;

    /* Textual per-macroblock dump: skip counts, qscale and/or MB type
       as a character grid, one row per MB row. */
    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
            case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
            case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
            case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
            case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
            case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
            case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
        }
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    /* consecutive-skip count, capped at one digit */
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if(s->avctx->debug&FF_DEBUG_QP){
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
                }
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];
                    //Type & MV direction
                    if(IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if(IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if(IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if(IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if(IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if(IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if(!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if(!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else{
                        /* bidirectional: uses both reference lists */
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    //segmentation
                    if(IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if(IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if(IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* interlaced marker (H.264 MBAFF only) */
                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
//                av_log(s->avctx, AV_LOG_DEBUG, " ");
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* Visual overlay: draw MVs / QP / MB type into a copy of the frame. */
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        const int shift= 1 + s->quarter_sample;     /* MV fractional bits to drop */
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height= s->avctx->height;
        const int mv_sample_log2= 4 - pict->motion_subsample_log2;
        const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay=0; //needed to see the vectors without trashing the buffers

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
        /* Work on a private copy so the real reference frames stay clean. */
        for(i=0; i<3; i++){
            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
            pict->data[i]= s->visualization_buffer[i];
        }
        pict->type= FF_BUFFER_TYPE_COPY;
        ptr= pict->data[0];
        block_height = 16>>v_chroma_shift;

        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            int mb_x;
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_index= mb_x + mb_y*s->mb_stride;
                if((s->avctx->debug_mv) && pict->motion_val){
                    int type;
                    /* type 0: P forward, 1: B forward, 2: B backward */
                    for(type=0; type<3; type++){
                        int direction = 0;
                        switch (type) {
                        case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
                                    continue;
                                direction = 0;
                                break;
                        case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
                                    continue;
                                direction = 0;
                                break;
                        case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
                                    continue;
                                direction = 1;
                                break;
                        }
                        if(!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        /* one arrow per partition, from block center */
                        if(IS_8X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<4; i++){
                                int sx= mb_x*16 + 4 + 8*(i&1);
                                int sy= mb_y*16 + 4 + 8*(i>>1);
                                int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                                int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                                int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                            }
                        }else if(IS_16X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 8;
                                int sy=mb_y*16 + 4 + 8*i;
                                int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2;

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else if(IS_8X16(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 4 + 8*i;
                                int sy=mb_y*16 + 8;
                                int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2;

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else{
                            int sx= mb_x*16 + 8;
                            int sy= mb_y*16 + 8;
                            int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
                            int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                            int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                        }
                    }
                }
                if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
                    /* paint chroma with a gray level proportional to qscale */
                    uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
                    int y;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
                    }
                }
                if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
                    int mb_type= pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
                    /* map a hue angle (degrees) + saturation to U/V values */
#define COLOR(theta, r)\
u= (int)(128 + r*cos(theta*3.141592/180));\
v= (int)(128 + r*sin(theta*3.141592/180));

                    u=v=128;
                    if(IS_PCM(mb_type)){
                        COLOR(120,48)
                    }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
                        COLOR(30,48)
                    }else if(IS_INTRA4x4(mb_type)){
                        COLOR(90,48)
                    }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
//                        COLOR(120,48)
                    }else if(IS_DIRECT(mb_type)){
                        COLOR(150,48)
                    }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
                        COLOR(170,48)
                    }else if(IS_GMC(mb_type)){
                        COLOR(190,48)
                    }else if(IS_SKIP(mb_type)){
//                        COLOR(180,48)
                    }else if(!USES_LIST(mb_type, 1)){
                        COLOR(240,48)
                    }else if(!USES_LIST(mb_type, 0)){
                        COLOR(0,48)
                    }else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    /* replicate the byte across the whole 8-byte chroma row */
                    u*= 0x0101010101010101ULL;
                    v*= 0x0101010101010101ULL;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
                    }

                    //segmentation
                    if(IS_8X8(mb_type) || IS_16X8(mb_type)){
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                    }
                    if(IS_8X8(mb_type) || IS_8X16(mb_type)){
                        for(y=0; y<16; y++)
                            pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
                    }
                    if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
                        int dm= 1 << (mv_sample_log2-2);
                        for(i=0; i<4; i++){
                            int sx= mb_x*16 + 8*(i&1);
                            int sy= mb_y*16 + 8*(i>>1);
                            int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                            //FIXME bidir
                            int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
                            /* draw sub-partition split lines where MVs differ */
                            if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
                                for(y=0; y<8; y++)
                                    pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
                            if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
                                *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
                        }
                    }

                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
                        // hmm
                    }
                    s->mbskip_table[mb_index]=0;
                }
            }
        }
    }
}
1346 static inline int hpel_motion_lowres(MpegEncContext *s,
1347 uint8_t *dest, uint8_t *src,
1348 int field_based, int field_select,
1349 int src_x, int src_y,
1350 int width, int height, int stride,
1351 int h_edge_pos, int v_edge_pos,
1352 int w, int h, h264_chroma_mc_func *pix_op,
1353 int motion_x, int motion_y)
1355 const int lowres= s->avctx->lowres;
1356 const int s_mask= (2<<lowres)-1;
1357 int emu=0;
1358 int sx, sy;
1360 if(s->quarter_sample){
1361 motion_x/=2;
1362 motion_y/=2;
1365 sx= motion_x & s_mask;
1366 sy= motion_y & s_mask;
1367 src_x += motion_x >> (lowres+1);
1368 src_y += motion_y >> (lowres+1);
1370 src += src_y * stride + src_x;
1372 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1373 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1374 ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1375 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1376 src= s->edge_emu_buffer;
1377 emu=1;
1380 sx <<= 2 - lowres;
1381 sy <<= 2 - lowres;
1382 if(field_select)
1383 src += s->linesize;
1384 pix_op[lowres](dest, src, stride, h, sx, sy);
1385 return emu;
/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;              /* block size in lowres pixels */
    const int s_mask= (2<<lowres)-1;           /* sub-sample phase mask */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    /* doubled stride addresses one field of an interlaced picture */
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    /* luma: split vector into integer position and sub-sample phase */
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(s->mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    /* chroma position depends on how each format derives chroma MVs */
    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s + (mx >> lowres);
        uvsrc_y = s->mb_y*block_s + (my >> lowres);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(s->mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* use the edge-emulation buffer when the read crosses picture borders */
    if(   (unsigned)src_x >  h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y > (v_edge_pos >> field_based) - (!!sy) - h){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                ff_emulated_edge_mc(uvbuf   , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    /* scale phases to 1/8-pel range and run the MC functions */
    sx <<= 2 - lowres;
    sy <<= 2 - lowres;
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx <<= 2 - lowres;
        uvsy <<= 2 - lowres;
        pix_op[lowres](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[lowres](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    //FIXME h261 lowres loop filter
}
1485 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1486 uint8_t *dest_cb, uint8_t *dest_cr,
1487 uint8_t **ref_picture,
1488 h264_chroma_mc_func *pix_op,
1489 int mx, int my){
1490 const int lowres= s->avctx->lowres;
1491 const int block_s= 8>>lowres;
1492 const int s_mask= (2<<lowres)-1;
1493 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1494 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1495 int emu=0, src_x, src_y, offset, sx, sy;
1496 uint8_t *ptr;
1498 if(s->quarter_sample){
1499 mx/=2;
1500 my/=2;
1503 /* In case of 8X8, we construct a single chroma motion vector
1504 with a special rounding */
1505 mx= ff_h263_round_chroma(mx);
1506 my= ff_h263_round_chroma(my);
1508 sx= mx & s_mask;
1509 sy= my & s_mask;
1510 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1511 src_y = s->mb_y*block_s + (my >> (lowres+1));
1513 offset = src_y * s->uvlinesize + src_x;
1514 ptr = ref_picture[1] + offset;
1515 if(s->flags&CODEC_FLAG_EMU_EDGE){
1516 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1517 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1518 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1519 ptr= s->edge_emu_buffer;
1520 emu=1;
1523 sx <<= 2 - lowres;
1524 sy <<= 2 - lowres;
1525 pix_op[lowres](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
1527 ptr = ref_picture[2] + offset;
1528 if(emu){
1529 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1530 ptr= s->edge_emu_buffer;
1532 pix_op[lowres](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1536 * motion compensation of a single macroblock
1537 * @param s context
1538 * @param dest_y luma destination pointer
1539 * @param dest_cb chroma cb/u destination pointer
1540 * @param dest_cr chroma cr/v destination pointer
1541 * @param dir direction (0->forward, 1->backward)
1542 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
1544 * the motion vectors are taken from s->mv and the MV type from s->mv_type
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;       /* 8x8 block size in lowres pixels */

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    /* dispatch on the macroblock's motion partitioning */
    switch(s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma uses their (specially rounded) sum */
        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                               ref_picture[0], 0, 0,
                               (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s);
        } else {
            /* field picture: same-parity reference may be the current frame */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, each covering a 16x8 half of the macroblock */
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: put the first prediction, average in the second */
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
        break;
    default: assert(0);
    }
}
/* put block[] to dest[] */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    /* Intra block: dequantize in place, then write (not add) the IDCT result. */
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
1669 /* add block[] to dest[] */
1670 static inline void add_dct(MpegEncContext *s,
1671 DCTELEM *block, int i, uint8_t *dest, int line_size)
1673 if (s->block_last_index[i] >= 0) {
1674 s->dsp.idct_add (dest, line_size, block);
1678 static inline void add_dequant_dct(MpegEncContext *s,
1679 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1681 if (s->block_last_index[i] >= 0) {
1682 s->dct_unquantize_inter(s, block, i, qscale);
1684 s->dsp.idct_add (dest, line_size, block);
1689 * cleans dc, ac, coded_block for the current non intra MB
1691 void ff_clean_intra_table_entries(MpegEncContext *s)
1693 int wrap = s->b8_stride;
1694 int xy = s->block_index[0];
1696 s->dc_val[0][xy ] =
1697 s->dc_val[0][xy + 1 ] =
1698 s->dc_val[0][xy + wrap] =
1699 s->dc_val[0][xy + 1 + wrap] = 1024;
1700 /* ac pred */
1701 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1702 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1703 if (s->msmpeg4_version>=3) {
1704 s->coded_block[xy ] =
1705 s->coded_block[xy + 1 ] =
1706 s->coded_block[xy + wrap] =
1707 s->coded_block[xy + 1 + wrap] = 0;
1709 /* chroma */
1710 wrap = s->mb_stride;
1711 xy = s->mb_x + s->mb_y * wrap;
1712 s->dc_val[1][xy] =
1713 s->dc_val[2][xy] = 1024;
1714 /* ac pred */
1715 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1716 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1718 s->mbintra_table[xy]= 0;
1721 /* generic function called after a macroblock has been parsed by the
1722 decoder or after it has been encoded by the encoder.
1724 Important variables used:
1725 s->mb_intra : true if intra macroblock
1726 s->mv_dir : motion vector direction
1727 s->mv_type : motion vector type
1728 s->mv : motion vector
1729 s->interlaced_dct : true if interlaced dct used (mpeg2)
1731 static av_always_inline
1732 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1733 int lowres_flag, int is_mpeg12)
1735 int mb_x, mb_y;
1736 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1737 #ifdef HAVE_XVMC
1738 if(s->avctx->xvmc_acceleration){
1739 XVMC_decode_mb(s);//xvmc uses pblocks
1740 return;
1742 #endif
1744 mb_x = s->mb_x;
1745 mb_y = s->mb_y;
1747 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1748 /* save DCT coefficients */
1749 int i,j;
1750 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
1751 for(i=0; i<6; i++)
1752 for(j=0; j<64; j++)
1753 *dct++ = block[i][s->dsp.idct_permutation[j]];
1756 s->current_picture.qscale_table[mb_xy]= s->qscale;
1758 /* update DC predictors for P macroblocks */
1759 if (!s->mb_intra) {
1760 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1761 if(s->mbintra_table[mb_xy])
1762 ff_clean_intra_table_entries(s);
1763 } else {
1764 s->last_dc[0] =
1765 s->last_dc[1] =
1766 s->last_dc[2] = 128 << s->intra_dc_precision;
1769 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1770 s->mbintra_table[mb_xy]=1;
1772 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1773 uint8_t *dest_y, *dest_cb, *dest_cr;
1774 int dct_linesize, dct_offset;
1775 op_pixels_func (*op_pix)[4];
1776 qpel_mc_func (*op_qpix)[16];
1777 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
1778 const int uvlinesize= s->current_picture.linesize[1];
1779 const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
1780 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1782 /* avoid copy if macroblock skipped in last frame too */
1783 /* skip only during decoding as we might trash the buffers during encoding a bit */
1784 if(!s->encoding){
1785 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1786 const int age= s->current_picture.age;
1788 assert(age);
1790 if (s->mb_skipped) {
1791 s->mb_skipped= 0;
1792 assert(s->pict_type!=FF_I_TYPE);
1794 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
1795 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1797 /* if previous was skipped too, then nothing to do ! */
1798 if (*mbskip_ptr >= age && s->current_picture.reference){
1799 return;
1801 } else if(!s->current_picture.reference){
1802 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
1803 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1804 } else{
1805 *mbskip_ptr = 0; /* not skipped */
1809 dct_linesize = linesize << s->interlaced_dct;
1810 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
1812 if(readable){
1813 dest_y= s->dest[0];
1814 dest_cb= s->dest[1];
1815 dest_cr= s->dest[2];
1816 }else{
1817 dest_y = s->b_scratchpad;
1818 dest_cb= s->b_scratchpad+16*linesize;
1819 dest_cr= s->b_scratchpad+32*linesize;
1822 if (!s->mb_intra) {
1823 /* motion handling */
1824 /* decoding or more than one mb_type (MC was already done otherwise) */
1825 if(!s->encoding){
1826 if(lowres_flag){
1827 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
1829 if (s->mv_dir & MV_DIR_FORWARD) {
1830 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
1831 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
1833 if (s->mv_dir & MV_DIR_BACKWARD) {
1834 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
1836 }else{
1837 op_qpix= s->me.qpel_put;
1838 if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
1839 op_pix = s->dsp.put_pixels_tab;
1840 }else{
1841 op_pix = s->dsp.put_no_rnd_pixels_tab;
1843 if (s->mv_dir & MV_DIR_FORWARD) {
1844 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
1845 op_pix = s->dsp.avg_pixels_tab;
1846 op_qpix= s->me.qpel_avg;
1848 if (s->mv_dir & MV_DIR_BACKWARD) {
1849 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
1854 /* skip dequant / idct if we are really late ;) */
1855 if(s->hurry_up>1) goto skip_idct;
1856 if(s->avctx->skip_idct){
1857 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
1858 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
1859 || s->avctx->skip_idct >= AVDISCARD_ALL)
1860 goto skip_idct;
1863 /* add dct residue */
1864 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
1865 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
1866 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1867 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1868 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1869 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1871 if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1872 if (s->chroma_y_shift){
1873 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1874 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1875 }else{
1876 dct_linesize >>= 1;
1877 dct_offset >>=1;
1878 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1879 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1880 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1881 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1884 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
1885 add_dct(s, block[0], 0, dest_y , dct_linesize);
1886 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
1887 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
1888 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1890 if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1891 if(s->chroma_y_shift){//Chroma420
1892 add_dct(s, block[4], 4, dest_cb, uvlinesize);
1893 add_dct(s, block[5], 5, dest_cr, uvlinesize);
1894 }else{
1895 //chroma422
1896 dct_linesize = uvlinesize << s->interlaced_dct;
1897 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
1899 add_dct(s, block[4], 4, dest_cb, dct_linesize);
1900 add_dct(s, block[5], 5, dest_cr, dct_linesize);
1901 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
1902 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
1903 if(!s->chroma_x_shift){//Chroma444
1904 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
1905 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
1906 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
1907 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
1910 }//fi gray
1912 else if (ENABLE_WMV2) {
1913 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1915 } else {
1916 /* dct only in intra block */
1917 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
1918 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1919 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1920 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1921 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1923 if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1924 if(s->chroma_y_shift){
1925 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1926 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1927 }else{
1928 dct_offset >>=1;
1929 dct_linesize >>=1;
1930 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1931 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1932 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1933 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1936 }else{
1937 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
1938 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
1939 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
1940 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1942 if(!ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1943 if(s->chroma_y_shift){
1944 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
1945 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
1946 }else{
1948 dct_linesize = uvlinesize << s->interlaced_dct;
1949 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
1951 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
1952 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
1953 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1954 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1955 if(!s->chroma_x_shift){//Chroma444
1956 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
1957 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
1958 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
1959 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
1962 }//gray
1965 skip_idct:
1966 if(!readable){
1967 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
1968 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
1969 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Reconstruct one decoded macroblock into the current picture.
 *
 * Thin dispatcher around MPV_decode_mb_internal(): the lowres and
 * is_mpeg12 arguments are passed as literal constants, presumably so the
 * (inlined) internal function can be specialized per codec family —
 * TODO confirm against MPV_decode_mb_internal's declaration. With
 * CONFIG_SMALL defined, the MPEG-1/2 specialization is dropped to save
 * code size and the generic (is_mpeg12 == 0) path is used for everything.
 *
 * @param s     codec context holding the macroblock position and mode
 * @param block the 12 dequantized/to-be-dequantized 8x8 coefficient blocks
 */
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
#ifndef CONFIG_SMALL
    if(s->out_format == FMT_MPEG1) {
        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
        else                 MPV_decode_mb_internal(s, block, 0, 1);
    } else
#endif
    if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
    else                  MPV_decode_mb_internal(s, block, 0, 0);
}
1987 * @param h is the normal height, this will be reduced automatically if needed for the last row
1989 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
1990 if (s->avctx->draw_horiz_band) {
1991 AVFrame *src;
1992 int offset[4];
1994 if(s->picture_structure != PICT_FRAME){
1995 h <<= 1;
1996 y <<= 1;
1997 if(s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2000 h= FFMIN(h, s->avctx->height - y);
2002 if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2003 src= (AVFrame*)s->current_picture_ptr;
2004 else if(s->last_picture_ptr)
2005 src= (AVFrame*)s->last_picture_ptr;
2006 else
2007 return;
2009 if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2010 offset[0]=
2011 offset[1]=
2012 offset[2]=
2013 offset[3]= 0;
2014 }else{
2015 offset[0]= y * s->linesize;
2016 offset[1]=
2017 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2018 offset[3]= 0;
2021 emms_c();
2023 s->avctx->draw_horiz_band(s->avctx, src, offset,
2024 y, s->picture_structure, h);
2028 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2029 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2030 const int uvlinesize= s->current_picture.linesize[1];
2031 const int mb_size= 4 - s->avctx->lowres;
2033 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2034 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2035 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2036 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2037 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2038 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2039 //block_index is not used by mpeg2, so it is not affected by chroma_format
2041 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2042 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2043 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2045 if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2047 s->dest[0] += s->mb_y * linesize << mb_size;
2048 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2049 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2053 void ff_mpeg_flush(AVCodecContext *avctx){
2054 int i;
2055 MpegEncContext *s = avctx->priv_data;
2057 if(s==NULL || s->picture==NULL)
2058 return;
2060 for(i=0; i<MAX_PICTURE_COUNT; i++){
2061 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2062 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2063 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
2065 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2067 s->mb_x= s->mb_y= 0;
2069 s->parse_context.state= -1;
2070 s->parse_context.frame_start_found= 0;
2071 s->parse_context.overread= 0;
2072 s->parse_context.overread_index= 0;
2073 s->parse_context.index= 0;
2074 s->parse_context.last_index= 0;
2075 s->bitstream_buffer_size=0;
2076 s->pp_time=0;
2079 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2080 DCTELEM *block, int n, int qscale)
2082 int i, level, nCoeffs;
2083 const uint16_t *quant_matrix;
2085 nCoeffs= s->block_last_index[n];
2087 if (n < 4)
2088 block[0] = block[0] * s->y_dc_scale;
2089 else
2090 block[0] = block[0] * s->c_dc_scale;
2091 /* XXX: only mpeg1 */
2092 quant_matrix = s->intra_matrix;
2093 for(i=1;i<=nCoeffs;i++) {
2094 int j= s->intra_scantable.permutated[i];
2095 level = block[j];
2096 if (level) {
2097 if (level < 0) {
2098 level = -level;
2099 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2100 level = (level - 1) | 1;
2101 level = -level;
2102 } else {
2103 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2104 level = (level - 1) | 1;
2106 block[j] = level;
2111 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2112 DCTELEM *block, int n, int qscale)
2114 int i, level, nCoeffs;
2115 const uint16_t *quant_matrix;
2117 nCoeffs= s->block_last_index[n];
2119 quant_matrix = s->inter_matrix;
2120 for(i=0; i<=nCoeffs; i++) {
2121 int j= s->intra_scantable.permutated[i];
2122 level = block[j];
2123 if (level) {
2124 if (level < 0) {
2125 level = -level;
2126 level = (((level << 1) + 1) * qscale *
2127 ((int) (quant_matrix[j]))) >> 4;
2128 level = (level - 1) | 1;
2129 level = -level;
2130 } else {
2131 level = (((level << 1) + 1) * qscale *
2132 ((int) (quant_matrix[j]))) >> 4;
2133 level = (level - 1) | 1;
2135 block[j] = level;
2140 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2141 DCTELEM *block, int n, int qscale)
2143 int i, level, nCoeffs;
2144 const uint16_t *quant_matrix;
2146 if(s->alternate_scan) nCoeffs= 63;
2147 else nCoeffs= s->block_last_index[n];
2149 if (n < 4)
2150 block[0] = block[0] * s->y_dc_scale;
2151 else
2152 block[0] = block[0] * s->c_dc_scale;
2153 quant_matrix = s->intra_matrix;
2154 for(i=1;i<=nCoeffs;i++) {
2155 int j= s->intra_scantable.permutated[i];
2156 level = block[j];
2157 if (level) {
2158 if (level < 0) {
2159 level = -level;
2160 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2161 level = -level;
2162 } else {
2163 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2165 block[j] = level;
2170 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2171 DCTELEM *block, int n, int qscale)
2173 int i, level, nCoeffs;
2174 const uint16_t *quant_matrix;
2175 int sum=-1;
2177 if(s->alternate_scan) nCoeffs= 63;
2178 else nCoeffs= s->block_last_index[n];
2180 if (n < 4)
2181 block[0] = block[0] * s->y_dc_scale;
2182 else
2183 block[0] = block[0] * s->c_dc_scale;
2184 quant_matrix = s->intra_matrix;
2185 for(i=1;i<=nCoeffs;i++) {
2186 int j= s->intra_scantable.permutated[i];
2187 level = block[j];
2188 if (level) {
2189 if (level < 0) {
2190 level = -level;
2191 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2192 level = -level;
2193 } else {
2194 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2196 block[j] = level;
2197 sum+=level;
2200 block[63]^=sum&1;
2203 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2204 DCTELEM *block, int n, int qscale)
2206 int i, level, nCoeffs;
2207 const uint16_t *quant_matrix;
2208 int sum=-1;
2210 if(s->alternate_scan) nCoeffs= 63;
2211 else nCoeffs= s->block_last_index[n];
2213 quant_matrix = s->inter_matrix;
2214 for(i=0; i<=nCoeffs; i++) {
2215 int j= s->intra_scantable.permutated[i];
2216 level = block[j];
2217 if (level) {
2218 if (level < 0) {
2219 level = -level;
2220 level = (((level << 1) + 1) * qscale *
2221 ((int) (quant_matrix[j]))) >> 4;
2222 level = -level;
2223 } else {
2224 level = (((level << 1) + 1) * qscale *
2225 ((int) (quant_matrix[j]))) >> 4;
2227 block[j] = level;
2228 sum+=level;
2231 block[63]^=sum&1;
2234 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2235 DCTELEM *block, int n, int qscale)
2237 int i, level, qmul, qadd;
2238 int nCoeffs;
2240 assert(s->block_last_index[n]>=0);
2242 qmul = qscale << 1;
2244 if (!s->h263_aic) {
2245 if (n < 4)
2246 block[0] = block[0] * s->y_dc_scale;
2247 else
2248 block[0] = block[0] * s->c_dc_scale;
2249 qadd = (qscale - 1) | 1;
2250 }else{
2251 qadd = 0;
2253 if(s->ac_pred)
2254 nCoeffs=63;
2255 else
2256 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2258 for(i=1; i<=nCoeffs; i++) {
2259 level = block[i];
2260 if (level) {
2261 if (level < 0) {
2262 level = level * qmul - qadd;
2263 } else {
2264 level = level * qmul + qadd;
2266 block[i] = level;
2271 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2272 DCTELEM *block, int n, int qscale)
2274 int i, level, qmul, qadd;
2275 int nCoeffs;
2277 assert(s->block_last_index[n]>=0);
2279 qadd = (qscale - 1) | 1;
2280 qmul = qscale << 1;
2282 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2284 for(i=0; i<=nCoeffs; i++) {
2285 level = block[i];
2286 if (level) {
2287 if (level < 0) {
2288 level = level * qmul - qadd;
2289 } else {
2290 level = level * qmul + qadd;
2292 block[i] = level;
2298 * set qscale and update qscale dependent variables.
2300 void ff_set_qscale(MpegEncContext * s, int qscale)
2302 if (qscale < 1)
2303 qscale = 1;
2304 else if (qscale > 31)
2305 qscale = 31;
2307 s->qscale = qscale;
2308 s->chroma_qscale= s->chroma_qscale_table[qscale];
2310 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2311 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];