2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * @file libavcodec/mpegvideo.c
27 * The simplest mpeg encoder (well, it was the simplest!).
32 #include "mpegvideo.h"
33 #include "mpegvideo_common.h"
37 #include "xvmc_internal.h"
/*
 * Forward declarations of the per-standard inverse-quantization routines.
 * ff_dct_common_init() installs these as function pointers on the
 * MpegEncContext (dct_unquantize_{mpeg1,mpeg2,h263}_{intra,inter}).
 *
 *   s      - codec context holding quant matrices / state
 *   block  - DCT coefficient block to dequantize in place
 *   n      - block index within the macroblock (presumably selects
 *            luma vs. chroma handling -- confirm against definitions)
 *   qscale - quantizer scale to apply
 */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
/* bit-exact MPEG-2 intra variant, selected when CODEC_FLAG_BITEXACT is set */
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        DCTELEM *block, int n, int qscale);
59 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale table: the identity mapping, i.e. chroma uses the
 * same quantizer scale as luma (index i maps to i).
 * NOTE(review): closing "};" restored -- it was lost in this excerpt. */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
/* MPEG-1 DC coefficient scale table: the DC scale is the constant 8 for
 * every qscale value 0..127 (MPEG-1 has no qscale-dependent DC scaling).
 * NOTE(review): closing "};" restored -- it was lost in this excerpt. */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
78 const enum PixelFormat ff_pixfmt_list_420
[] = {
83 const enum PixelFormat ff_hwaccel_pixfmt_list_420
[] = {
88 const uint8_t *ff_find_start_code(const uint8_t * restrict p
, const uint8_t *end
, uint32_t * restrict state
){
96 uint32_t tmp
= *state
<< 8;
98 if(tmp
== 0x100 || p
==end
)
103 if (p
[-1] > 1 ) p
+= 3;
104 else if(p
[-2] ) p
+= 2;
105 else if(p
[-3]|(p
[-1]-1)) p
++;
118 /* init common dct for both encoder and decoder */
119 av_cold
int ff_dct_common_init(MpegEncContext
*s
)
121 s
->dct_unquantize_h263_intra
= dct_unquantize_h263_intra_c
;
122 s
->dct_unquantize_h263_inter
= dct_unquantize_h263_inter_c
;
123 s
->dct_unquantize_mpeg1_intra
= dct_unquantize_mpeg1_intra_c
;
124 s
->dct_unquantize_mpeg1_inter
= dct_unquantize_mpeg1_inter_c
;
125 s
->dct_unquantize_mpeg2_intra
= dct_unquantize_mpeg2_intra_c
;
126 if(s
->flags
& CODEC_FLAG_BITEXACT
)
127 s
->dct_unquantize_mpeg2_intra
= dct_unquantize_mpeg2_intra_bitexact
;
128 s
->dct_unquantize_mpeg2_inter
= dct_unquantize_mpeg2_inter_c
;
131 MPV_common_init_mmx(s
);
133 MPV_common_init_axp(s
);
135 MPV_common_init_mlib(s
);
137 MPV_common_init_mmi(s
);
139 MPV_common_init_arm(s
);
141 MPV_common_init_altivec(s
);
143 MPV_common_init_bfin(s
);
146 /* load & permutate scantables
147 note: only wmv uses different ones
149 if(s
->alternate_scan
){
150 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->inter_scantable
, ff_alternate_vertical_scan
);
151 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_scantable
, ff_alternate_vertical_scan
);
153 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->inter_scantable
, ff_zigzag_direct
);
154 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_scantable
, ff_zigzag_direct
);
156 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_h_scantable
, ff_alternate_horizontal_scan
);
157 ff_init_scantable(s
->dsp
.idct_permutation
, &s
->intra_v_scantable
, ff_alternate_vertical_scan
);
162 void ff_copy_picture(Picture
*dst
, Picture
*src
){
164 dst
->type
= FF_BUFFER_TYPE_COPY
;
168 * Releases a frame buffer
170 static void free_frame_buffer(MpegEncContext
*s
, Picture
*pic
)
172 s
->avctx
->release_buffer(s
->avctx
, (AVFrame
*)pic
);
173 av_freep(&pic
->hwaccel_data_private
);
177 * Allocates a frame buffer
179 static int alloc_frame_buffer(MpegEncContext
*s
, Picture
*pic
)
183 if (s
->avctx
->hwaccel
) {
184 assert(!pic
->hwaccel_data_private
);
185 if (s
->avctx
->hwaccel
->priv_data_size
) {
186 pic
->hwaccel_data_private
= av_mallocz(s
->avctx
->hwaccel
->priv_data_size
);
187 if (!pic
->hwaccel_data_private
) {
188 av_log(s
->avctx
, AV_LOG_ERROR
, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
194 r
= s
->avctx
->get_buffer(s
->avctx
, (AVFrame
*)pic
);
196 if (r
<0 || !pic
->age
|| !pic
->type
|| !pic
->data
[0]) {
197 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (%d %d %d %p)\n", r
, pic
->age
, pic
->type
, pic
->data
[0]);
198 av_freep(&pic
->hwaccel_data_private
);
202 if (s
->linesize
&& (s
->linesize
!= pic
->linesize
[0] || s
->uvlinesize
!= pic
->linesize
[1])) {
203 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (stride changed)\n");
204 free_frame_buffer(s
, pic
);
208 if (pic
->linesize
[1] != pic
->linesize
[2]) {
209 av_log(s
->avctx
, AV_LOG_ERROR
, "get_buffer() failed (uv stride mismatch)\n");
210 free_frame_buffer(s
, pic
);
218 * allocates a Picture
219 * The pixels are allocated/set by calling get_buffer() if shared=0
221 int alloc_picture(MpegEncContext
*s
, Picture
*pic
, int shared
){
222 const int big_mb_num
= s
->mb_stride
*(s
->mb_height
+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
223 const int mb_array_size
= s
->mb_stride
*s
->mb_height
;
224 const int b8_array_size
= s
->b8_stride
*s
->mb_height
*2;
225 const int b4_array_size
= s
->b4_stride
*s
->mb_height
*4;
230 assert(pic
->data
[0]);
231 assert(pic
->type
== 0 || pic
->type
== FF_BUFFER_TYPE_SHARED
);
232 pic
->type
= FF_BUFFER_TYPE_SHARED
;
234 assert(!pic
->data
[0]);
236 if (alloc_frame_buffer(s
, pic
) < 0)
239 s
->linesize
= pic
->linesize
[0];
240 s
->uvlinesize
= pic
->linesize
[1];
243 if(pic
->qscale_table
==NULL
){
245 CHECKED_ALLOCZ(pic
->mb_var
, mb_array_size
* sizeof(int16_t))
246 CHECKED_ALLOCZ(pic
->mc_mb_var
, mb_array_size
* sizeof(int16_t))
247 CHECKED_ALLOCZ(pic
->mb_mean
, mb_array_size
* sizeof(int8_t))
250 CHECKED_ALLOCZ(pic
->mbskip_table
, mb_array_size
* sizeof(uint8_t)+2) //the +2 is for the slice end check
251 CHECKED_ALLOCZ(pic
->qscale_table
, mb_array_size
* sizeof(uint8_t))
252 CHECKED_ALLOCZ(pic
->mb_type_base
, (big_mb_num
+ s
->mb_stride
) * sizeof(uint32_t))
253 pic
->mb_type
= pic
->mb_type_base
+ 2*s
->mb_stride
+1;
254 if(s
->out_format
== FMT_H264
){
256 CHECKED_ALLOCZ(pic
->motion_val_base
[i
], 2 * (b4_array_size
+4) * sizeof(int16_t))
257 pic
->motion_val
[i
]= pic
->motion_val_base
[i
]+4;
258 CHECKED_ALLOCZ(pic
->ref_index
[i
], b8_array_size
* sizeof(uint8_t))
260 pic
->motion_subsample_log2
= 2;
261 }else if(s
->out_format
== FMT_H263
|| s
->encoding
|| (s
->avctx
->debug
&FF_DEBUG_MV
) || (s
->avctx
->debug_mv
)){
263 CHECKED_ALLOCZ(pic
->motion_val_base
[i
], 2 * (b8_array_size
+4) * sizeof(int16_t))
264 pic
->motion_val
[i
]= pic
->motion_val_base
[i
]+4;
265 CHECKED_ALLOCZ(pic
->ref_index
[i
], b8_array_size
* sizeof(uint8_t))
267 pic
->motion_subsample_log2
= 3;
269 if(s
->avctx
->debug
&FF_DEBUG_DCT_COEFF
) {
270 CHECKED_ALLOCZ(pic
->dct_coeff
, 64 * mb_array_size
* sizeof(DCTELEM
)*6)
272 pic
->qstride
= s
->mb_stride
;
273 CHECKED_ALLOCZ(pic
->pan_scan
, 1 * sizeof(AVPanScan
))
276 /* It might be nicer if the application would keep track of these
277 * but it would require an API change. */
278 memmove(s
->prev_pict_types
+1, s
->prev_pict_types
, PREV_PICT_TYPES_BUFFER_SIZE
-1);
279 s
->prev_pict_types
[0]= s
->dropable
? FF_B_TYPE
: s
->pict_type
;
280 if(pic
->age
< PREV_PICT_TYPES_BUFFER_SIZE
&& s
->prev_pict_types
[pic
->age
] == FF_B_TYPE
)
281 pic
->age
= INT_MAX
; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
284 fail
: //for the CHECKED_ALLOCZ macro
286 free_frame_buffer(s
, pic
);
291 * deallocates a picture
293 static void free_picture(MpegEncContext
*s
, Picture
*pic
){
296 if(pic
->data
[0] && pic
->type
!=FF_BUFFER_TYPE_SHARED
){
297 free_frame_buffer(s
, pic
);
300 av_freep(&pic
->mb_var
);
301 av_freep(&pic
->mc_mb_var
);
302 av_freep(&pic
->mb_mean
);
303 av_freep(&pic
->mbskip_table
);
304 av_freep(&pic
->qscale_table
);
305 av_freep(&pic
->mb_type_base
);
306 av_freep(&pic
->dct_coeff
);
307 av_freep(&pic
->pan_scan
);
310 av_freep(&pic
->motion_val_base
[i
]);
311 av_freep(&pic
->ref_index
[i
]);
314 if(pic
->type
== FF_BUFFER_TYPE_SHARED
){
323 static int init_duplicate_context(MpegEncContext
*s
, MpegEncContext
*base
){
326 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
327 CHECKED_ALLOCZ(s
->allocated_edge_emu_buffer
, (s
->width
+64)*2*21*2); //(width + edge + align)*interlaced*MBsize*tolerance
328 s
->edge_emu_buffer
= s
->allocated_edge_emu_buffer
+ (s
->width
+64)*2*21;
330 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
331 CHECKED_ALLOCZ(s
->me
.scratchpad
, (s
->width
+64)*4*16*2*sizeof(uint8_t))
332 s
->me
.temp
= s
->me
.scratchpad
;
333 s
->rd_scratchpad
= s
->me
.scratchpad
;
334 s
->b_scratchpad
= s
->me
.scratchpad
;
335 s
->obmc_scratchpad
= s
->me
.scratchpad
+ 16;
337 CHECKED_ALLOCZ(s
->me
.map
, ME_MAP_SIZE
*sizeof(uint32_t))
338 CHECKED_ALLOCZ(s
->me
.score_map
, ME_MAP_SIZE
*sizeof(uint32_t))
339 if(s
->avctx
->noise_reduction
){
340 CHECKED_ALLOCZ(s
->dct_error_sum
, 2 * 64 * sizeof(int))
343 CHECKED_ALLOCZ(s
->blocks
, 64*12*2 * sizeof(DCTELEM
))
344 s
->block
= s
->blocks
[0];
347 s
->pblocks
[i
] = &s
->block
[i
];
351 return -1; //free() through MPV_common_end()
354 static void free_duplicate_context(MpegEncContext
*s
){
357 av_freep(&s
->allocated_edge_emu_buffer
); s
->edge_emu_buffer
= NULL
;
358 av_freep(&s
->me
.scratchpad
);
362 s
->obmc_scratchpad
= NULL
;
364 av_freep(&s
->dct_error_sum
);
365 av_freep(&s
->me
.map
);
366 av_freep(&s
->me
.score_map
);
367 av_freep(&s
->blocks
);
371 static void backup_duplicate_context(MpegEncContext
*bak
, MpegEncContext
*src
){
372 #define COPY(a) bak->a= src->a
373 COPY(allocated_edge_emu_buffer
);
374 COPY(edge_emu_buffer
);
379 COPY(obmc_scratchpad
);
386 COPY(me
.map_generation
);
394 void ff_update_duplicate_context(MpegEncContext
*dst
, MpegEncContext
*src
){
397 //FIXME copy only needed parts
399 backup_duplicate_context(&bak
, dst
);
400 memcpy(dst
, src
, sizeof(MpegEncContext
));
401 backup_duplicate_context(dst
, &bak
);
403 dst
->pblocks
[i
] = &dst
->block
[i
];
405 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
409 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
410 * the changed fields will not depend upon the prior state of the MpegEncContext.
412 void MPV_common_defaults(MpegEncContext
*s
){
414 s
->c_dc_scale_table
= ff_mpeg1_dc_scale_table
;
415 s
->chroma_qscale_table
= ff_default_chroma_qscale_table
;
416 s
->progressive_frame
= 1;
417 s
->progressive_sequence
= 1;
418 s
->picture_structure
= PICT_FRAME
;
420 s
->coded_picture_number
= 0;
421 s
->picture_number
= 0;
422 s
->input_picture_number
= 0;
424 s
->picture_in_gop_number
= 0;
/**
 * Sets the given MpegEncContext to defaults for decoding.
 * The changed fields will not depend upon the prior state of the
 * MpegEncContext.  Currently this is identical to the common
 * (encoder+decoder) defaults, hence the plain delegation.
 * NOTE(review): closing "}" restored -- it was lost in this excerpt.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
439 * init common structure for both encoder and decoder.
440 * this assumes that some variables like width/height are already set
442 av_cold
int MPV_common_init(MpegEncContext
*s
)
444 int y_size
, c_size
, yc_size
, i
, mb_array_size
, mv_table_size
, x
, y
, threads
;
446 s
->mb_height
= (s
->height
+ 15) / 16;
448 if(s
->avctx
->pix_fmt
== PIX_FMT_NONE
){
449 av_log(s
->avctx
, AV_LOG_ERROR
, "decoding to PIX_FMT_NONE is not supported.\n");
453 if(s
->avctx
->thread_count
> MAX_THREADS
|| (s
->avctx
->thread_count
> s
->mb_height
&& s
->mb_height
)){
454 av_log(s
->avctx
, AV_LOG_ERROR
, "too many threads\n");
458 if((s
->width
|| s
->height
) && avcodec_check_dimensions(s
->avctx
, s
->width
, s
->height
))
461 dsputil_init(&s
->dsp
, s
->avctx
);
462 ff_dct_common_init(s
);
464 s
->flags
= s
->avctx
->flags
;
465 s
->flags2
= s
->avctx
->flags2
;
467 s
->mb_width
= (s
->width
+ 15) / 16;
468 s
->mb_stride
= s
->mb_width
+ 1;
469 s
->b8_stride
= s
->mb_width
*2 + 1;
470 s
->b4_stride
= s
->mb_width
*4 + 1;
471 mb_array_size
= s
->mb_height
* s
->mb_stride
;
472 mv_table_size
= (s
->mb_height
+2) * s
->mb_stride
+ 1;
474 /* set chroma shifts */
475 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
,&(s
->chroma_x_shift
),
476 &(s
->chroma_y_shift
) );
478 /* set default edge pos, will be overriden in decode_header if needed */
479 s
->h_edge_pos
= s
->mb_width
*16;
480 s
->v_edge_pos
= s
->mb_height
*16;
482 s
->mb_num
= s
->mb_width
* s
->mb_height
;
487 s
->block_wrap
[3]= s
->b8_stride
;
489 s
->block_wrap
[5]= s
->mb_stride
;
491 y_size
= s
->b8_stride
* (2 * s
->mb_height
+ 1);
492 c_size
= s
->mb_stride
* (s
->mb_height
+ 1);
493 yc_size
= y_size
+ 2 * c_size
;
495 /* convert fourcc to upper case */
496 s
->codec_tag
= toupper( s
->avctx
->codec_tag
&0xFF)
497 + (toupper((s
->avctx
->codec_tag
>>8 )&0xFF)<<8 )
498 + (toupper((s
->avctx
->codec_tag
>>16)&0xFF)<<16)
499 + (toupper((s
->avctx
->codec_tag
>>24)&0xFF)<<24);
501 s
->stream_codec_tag
= toupper( s
->avctx
->stream_codec_tag
&0xFF)
502 + (toupper((s
->avctx
->stream_codec_tag
>>8 )&0xFF)<<8 )
503 + (toupper((s
->avctx
->stream_codec_tag
>>16)&0xFF)<<16)
504 + (toupper((s
->avctx
->stream_codec_tag
>>24)&0xFF)<<24);
506 s
->avctx
->coded_frame
= (AVFrame
*)&s
->current_picture
;
508 CHECKED_ALLOCZ(s
->mb_index2xy
, (s
->mb_num
+1)*sizeof(int)) //error ressilience code looks cleaner with this
509 for(y
=0; y
<s
->mb_height
; y
++){
510 for(x
=0; x
<s
->mb_width
; x
++){
511 s
->mb_index2xy
[ x
+ y
*s
->mb_width
] = x
+ y
*s
->mb_stride
;
514 s
->mb_index2xy
[ s
->mb_height
*s
->mb_width
] = (s
->mb_height
-1)*s
->mb_stride
+ s
->mb_width
; //FIXME really needed?
517 /* Allocate MV tables */
518 CHECKED_ALLOCZ(s
->p_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
519 CHECKED_ALLOCZ(s
->b_forw_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
520 CHECKED_ALLOCZ(s
->b_back_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
521 CHECKED_ALLOCZ(s
->b_bidir_forw_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
522 CHECKED_ALLOCZ(s
->b_bidir_back_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
523 CHECKED_ALLOCZ(s
->b_direct_mv_table_base
, mv_table_size
* 2 * sizeof(int16_t))
524 s
->p_mv_table
= s
->p_mv_table_base
+ s
->mb_stride
+ 1;
525 s
->b_forw_mv_table
= s
->b_forw_mv_table_base
+ s
->mb_stride
+ 1;
526 s
->b_back_mv_table
= s
->b_back_mv_table_base
+ s
->mb_stride
+ 1;
527 s
->b_bidir_forw_mv_table
= s
->b_bidir_forw_mv_table_base
+ s
->mb_stride
+ 1;
528 s
->b_bidir_back_mv_table
= s
->b_bidir_back_mv_table_base
+ s
->mb_stride
+ 1;
529 s
->b_direct_mv_table
= s
->b_direct_mv_table_base
+ s
->mb_stride
+ 1;
531 if(s
->msmpeg4_version
){
532 CHECKED_ALLOCZ(s
->ac_stats
, 2*2*(MAX_LEVEL
+1)*(MAX_RUN
+1)*2*sizeof(int));
534 CHECKED_ALLOCZ(s
->avctx
->stats_out
, 256);
536 /* Allocate MB type table */
537 CHECKED_ALLOCZ(s
->mb_type
, mb_array_size
* sizeof(uint16_t)) //needed for encoding
539 CHECKED_ALLOCZ(s
->lambda_table
, mb_array_size
* sizeof(int))
541 CHECKED_ALLOCZ(s
->q_intra_matrix
, 64*32 * sizeof(int))
542 CHECKED_ALLOCZ(s
->q_inter_matrix
, 64*32 * sizeof(int))
543 CHECKED_ALLOCZ(s
->q_intra_matrix16
, 64*32*2 * sizeof(uint16_t))
544 CHECKED_ALLOCZ(s
->q_inter_matrix16
, 64*32*2 * sizeof(uint16_t))
545 CHECKED_ALLOCZ(s
->input_picture
, MAX_PICTURE_COUNT
* sizeof(Picture
*))
546 CHECKED_ALLOCZ(s
->reordered_input_picture
, MAX_PICTURE_COUNT
* sizeof(Picture
*))
548 if(s
->avctx
->noise_reduction
){
549 CHECKED_ALLOCZ(s
->dct_offset
, 2 * 64 * sizeof(uint16_t))
552 CHECKED_ALLOCZ(s
->picture
, MAX_PICTURE_COUNT
* sizeof(Picture
))
554 CHECKED_ALLOCZ(s
->error_status_table
, mb_array_size
*sizeof(uint8_t))
556 if(s
->codec_id
==CODEC_ID_MPEG4
|| (s
->flags
& CODEC_FLAG_INTERLACED_ME
)){
557 /* interlaced direct mode decoding tables */
562 CHECKED_ALLOCZ(s
->b_field_mv_table_base
[i
][j
][k
] , mv_table_size
* 2 * sizeof(int16_t))
563 s
->b_field_mv_table
[i
][j
][k
] = s
->b_field_mv_table_base
[i
][j
][k
] + s
->mb_stride
+ 1;
565 CHECKED_ALLOCZ(s
->b_field_select_table
[i
][j
] , mb_array_size
* 2 * sizeof(uint8_t))
566 CHECKED_ALLOCZ(s
->p_field_mv_table_base
[i
][j
] , mv_table_size
* 2 * sizeof(int16_t))
567 s
->p_field_mv_table
[i
][j
] = s
->p_field_mv_table_base
[i
][j
] + s
->mb_stride
+ 1;
569 CHECKED_ALLOCZ(s
->p_field_select_table
[i
] , mb_array_size
* 2 * sizeof(uint8_t))
572 if (s
->out_format
== FMT_H263
) {
574 CHECKED_ALLOCZ(s
->ac_val_base
, yc_size
* sizeof(int16_t) * 16);
575 s
->ac_val
[0] = s
->ac_val_base
+ s
->b8_stride
+ 1;
576 s
->ac_val
[1] = s
->ac_val_base
+ y_size
+ s
->mb_stride
+ 1;
577 s
->ac_val
[2] = s
->ac_val
[1] + c_size
;
580 CHECKED_ALLOCZ(s
->coded_block_base
, y_size
);
581 s
->coded_block
= s
->coded_block_base
+ s
->b8_stride
+ 1;
583 /* cbp, ac_pred, pred_dir */
584 CHECKED_ALLOCZ(s
->cbp_table
, mb_array_size
* sizeof(uint8_t))
585 CHECKED_ALLOCZ(s
->pred_dir_table
, mb_array_size
* sizeof(uint8_t))
588 if (s
->h263_pred
|| s
->h263_plus
|| !s
->encoding
) {
590 //MN: we need these for error resilience of intra-frames
591 CHECKED_ALLOCZ(s
->dc_val_base
, yc_size
* sizeof(int16_t));
592 s
->dc_val
[0] = s
->dc_val_base
+ s
->b8_stride
+ 1;
593 s
->dc_val
[1] = s
->dc_val_base
+ y_size
+ s
->mb_stride
+ 1;
594 s
->dc_val
[2] = s
->dc_val
[1] + c_size
;
595 for(i
=0;i
<yc_size
;i
++)
596 s
->dc_val_base
[i
] = 1024;
599 /* which mb is a intra block */
600 CHECKED_ALLOCZ(s
->mbintra_table
, mb_array_size
);
601 memset(s
->mbintra_table
, 1, mb_array_size
);
603 /* init macroblock skip table */
604 CHECKED_ALLOCZ(s
->mbskip_table
, mb_array_size
+2);
605 //Note the +1 is for a quicker mpeg4 slice_end detection
606 CHECKED_ALLOCZ(s
->prev_pict_types
, PREV_PICT_TYPES_BUFFER_SIZE
);
608 s
->parse_context
.state
= -1;
609 if((s
->avctx
->debug
&(FF_DEBUG_VIS_QP
|FF_DEBUG_VIS_MB_TYPE
)) || (s
->avctx
->debug_mv
)){
610 s
->visualization_buffer
[0] = av_malloc((s
->mb_width
*16 + 2*EDGE_WIDTH
) * s
->mb_height
*16 + 2*EDGE_WIDTH
);
611 s
->visualization_buffer
[1] = av_malloc((s
->mb_width
*16 + 2*EDGE_WIDTH
) * s
->mb_height
*16 + 2*EDGE_WIDTH
);
612 s
->visualization_buffer
[2] = av_malloc((s
->mb_width
*16 + 2*EDGE_WIDTH
) * s
->mb_height
*16 + 2*EDGE_WIDTH
);
615 s
->context_initialized
= 1;
617 s
->thread_context
[0]= s
;
618 threads
= s
->avctx
->thread_count
;
620 for(i
=1; i
<threads
; i
++){
621 s
->thread_context
[i
]= av_malloc(sizeof(MpegEncContext
));
622 memcpy(s
->thread_context
[i
], s
, sizeof(MpegEncContext
));
625 for(i
=0; i
<threads
; i
++){
626 if(init_duplicate_context(s
->thread_context
[i
], s
) < 0)
628 s
->thread_context
[i
]->start_mb_y
= (s
->mb_height
*(i
) + s
->avctx
->thread_count
/2) / s
->avctx
->thread_count
;
629 s
->thread_context
[i
]->end_mb_y
= (s
->mb_height
*(i
+1) + s
->avctx
->thread_count
/2) / s
->avctx
->thread_count
;
638 /* init common structure for both encoder and decoder */
639 void MPV_common_end(MpegEncContext
*s
)
643 for(i
=0; i
<s
->avctx
->thread_count
; i
++){
644 free_duplicate_context(s
->thread_context
[i
]);
646 for(i
=1; i
<s
->avctx
->thread_count
; i
++){
647 av_freep(&s
->thread_context
[i
]);
650 av_freep(&s
->parse_context
.buffer
);
651 s
->parse_context
.buffer_size
=0;
653 av_freep(&s
->mb_type
);
654 av_freep(&s
->p_mv_table_base
);
655 av_freep(&s
->b_forw_mv_table_base
);
656 av_freep(&s
->b_back_mv_table_base
);
657 av_freep(&s
->b_bidir_forw_mv_table_base
);
658 av_freep(&s
->b_bidir_back_mv_table_base
);
659 av_freep(&s
->b_direct_mv_table_base
);
661 s
->b_forw_mv_table
= NULL
;
662 s
->b_back_mv_table
= NULL
;
663 s
->b_bidir_forw_mv_table
= NULL
;
664 s
->b_bidir_back_mv_table
= NULL
;
665 s
->b_direct_mv_table
= NULL
;
669 av_freep(&s
->b_field_mv_table_base
[i
][j
][k
]);
670 s
->b_field_mv_table
[i
][j
][k
]=NULL
;
672 av_freep(&s
->b_field_select_table
[i
][j
]);
673 av_freep(&s
->p_field_mv_table_base
[i
][j
]);
674 s
->p_field_mv_table
[i
][j
]=NULL
;
676 av_freep(&s
->p_field_select_table
[i
]);
679 av_freep(&s
->dc_val_base
);
680 av_freep(&s
->ac_val_base
);
681 av_freep(&s
->coded_block_base
);
682 av_freep(&s
->mbintra_table
);
683 av_freep(&s
->cbp_table
);
684 av_freep(&s
->pred_dir_table
);
686 av_freep(&s
->mbskip_table
);
687 av_freep(&s
->prev_pict_types
);
688 av_freep(&s
->bitstream_buffer
);
689 s
->allocated_bitstream_buffer_size
=0;
691 av_freep(&s
->avctx
->stats_out
);
692 av_freep(&s
->ac_stats
);
693 av_freep(&s
->error_status_table
);
694 av_freep(&s
->mb_index2xy
);
695 av_freep(&s
->lambda_table
);
696 av_freep(&s
->q_intra_matrix
);
697 av_freep(&s
->q_inter_matrix
);
698 av_freep(&s
->q_intra_matrix16
);
699 av_freep(&s
->q_inter_matrix16
);
700 av_freep(&s
->input_picture
);
701 av_freep(&s
->reordered_input_picture
);
702 av_freep(&s
->dct_offset
);
705 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
706 free_picture(s
, &s
->picture
[i
]);
709 av_freep(&s
->picture
);
710 s
->context_initialized
= 0;
713 s
->current_picture_ptr
= NULL
;
714 s
->linesize
= s
->uvlinesize
= 0;
717 av_freep(&s
->visualization_buffer
[i
]);
719 avcodec_default_free_buffers(s
->avctx
);
722 void init_rl(RLTable
*rl
, uint8_t static_store
[2][2*MAX_RUN
+ MAX_LEVEL
+ 3])
724 int8_t max_level
[MAX_RUN
+1], max_run
[MAX_LEVEL
+1];
725 uint8_t index_run
[MAX_RUN
+1];
726 int last
, run
, level
, start
, end
, i
;
728 /* If table is static, we can quit if rl->max_level[0] is not NULL */
729 if(static_store
&& rl
->max_level
[0])
732 /* compute max_level[], max_run[] and index_run[] */
733 for(last
=0;last
<2;last
++) {
742 memset(max_level
, 0, MAX_RUN
+ 1);
743 memset(max_run
, 0, MAX_LEVEL
+ 1);
744 memset(index_run
, rl
->n
, MAX_RUN
+ 1);
745 for(i
=start
;i
<end
;i
++) {
746 run
= rl
->table_run
[i
];
747 level
= rl
->table_level
[i
];
748 if (index_run
[run
] == rl
->n
)
750 if (level
> max_level
[run
])
751 max_level
[run
] = level
;
752 if (run
> max_run
[level
])
753 max_run
[level
] = run
;
756 rl
->max_level
[last
] = static_store
[last
];
758 rl
->max_level
[last
] = av_malloc(MAX_RUN
+ 1);
759 memcpy(rl
->max_level
[last
], max_level
, MAX_RUN
+ 1);
761 rl
->max_run
[last
] = static_store
[last
] + MAX_RUN
+ 1;
763 rl
->max_run
[last
] = av_malloc(MAX_LEVEL
+ 1);
764 memcpy(rl
->max_run
[last
], max_run
, MAX_LEVEL
+ 1);
766 rl
->index_run
[last
] = static_store
[last
] + MAX_RUN
+ MAX_LEVEL
+ 2;
768 rl
->index_run
[last
] = av_malloc(MAX_RUN
+ 1);
769 memcpy(rl
->index_run
[last
], index_run
, MAX_RUN
+ 1);
773 void init_vlc_rl(RLTable
*rl
)
785 for(i
=0; i
<rl
->vlc
.table_size
; i
++){
786 int code
= rl
->vlc
.table
[i
][0];
787 int len
= rl
->vlc
.table
[i
][1];
790 if(len
==0){ // illegal code
793 }else if(len
<0){ //more bits needed
797 if(code
==rl
->n
){ //esc
801 run
= rl
->table_run
[code
] + 1;
802 level
= rl
->table_level
[code
] * qmul
+ qadd
;
803 if(code
>= rl
->last
) run
+=192;
806 rl
->rl_vlc
[q
][i
].len
= len
;
807 rl
->rl_vlc
[q
][i
].level
= level
;
808 rl
->rl_vlc
[q
][i
].run
= run
;
813 int ff_find_unused_picture(MpegEncContext
*s
, int shared
){
817 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
818 if(s
->picture
[i
].data
[0]==NULL
&& s
->picture
[i
].type
==0) return i
;
821 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
822 if(s
->picture
[i
].data
[0]==NULL
&& s
->picture
[i
].type
!=0) return i
; //FIXME
824 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
825 if(s
->picture
[i
].data
[0]==NULL
) return i
;
829 av_log(s
->avctx
, AV_LOG_FATAL
, "Internal error, picture buffer overflow\n");
830 /* We could return -1, but the codec would crash trying to draw into a
831 * non-existing frame anyway. This is safer than waiting for a random crash.
832 * Also the return of this is never useful, an encoder must only allocate
833 * as much as allowed in the specification. This has no relationship to how
834 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
835 * enough for such valid streams).
836 * Plus, a decoder has to check stream validity and remove frames if too
837 * many reference frames are around. Waiting for "OOM" is not correct at
838 * all. Similarly, missing reference frames have to be replaced by
839 * interpolated/MC frames, anything else is a bug in the codec ...
845 static void update_noise_reduction(MpegEncContext
*s
){
848 for(intra
=0; intra
<2; intra
++){
849 if(s
->dct_count
[intra
] > (1<<16)){
851 s
->dct_error_sum
[intra
][i
] >>=1;
853 s
->dct_count
[intra
] >>= 1;
857 s
->dct_offset
[intra
][i
]= (s
->avctx
->noise_reduction
* s
->dct_count
[intra
] + s
->dct_error_sum
[intra
][i
]/2) / (s
->dct_error_sum
[intra
][i
]+1);
863 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
865 int MPV_frame_start(MpegEncContext
*s
, AVCodecContext
*avctx
)
871 assert(s
->last_picture_ptr
==NULL
|| s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
);
873 /* mark&release old frames */
874 if (s
->pict_type
!= FF_B_TYPE
&& s
->last_picture_ptr
&& s
->last_picture_ptr
!= s
->next_picture_ptr
&& s
->last_picture_ptr
->data
[0]) {
875 if(s
->out_format
!= FMT_H264
|| s
->codec_id
== CODEC_ID_SVQ3
){
876 free_frame_buffer(s
, s
->last_picture_ptr
);
878 /* release forgotten pictures */
879 /* if(mpeg124/h263) */
881 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
882 if(s
->picture
[i
].data
[0] && &s
->picture
[i
] != s
->next_picture_ptr
&& s
->picture
[i
].reference
){
883 av_log(avctx
, AV_LOG_ERROR
, "releasing zombie picture\n");
884 free_frame_buffer(s
, &s
->picture
[i
]);
892 /* release non reference frames */
893 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
894 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
895 free_frame_buffer(s
, &s
->picture
[i
]);
899 if(s
->current_picture_ptr
&& s
->current_picture_ptr
->data
[0]==NULL
)
900 pic
= (AVFrame
*)s
->current_picture_ptr
; //we already have a unused image (maybe it was set before reading the header)
902 i
= ff_find_unused_picture(s
, 0);
903 pic
= (AVFrame
*)&s
->picture
[i
];
908 if (s
->codec_id
== CODEC_ID_H264
)
909 pic
->reference
= s
->picture_structure
;
910 else if (s
->pict_type
!= FF_B_TYPE
)
914 pic
->coded_picture_number
= s
->coded_picture_number
++;
916 if( alloc_picture(s
, (Picture
*)pic
, 0) < 0)
919 s
->current_picture_ptr
= (Picture
*)pic
;
920 s
->current_picture_ptr
->top_field_first
= s
->top_field_first
; //FIXME use only the vars from current_pic
921 s
->current_picture_ptr
->interlaced_frame
= !s
->progressive_frame
&& !s
->progressive_sequence
;
924 s
->current_picture_ptr
->pict_type
= s
->pict_type
;
925 // if(s->flags && CODEC_FLAG_QSCALE)
926 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
927 s
->current_picture_ptr
->key_frame
= s
->pict_type
== FF_I_TYPE
;
929 ff_copy_picture(&s
->current_picture
, s
->current_picture_ptr
);
931 if (s
->pict_type
!= FF_B_TYPE
) {
932 s
->last_picture_ptr
= s
->next_picture_ptr
;
934 s
->next_picture_ptr
= s
->current_picture_ptr
;
936 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
937 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
938 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
939 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
940 s->pict_type, s->dropable);*/
942 if(s
->last_picture_ptr
) ff_copy_picture(&s
->last_picture
, s
->last_picture_ptr
);
943 if(s
->next_picture_ptr
) ff_copy_picture(&s
->next_picture
, s
->next_picture_ptr
);
945 if(s
->pict_type
!= FF_I_TYPE
&& (s
->last_picture_ptr
==NULL
|| s
->last_picture_ptr
->data
[0]==NULL
) && !s
->dropable
&& s
->codec_id
!= CODEC_ID_H264
){
946 av_log(avctx
, AV_LOG_ERROR
, "warning: first frame is no keyframe\n");
947 assert(s
->pict_type
!= FF_B_TYPE
); //these should have been dropped if we don't have a reference
951 assert(s
->pict_type
== FF_I_TYPE
|| (s
->last_picture_ptr
&& s
->last_picture_ptr
->data
[0]));
953 if(s
->picture_structure
!=PICT_FRAME
&& s
->out_format
!= FMT_H264
){
956 if(s
->picture_structure
== PICT_BOTTOM_FIELD
){
957 s
->current_picture
.data
[i
] += s
->current_picture
.linesize
[i
];
959 s
->current_picture
.linesize
[i
] *= 2;
960 s
->last_picture
.linesize
[i
] *=2;
961 s
->next_picture
.linesize
[i
] *=2;
965 s
->hurry_up
= s
->avctx
->hurry_up
;
966 s
->error_recognition
= avctx
->error_recognition
;
968 /* set dequantizer, we can't do it during init as it might change for mpeg4
969 and we can't do it in the header decode as init is not called for mpeg4 there yet */
970 if(s
->mpeg_quant
|| s
->codec_id
== CODEC_ID_MPEG2VIDEO
){
971 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg2_intra
;
972 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg2_inter
;
973 }else if(s
->out_format
== FMT_H263
|| s
->out_format
== FMT_H261
){
974 s
->dct_unquantize_intra
= s
->dct_unquantize_h263_intra
;
975 s
->dct_unquantize_inter
= s
->dct_unquantize_h263_inter
;
977 s
->dct_unquantize_intra
= s
->dct_unquantize_mpeg1_intra
;
978 s
->dct_unquantize_inter
= s
->dct_unquantize_mpeg1_inter
;
981 if(s
->dct_error_sum
){
982 assert(s
->avctx
->noise_reduction
&& s
->encoding
);
984 update_noise_reduction(s
);
987 if(CONFIG_MPEG_XVMC_DECODER
&& s
->avctx
->xvmc_acceleration
)
988 return ff_xvmc_field_start(s
, avctx
);
993 /* generic function for encode/decode called after a frame has been coded/decoded */
994 void MPV_frame_end(MpegEncContext
*s
)
997 /* draw edge for correct motion prediction if outside */
998 //just to make sure that all data is rendered.
999 if(CONFIG_MPEG_XVMC_DECODER
&& s
->avctx
->xvmc_acceleration
){
1000 ff_xvmc_field_end(s
);
1001 }else if(!s
->avctx
->hwaccel
1002 && !(s
->avctx
->codec
->capabilities
&CODEC_CAP_HWACCEL_VDPAU
)
1003 && s
->unrestricted_mv
1004 && s
->current_picture
.reference
1006 && !(s
->flags
&CODEC_FLAG_EMU_EDGE
)) {
1007 s
->dsp
.draw_edges(s
->current_picture
.data
[0], s
->linesize
, s
->h_edge_pos
, s
->v_edge_pos
, EDGE_WIDTH
);
1008 s
->dsp
.draw_edges(s
->current_picture
.data
[1], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1009 s
->dsp
.draw_edges(s
->current_picture
.data
[2], s
->uvlinesize
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1, EDGE_WIDTH
/2);
1013 s
->last_pict_type
= s
->pict_type
;
1014 s
->last_lambda_for
[s
->pict_type
]= s
->current_picture_ptr
->quality
;
1015 if(s
->pict_type
!=FF_B_TYPE
){
1016 s
->last_non_b_pict_type
= s
->pict_type
;
1019 /* copy back current_picture variables */
1020 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1021 if(s
->picture
[i
].data
[0] == s
->current_picture
.data
[0]){
1022 s
->picture
[i
]= s
->current_picture
;
1026 assert(i
<MAX_PICTURE_COUNT
);
1030 /* release non-reference frames */
1031 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
1032 if(s
->picture
[i
].data
[0] && !s
->picture
[i
].reference
/*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1033 free_frame_buffer(s
, &s
->picture
[i
]);
1037 // clear copies, to avoid confusion
1039 memset(&s
->last_picture
, 0, sizeof(Picture
));
1040 memset(&s
->next_picture
, 0, sizeof(Picture
));
1041 memset(&s
->current_picture
, 0, sizeof(Picture
));
1043 s
->avctx
->coded_frame
= (AVFrame
*)s
->current_picture_ptr
;
1047 * draws an line from (ex, ey) -> (sx, sy).
1048 * @param w width of the image
1049 * @param h height of the image
1050 * @param stride stride/linesize of the image
1051 * @param color color of the arrow
1053 static void draw_line(uint8_t *buf
, int sx
, int sy
, int ex
, int ey
, int w
, int h
, int stride
, int color
){
1056 sx
= av_clip(sx
, 0, w
-1);
1057 sy
= av_clip(sy
, 0, h
-1);
1058 ex
= av_clip(ex
, 0, w
-1);
1059 ey
= av_clip(ey
, 0, h
-1);
1061 buf
[sy
*stride
+ sx
]+= color
;
1063 if(FFABS(ex
- sx
) > FFABS(ey
- sy
)){
1065 FFSWAP(int, sx
, ex
);
1066 FFSWAP(int, sy
, ey
);
1068 buf
+= sx
+ sy
*stride
;
1070 f
= ((ey
-sy
)<<16)/ex
;
1071 for(x
= 0; x
<= ex
; x
++){
1074 buf
[ y
*stride
+ x
]+= (color
*(0x10000-fr
))>>16;
1075 buf
[(y
+1)*stride
+ x
]+= (color
* fr
)>>16;
1079 FFSWAP(int, sx
, ex
);
1080 FFSWAP(int, sy
, ey
);
1082 buf
+= sx
+ sy
*stride
;
1084 if(ey
) f
= ((ex
-sx
)<<16)/ey
;
1086 for(y
= 0; y
<= ey
; y
++){
1089 buf
[y
*stride
+ x
]+= (color
*(0x10000-fr
))>>16;
1090 buf
[y
*stride
+ x
+1]+= (color
* fr
)>>16;
/**
 * Draws an arrow from (ex, ey) -> (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int dx,dy;

    /* keep the endpoints near the picture; draw_line() does the exact clipping */
    sx= av_clip(sx, -100, w+100);
    sy= av_clip(sy, -100, h+100);
    ex= av_clip(ex, -100, w+100);
    ey= av_clip(ey, -100, h+100);

    dx= ex - sx;
    dy= ey - sy;

    /* only draw the arrow head when the vector is long enough to see it */
    if(dx*dx + dy*dy > 3*3){
        int rx=  dx + dy;
        int ry= -dx + dy;
        int length= ff_sqrt((rx*rx + ry*ry)<<8);

        //FIXME subpixel accuracy
        rx= ROUNDED_DIV(rx*3<<4, length);
        ry= ROUNDED_DIV(ry*3<<4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1129 * prints debuging info for the given picture.
1131 void ff_print_debug_info(MpegEncContext
*s
, AVFrame
*pict
){
1133 if(s
->avctx
->hwaccel
|| !pict
|| !pict
->mb_type
) return;
1135 if(s
->avctx
->debug
&(FF_DEBUG_SKIP
| FF_DEBUG_QP
| FF_DEBUG_MB_TYPE
)){
1138 av_log(s
->avctx
,AV_LOG_DEBUG
,"New frame, type: ");
1139 switch (pict
->pict_type
) {
1140 case FF_I_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"I\n"); break;
1141 case FF_P_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"P\n"); break;
1142 case FF_B_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"B\n"); break;
1143 case FF_S_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"S\n"); break;
1144 case FF_SI_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SI\n"); break;
1145 case FF_SP_TYPE
: av_log(s
->avctx
,AV_LOG_DEBUG
,"SP\n"); break;
1147 for(y
=0; y
<s
->mb_height
; y
++){
1148 for(x
=0; x
<s
->mb_width
; x
++){
1149 if(s
->avctx
->debug
&FF_DEBUG_SKIP
){
1150 int count
= s
->mbskip_table
[x
+ y
*s
->mb_stride
];
1151 if(count
>9) count
=9;
1152 av_log(s
->avctx
, AV_LOG_DEBUG
, "%1d", count
);
1154 if(s
->avctx
->debug
&FF_DEBUG_QP
){
1155 av_log(s
->avctx
, AV_LOG_DEBUG
, "%2d", pict
->qscale_table
[x
+ y
*s
->mb_stride
]);
1157 if(s
->avctx
->debug
&FF_DEBUG_MB_TYPE
){
1158 int mb_type
= pict
->mb_type
[x
+ y
*s
->mb_stride
];
1159 //Type & MV direction
1161 av_log(s
->avctx
, AV_LOG_DEBUG
, "P");
1162 else if(IS_INTRA(mb_type
) && IS_ACPRED(mb_type
))
1163 av_log(s
->avctx
, AV_LOG_DEBUG
, "A");
1164 else if(IS_INTRA4x4(mb_type
))
1165 av_log(s
->avctx
, AV_LOG_DEBUG
, "i");
1166 else if(IS_INTRA16x16(mb_type
))
1167 av_log(s
->avctx
, AV_LOG_DEBUG
, "I");
1168 else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
))
1169 av_log(s
->avctx
, AV_LOG_DEBUG
, "d");
1170 else if(IS_DIRECT(mb_type
))
1171 av_log(s
->avctx
, AV_LOG_DEBUG
, "D");
1172 else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
))
1173 av_log(s
->avctx
, AV_LOG_DEBUG
, "g");
1174 else if(IS_GMC(mb_type
))
1175 av_log(s
->avctx
, AV_LOG_DEBUG
, "G");
1176 else if(IS_SKIP(mb_type
))
1177 av_log(s
->avctx
, AV_LOG_DEBUG
, "S");
1178 else if(!USES_LIST(mb_type
, 1))
1179 av_log(s
->avctx
, AV_LOG_DEBUG
, ">");
1180 else if(!USES_LIST(mb_type
, 0))
1181 av_log(s
->avctx
, AV_LOG_DEBUG
, "<");
1183 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1184 av_log(s
->avctx
, AV_LOG_DEBUG
, "X");
1189 av_log(s
->avctx
, AV_LOG_DEBUG
, "+");
1190 else if(IS_16X8(mb_type
))
1191 av_log(s
->avctx
, AV_LOG_DEBUG
, "-");
1192 else if(IS_8X16(mb_type
))
1193 av_log(s
->avctx
, AV_LOG_DEBUG
, "|");
1194 else if(IS_INTRA(mb_type
) || IS_16X16(mb_type
))
1195 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1197 av_log(s
->avctx
, AV_LOG_DEBUG
, "?");
1200 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
)
1201 av_log(s
->avctx
, AV_LOG_DEBUG
, "=");
1203 av_log(s
->avctx
, AV_LOG_DEBUG
, " ");
1205 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1207 av_log(s
->avctx
, AV_LOG_DEBUG
, "\n");
1211 if((s
->avctx
->debug
&(FF_DEBUG_VIS_QP
|FF_DEBUG_VIS_MB_TYPE
)) || (s
->avctx
->debug_mv
)){
1212 const int shift
= 1 + s
->quarter_sample
;
1216 int h_chroma_shift
, v_chroma_shift
, block_height
;
1217 const int width
= s
->avctx
->width
;
1218 const int height
= s
->avctx
->height
;
1219 const int mv_sample_log2
= 4 - pict
->motion_subsample_log2
;
1220 const int mv_stride
= (s
->mb_width
<< mv_sample_log2
) + (s
->codec_id
== CODEC_ID_H264
? 0 : 1);
1221 s
->low_delay
=0; //needed to see the vectors without trashing the buffers
1223 avcodec_get_chroma_sub_sample(s
->avctx
->pix_fmt
, &h_chroma_shift
, &v_chroma_shift
);
1225 memcpy(s
->visualization_buffer
[i
], pict
->data
[i
], (i
==0) ? pict
->linesize
[i
]*height
:pict
->linesize
[i
]*height
>> v_chroma_shift
);
1226 pict
->data
[i
]= s
->visualization_buffer
[i
];
1228 pict
->type
= FF_BUFFER_TYPE_COPY
;
1230 block_height
= 16>>v_chroma_shift
;
1232 for(mb_y
=0; mb_y
<s
->mb_height
; mb_y
++){
1234 for(mb_x
=0; mb_x
<s
->mb_width
; mb_x
++){
1235 const int mb_index
= mb_x
+ mb_y
*s
->mb_stride
;
1236 if((s
->avctx
->debug_mv
) && pict
->motion_val
){
1238 for(type
=0; type
<3; type
++){
1241 case 0: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_P_FOR
)) || (pict
->pict_type
!=FF_P_TYPE
))
1245 case 1: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_FOR
)) || (pict
->pict_type
!=FF_B_TYPE
))
1249 case 2: if ((!(s
->avctx
->debug_mv
&FF_DEBUG_VIS_MV_B_BACK
)) || (pict
->pict_type
!=FF_B_TYPE
))
1254 if(!USES_LIST(pict
->mb_type
[mb_index
], direction
))
1257 if(IS_8X8(pict
->mb_type
[mb_index
])){
1260 int sx
= mb_x
*16 + 4 + 8*(i
&1);
1261 int sy
= mb_y
*16 + 4 + 8*(i
>>1);
1262 int xy
= (mb_x
*2 + (i
&1) + (mb_y
*2 + (i
>>1))*mv_stride
) << (mv_sample_log2
-1);
1263 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1264 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1265 draw_arrow(ptr
, sx
, sy
, mx
, my
, width
, height
, s
->linesize
, 100);
1267 }else if(IS_16X8(pict
->mb_type
[mb_index
])){
1271 int sy
=mb_y
*16 + 4 + 8*i
;
1272 int xy
= (mb_x
*2 + (mb_y
*2 + i
)*mv_stride
) << (mv_sample_log2
-1);
1273 int mx
=(pict
->motion_val
[direction
][xy
][0]>>shift
);
1274 int my
=(pict
->motion_val
[direction
][xy
][1]>>shift
);
1276 if(IS_INTERLACED(pict
->mb_type
[mb_index
]))
1279 draw_arrow(ptr
, sx
, sy
, mx
+sx
, my
+sy
, width
, height
, s
->linesize
, 100);
1281 }else if(IS_8X16(pict
->mb_type
[mb_index
])){
1284 int sx
=mb_x
*16 + 4 + 8*i
;
1286 int xy
= (mb_x
*2 + i
+ mb_y
*2*mv_stride
) << (mv_sample_log2
-1);
1287 int mx
=(pict
->motion_val
[direction
][xy
][0]>>shift
);
1288 int my
=(pict
->motion_val
[direction
][xy
][1]>>shift
);
1290 if(IS_INTERLACED(pict
->mb_type
[mb_index
]))
1293 draw_arrow(ptr
, sx
, sy
, mx
+sx
, my
+sy
, width
, height
, s
->linesize
, 100);
1296 int sx
= mb_x
*16 + 8;
1297 int sy
= mb_y
*16 + 8;
1298 int xy
= (mb_x
+ mb_y
*mv_stride
) << mv_sample_log2
;
1299 int mx
= (pict
->motion_val
[direction
][xy
][0]>>shift
) + sx
;
1300 int my
= (pict
->motion_val
[direction
][xy
][1]>>shift
) + sy
;
1301 draw_arrow(ptr
, sx
, sy
, mx
, my
, width
, height
, s
->linesize
, 100);
1305 if((s
->avctx
->debug
&FF_DEBUG_VIS_QP
) && pict
->motion_val
){
1306 uint64_t c
= (pict
->qscale_table
[mb_index
]*128/31) * 0x0101010101010101ULL
;
1308 for(y
=0; y
<block_height
; y
++){
1309 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (block_height
*mb_y
+ y
)*pict
->linesize
[1])= c
;
1310 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (block_height
*mb_y
+ y
)*pict
->linesize
[2])= c
;
1313 if((s
->avctx
->debug
&FF_DEBUG_VIS_MB_TYPE
) && pict
->motion_val
){
1314 int mb_type
= pict
->mb_type
[mb_index
];
1317 #define COLOR(theta, r)\
1318 u= (int)(128 + r*cos(theta*3.141592/180));\
1319 v= (int)(128 + r*sin(theta*3.141592/180));
1323 if(IS_PCM(mb_type
)){
1325 }else if((IS_INTRA(mb_type
) && IS_ACPRED(mb_type
)) || IS_INTRA16x16(mb_type
)){
1327 }else if(IS_INTRA4x4(mb_type
)){
1329 }else if(IS_DIRECT(mb_type
) && IS_SKIP(mb_type
)){
1331 }else if(IS_DIRECT(mb_type
)){
1333 }else if(IS_GMC(mb_type
) && IS_SKIP(mb_type
)){
1335 }else if(IS_GMC(mb_type
)){
1337 }else if(IS_SKIP(mb_type
)){
1339 }else if(!USES_LIST(mb_type
, 1)){
1341 }else if(!USES_LIST(mb_type
, 0)){
1344 assert(USES_LIST(mb_type
, 0) && USES_LIST(mb_type
, 1));
1348 u
*= 0x0101010101010101ULL
;
1349 v
*= 0x0101010101010101ULL
;
1350 for(y
=0; y
<block_height
; y
++){
1351 *(uint64_t*)(pict
->data
[1] + 8*mb_x
+ (block_height
*mb_y
+ y
)*pict
->linesize
[1])= u
;
1352 *(uint64_t*)(pict
->data
[2] + 8*mb_x
+ (block_height
*mb_y
+ y
)*pict
->linesize
[2])= v
;
1356 if(IS_8X8(mb_type
) || IS_16X8(mb_type
)){
1357 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 0 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1358 *(uint64_t*)(pict
->data
[0] + 16*mb_x
+ 8 + (16*mb_y
+ 8)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1360 if(IS_8X8(mb_type
) || IS_8X16(mb_type
)){
1362 pict
->data
[0][16*mb_x
+ 8 + (16*mb_y
+ y
)*pict
->linesize
[0]]^= 0x80;
1364 if(IS_8X8(mb_type
) && mv_sample_log2
>= 2){
1365 int dm
= 1 << (mv_sample_log2
-2);
1367 int sx
= mb_x
*16 + 8*(i
&1);
1368 int sy
= mb_y
*16 + 8*(i
>>1);
1369 int xy
= (mb_x
*2 + (i
&1) + (mb_y
*2 + (i
>>1))*mv_stride
) << (mv_sample_log2
-1);
1371 int32_t *mv
= (int32_t*)&pict
->motion_val
[0][xy
];
1372 if(mv
[0] != mv
[dm
] || mv
[dm
*mv_stride
] != mv
[dm
*(mv_stride
+1)])
1374 pict
->data
[0][sx
+ 4 + (sy
+ y
)*pict
->linesize
[0]]^= 0x80;
1375 if(mv
[0] != mv
[dm
*mv_stride
] || mv
[dm
] != mv
[dm
*(mv_stride
+1)])
1376 *(uint64_t*)(pict
->data
[0] + sx
+ (sy
+ 4)*pict
->linesize
[0])^= 0x8080808080808080ULL
;
1380 if(IS_INTERLACED(mb_type
) && s
->codec_id
== CODEC_ID_H264
){
1384 s
->mbskip_table
[mb_index
]=0;
1390 static inline int hpel_motion_lowres(MpegEncContext
*s
,
1391 uint8_t *dest
, uint8_t *src
,
1392 int field_based
, int field_select
,
1393 int src_x
, int src_y
,
1394 int width
, int height
, int stride
,
1395 int h_edge_pos
, int v_edge_pos
,
1396 int w
, int h
, h264_chroma_mc_func
*pix_op
,
1397 int motion_x
, int motion_y
)
1399 const int lowres
= s
->avctx
->lowres
;
1400 const int s_mask
= (2<<lowres
)-1;
1404 if(s
->quarter_sample
){
1409 sx
= motion_x
& s_mask
;
1410 sy
= motion_y
& s_mask
;
1411 src_x
+= motion_x
>> (lowres
+1);
1412 src_y
+= motion_y
>> (lowres
+1);
1414 src
+= src_y
* stride
+ src_x
;
1416 if( (unsigned)src_x
> h_edge_pos
- (!!sx
) - w
1417 || (unsigned)src_y
>(v_edge_pos
>> field_based
) - (!!sy
) - h
){
1418 ff_emulated_edge_mc(s
->edge_emu_buffer
, src
, s
->linesize
, w
+1, (h
+1)<<field_based
,
1419 src_x
, src_y
<<field_based
, h_edge_pos
, v_edge_pos
);
1420 src
= s
->edge_emu_buffer
;
1428 pix_op
[lowres
](dest
, src
, stride
, h
, sx
, sy
);
1432 /* apply one mpeg motion vector to the three components */
1433 static av_always_inline
void mpeg_motion_lowres(MpegEncContext
*s
,
1434 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
1435 int field_based
, int bottom_field
, int field_select
,
1436 uint8_t **ref_picture
, h264_chroma_mc_func
*pix_op
,
1437 int motion_x
, int motion_y
, int h
)
1439 uint8_t *ptr_y
, *ptr_cb
, *ptr_cr
;
1440 int mx
, my
, src_x
, src_y
, uvsrc_x
, uvsrc_y
, uvlinesize
, linesize
, sx
, sy
, uvsx
, uvsy
;
1441 const int lowres
= s
->avctx
->lowres
;
1442 const int block_s
= 8>>lowres
;
1443 const int s_mask
= (2<<lowres
)-1;
1444 const int h_edge_pos
= s
->h_edge_pos
>> lowres
;
1445 const int v_edge_pos
= s
->v_edge_pos
>> lowres
;
1446 linesize
= s
->current_picture
.linesize
[0] << field_based
;
1447 uvlinesize
= s
->current_picture
.linesize
[1] << field_based
;
1449 if(s
->quarter_sample
){ //FIXME obviously not perfect but qpel will not work in lowres anyway
1455 motion_y
+= (bottom_field
- field_select
)*((1<<lowres
)-1);
1458 sx
= motion_x
& s_mask
;
1459 sy
= motion_y
& s_mask
;
1460 src_x
= s
->mb_x
*2*block_s
+ (motion_x
>> (lowres
+1));
1461 src_y
=(s
->mb_y
*2*block_s
>>field_based
) + (motion_y
>> (lowres
+1));
1463 if (s
->out_format
== FMT_H263
) {
1464 uvsx
= ((motion_x
>>1) & s_mask
) | (sx
&1);
1465 uvsy
= ((motion_y
>>1) & s_mask
) | (sy
&1);
1468 }else if(s
->out_format
== FMT_H261
){//even chroma mv's are full pel in H261
1471 uvsx
= (2*mx
) & s_mask
;
1472 uvsy
= (2*my
) & s_mask
;
1473 uvsrc_x
= s
->mb_x
*block_s
+ (mx
>> lowres
);
1474 uvsrc_y
= s
->mb_y
*block_s
+ (my
>> lowres
);
1480 uvsrc_x
= s
->mb_x
*block_s
+ (mx
>> (lowres
+1));
1481 uvsrc_y
=(s
->mb_y
*block_s
>>field_based
) + (my
>> (lowres
+1));
1484 ptr_y
= ref_picture
[0] + src_y
* linesize
+ src_x
;
1485 ptr_cb
= ref_picture
[1] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
1486 ptr_cr
= ref_picture
[2] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
1488 if( (unsigned)src_x
> h_edge_pos
- (!!sx
) - 2*block_s
1489 || (unsigned)src_y
>(v_edge_pos
>> field_based
) - (!!sy
) - h
){
1490 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr_y
, s
->linesize
, 17, 17+field_based
,
1491 src_x
, src_y
<<field_based
, h_edge_pos
, v_edge_pos
);
1492 ptr_y
= s
->edge_emu_buffer
;
1493 if(!CONFIG_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
1494 uint8_t *uvbuf
= s
->edge_emu_buffer
+18*s
->linesize
;
1495 ff_emulated_edge_mc(uvbuf
, ptr_cb
, s
->uvlinesize
, 9, 9+field_based
,
1496 uvsrc_x
, uvsrc_y
<<field_based
, h_edge_pos
>>1, v_edge_pos
>>1);
1497 ff_emulated_edge_mc(uvbuf
+16, ptr_cr
, s
->uvlinesize
, 9, 9+field_based
,
1498 uvsrc_x
, uvsrc_y
<<field_based
, h_edge_pos
>>1, v_edge_pos
>>1);
1504 if(bottom_field
){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1505 dest_y
+= s
->linesize
;
1506 dest_cb
+= s
->uvlinesize
;
1507 dest_cr
+= s
->uvlinesize
;
1511 ptr_y
+= s
->linesize
;
1512 ptr_cb
+= s
->uvlinesize
;
1513 ptr_cr
+= s
->uvlinesize
;
1518 pix_op
[lowres
-1](dest_y
, ptr_y
, linesize
, h
, sx
, sy
);
1520 if(!CONFIG_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
1521 uvsx
<<= 2 - lowres
;
1522 uvsy
<<= 2 - lowres
;
1523 pix_op
[lowres
](dest_cb
, ptr_cb
, uvlinesize
, h
>> s
->chroma_y_shift
, uvsx
, uvsy
);
1524 pix_op
[lowres
](dest_cr
, ptr_cr
, uvlinesize
, h
>> s
->chroma_y_shift
, uvsx
, uvsy
);
1526 //FIXME h261 lowres loop filter
1529 static inline void chroma_4mv_motion_lowres(MpegEncContext
*s
,
1530 uint8_t *dest_cb
, uint8_t *dest_cr
,
1531 uint8_t **ref_picture
,
1532 h264_chroma_mc_func
*pix_op
,
1534 const int lowres
= s
->avctx
->lowres
;
1535 const int block_s
= 8>>lowres
;
1536 const int s_mask
= (2<<lowres
)-1;
1537 const int h_edge_pos
= s
->h_edge_pos
>> (lowres
+1);
1538 const int v_edge_pos
= s
->v_edge_pos
>> (lowres
+1);
1539 int emu
=0, src_x
, src_y
, offset
, sx
, sy
;
1542 if(s
->quarter_sample
){
1547 /* In case of 8X8, we construct a single chroma motion vector
1548 with a special rounding */
1549 mx
= ff_h263_round_chroma(mx
);
1550 my
= ff_h263_round_chroma(my
);
1554 src_x
= s
->mb_x
*block_s
+ (mx
>> (lowres
+1));
1555 src_y
= s
->mb_y
*block_s
+ (my
>> (lowres
+1));
1557 offset
= src_y
* s
->uvlinesize
+ src_x
;
1558 ptr
= ref_picture
[1] + offset
;
1559 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
1560 if( (unsigned)src_x
> h_edge_pos
- (!!sx
) - block_s
1561 || (unsigned)src_y
> v_edge_pos
- (!!sy
) - block_s
){
1562 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->uvlinesize
, 9, 9, src_x
, src_y
, h_edge_pos
, v_edge_pos
);
1563 ptr
= s
->edge_emu_buffer
;
1569 pix_op
[lowres
](dest_cb
, ptr
, s
->uvlinesize
, block_s
, sx
, sy
);
1571 ptr
= ref_picture
[2] + offset
;
1573 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->uvlinesize
, 9, 9, src_x
, src_y
, h_edge_pos
, v_edge_pos
);
1574 ptr
= s
->edge_emu_buffer
;
1576 pix_op
[lowres
](dest_cr
, ptr
, s
->uvlinesize
, block_s
, sx
, sy
);
1580 * motion compensation of a single macroblock
1582 * @param dest_y luma destination pointer
1583 * @param dest_cb chroma cb/u destination pointer
1584 * @param dest_cr chroma cr/v destination pointer
1585 * @param dir direction (0->forward, 1->backward)
1586 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1587 * @param pic_op halfpel motion compensation function (average or put normally)
1588 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1590 static inline void MPV_motion_lowres(MpegEncContext
*s
,
1591 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
1592 int dir
, uint8_t **ref_picture
,
1593 h264_chroma_mc_func
*pix_op
)
1597 const int lowres
= s
->avctx
->lowres
;
1598 const int block_s
= 8>>lowres
;
1603 switch(s
->mv_type
) {
1605 mpeg_motion_lowres(s
, dest_y
, dest_cb
, dest_cr
,
1607 ref_picture
, pix_op
,
1608 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 2*block_s
);
1614 hpel_motion_lowres(s
, dest_y
+ ((i
& 1) + (i
>> 1) * s
->linesize
)*block_s
,
1615 ref_picture
[0], 0, 0,
1616 (2*mb_x
+ (i
& 1))*block_s
, (2*mb_y
+ (i
>>1))*block_s
,
1617 s
->width
, s
->height
, s
->linesize
,
1618 s
->h_edge_pos
>> lowres
, s
->v_edge_pos
>> lowres
,
1619 block_s
, block_s
, pix_op
,
1620 s
->mv
[dir
][i
][0], s
->mv
[dir
][i
][1]);
1622 mx
+= s
->mv
[dir
][i
][0];
1623 my
+= s
->mv
[dir
][i
][1];
1626 if(!CONFIG_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
))
1627 chroma_4mv_motion_lowres(s
, dest_cb
, dest_cr
, ref_picture
, pix_op
, mx
, my
);
1630 if (s
->picture_structure
== PICT_FRAME
) {
1632 mpeg_motion_lowres(s
, dest_y
, dest_cb
, dest_cr
,
1633 1, 0, s
->field_select
[dir
][0],
1634 ref_picture
, pix_op
,
1635 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], block_s
);
1637 mpeg_motion_lowres(s
, dest_y
, dest_cb
, dest_cr
,
1638 1, 1, s
->field_select
[dir
][1],
1639 ref_picture
, pix_op
,
1640 s
->mv
[dir
][1][0], s
->mv
[dir
][1][1], block_s
);
1642 if(s
->picture_structure
!= s
->field_select
[dir
][0] + 1 && s
->pict_type
!= FF_B_TYPE
&& !s
->first_field
){
1643 ref_picture
= s
->current_picture_ptr
->data
;
1646 mpeg_motion_lowres(s
, dest_y
, dest_cb
, dest_cr
,
1647 0, 0, s
->field_select
[dir
][0],
1648 ref_picture
, pix_op
,
1649 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 2*block_s
);
1654 uint8_t ** ref2picture
;
1656 if(s
->picture_structure
== s
->field_select
[dir
][i
] + 1 || s
->pict_type
== FF_B_TYPE
|| s
->first_field
){
1657 ref2picture
= ref_picture
;
1659 ref2picture
= s
->current_picture_ptr
->data
;
1662 mpeg_motion_lowres(s
, dest_y
, dest_cb
, dest_cr
,
1663 0, 0, s
->field_select
[dir
][i
],
1664 ref2picture
, pix_op
,
1665 s
->mv
[dir
][i
][0], s
->mv
[dir
][i
][1] + 2*block_s
*i
, block_s
);
1667 dest_y
+= 2*block_s
*s
->linesize
;
1668 dest_cb
+= (2*block_s
>>s
->chroma_y_shift
)*s
->uvlinesize
;
1669 dest_cr
+= (2*block_s
>>s
->chroma_y_shift
)*s
->uvlinesize
;
1673 if(s
->picture_structure
== PICT_FRAME
){
1677 mpeg_motion_lowres(s
, dest_y
, dest_cb
, dest_cr
,
1679 ref_picture
, pix_op
,
1680 s
->mv
[dir
][2*i
+ j
][0], s
->mv
[dir
][2*i
+ j
][1], block_s
);
1682 pix_op
= s
->dsp
.avg_h264_chroma_pixels_tab
;
1686 mpeg_motion_lowres(s
, dest_y
, dest_cb
, dest_cr
,
1687 0, 0, s
->picture_structure
!= i
+1,
1688 ref_picture
, pix_op
,
1689 s
->mv
[dir
][2*i
][0],s
->mv
[dir
][2*i
][1],2*block_s
);
1691 // after put we make avg of the same block
1692 pix_op
= s
->dsp
.avg_h264_chroma_pixels_tab
;
1694 //opposite parity is always in the same frame if this is second field
1695 if(!s
->first_field
){
1696 ref_picture
= s
->current_picture_ptr
->data
;
1705 /* put block[] to dest[] */
1706 static inline void put_dct(MpegEncContext
*s
,
1707 DCTELEM
*block
, int i
, uint8_t *dest
, int line_size
, int qscale
)
1709 s
->dct_unquantize_intra(s
, block
, i
, qscale
);
1710 s
->dsp
.idct_put (dest
, line_size
, block
);
1713 /* add block[] to dest[] */
1714 static inline void add_dct(MpegEncContext
*s
,
1715 DCTELEM
*block
, int i
, uint8_t *dest
, int line_size
)
1717 if (s
->block_last_index
[i
] >= 0) {
1718 s
->dsp
.idct_add (dest
, line_size
, block
);
1722 static inline void add_dequant_dct(MpegEncContext
*s
,
1723 DCTELEM
*block
, int i
, uint8_t *dest
, int line_size
, int qscale
)
1725 if (s
->block_last_index
[i
] >= 0) {
1726 s
->dct_unquantize_inter(s
, block
, i
, qscale
);
1728 s
->dsp
.idct_add (dest
, line_size
, block
);
1733 * cleans dc, ac, coded_block for the current non intra MB
1735 void ff_clean_intra_table_entries(MpegEncContext
*s
)
1737 int wrap
= s
->b8_stride
;
1738 int xy
= s
->block_index
[0];
1741 s
->dc_val
[0][xy
+ 1 ] =
1742 s
->dc_val
[0][xy
+ wrap
] =
1743 s
->dc_val
[0][xy
+ 1 + wrap
] = 1024;
1745 memset(s
->ac_val
[0][xy
], 0, 32 * sizeof(int16_t));
1746 memset(s
->ac_val
[0][xy
+ wrap
], 0, 32 * sizeof(int16_t));
1747 if (s
->msmpeg4_version
>=3) {
1748 s
->coded_block
[xy
] =
1749 s
->coded_block
[xy
+ 1 ] =
1750 s
->coded_block
[xy
+ wrap
] =
1751 s
->coded_block
[xy
+ 1 + wrap
] = 0;
1754 wrap
= s
->mb_stride
;
1755 xy
= s
->mb_x
+ s
->mb_y
* wrap
;
1757 s
->dc_val
[2][xy
] = 1024;
1759 memset(s
->ac_val
[1][xy
], 0, 16 * sizeof(int16_t));
1760 memset(s
->ac_val
[2][xy
], 0, 16 * sizeof(int16_t));
1762 s
->mbintra_table
[xy
]= 0;
1765 /* generic function called after a macroblock has been parsed by the
1766 decoder or after it has been encoded by the encoder.
1768 Important variables used:
1769 s->mb_intra : true if intra macroblock
1770 s->mv_dir : motion vector direction
1771 s->mv_type : motion vector type
1772 s->mv : motion vector
1773 s->interlaced_dct : true if interlaced dct used (mpeg2)
1775 static av_always_inline
1776 void MPV_decode_mb_internal(MpegEncContext
*s
, DCTELEM block
[12][64],
1777 int lowres_flag
, int is_mpeg12
)
1780 const int mb_xy
= s
->mb_y
* s
->mb_stride
+ s
->mb_x
;
1781 if(CONFIG_MPEG_XVMC_DECODER
&& s
->avctx
->xvmc_acceleration
){
1782 ff_xvmc_decode_mb(s
);//xvmc uses pblocks
1789 if(s
->avctx
->debug
&FF_DEBUG_DCT_COEFF
) {
1790 /* save DCT coefficients */
1792 DCTELEM
*dct
= &s
->current_picture
.dct_coeff
[mb_xy
*64*6];
1795 *dct
++ = block
[i
][s
->dsp
.idct_permutation
[j
]];
1798 s
->current_picture
.qscale_table
[mb_xy
]= s
->qscale
;
1800 /* update DC predictors for P macroblocks */
1802 if (!is_mpeg12
&& (s
->h263_pred
|| s
->h263_aic
)) {
1803 if(s
->mbintra_table
[mb_xy
])
1804 ff_clean_intra_table_entries(s
);
1808 s
->last_dc
[2] = 128 << s
->intra_dc_precision
;
1811 else if (!is_mpeg12
&& (s
->h263_pred
|| s
->h263_aic
))
1812 s
->mbintra_table
[mb_xy
]=1;
1814 if ((s
->flags
&CODEC_FLAG_PSNR
) || !(s
->encoding
&& (s
->intra_only
|| s
->pict_type
==FF_B_TYPE
) && s
->avctx
->mb_decision
!= FF_MB_DECISION_RD
)) { //FIXME precalc
1815 uint8_t *dest_y
, *dest_cb
, *dest_cr
;
1816 int dct_linesize
, dct_offset
;
1817 op_pixels_func (*op_pix
)[4];
1818 qpel_mc_func (*op_qpix
)[16];
1819 const int linesize
= s
->current_picture
.linesize
[0]; //not s->linesize as this would be wrong for field pics
1820 const int uvlinesize
= s
->current_picture
.linesize
[1];
1821 const int readable
= s
->pict_type
!= FF_B_TYPE
|| s
->encoding
|| s
->avctx
->draw_horiz_band
|| lowres_flag
;
1822 const int block_size
= lowres_flag
? 8>>s
->avctx
->lowres
: 8;
1824 /* avoid copy if macroblock skipped in last frame too */
1825 /* skip only during decoding as we might trash the buffers during encoding a bit */
1827 uint8_t *mbskip_ptr
= &s
->mbskip_table
[mb_xy
];
1828 const int age
= s
->current_picture
.age
;
1832 if (s
->mb_skipped
) {
1834 assert(s
->pict_type
!=FF_I_TYPE
);
1836 (*mbskip_ptr
) ++; /* indicate that this time we skipped it */
1837 if(*mbskip_ptr
>99) *mbskip_ptr
= 99;
1839 /* if previous was skipped too, then nothing to do ! */
1840 if (*mbskip_ptr
>= age
&& s
->current_picture
.reference
){
1843 } else if(!s
->current_picture
.reference
){
1844 (*mbskip_ptr
) ++; /* increase counter so the age can be compared cleanly */
1845 if(*mbskip_ptr
>99) *mbskip_ptr
= 99;
1847 *mbskip_ptr
= 0; /* not skipped */
1851 dct_linesize
= linesize
<< s
->interlaced_dct
;
1852 dct_offset
=(s
->interlaced_dct
)? linesize
: linesize
*block_size
;
1856 dest_cb
= s
->dest
[1];
1857 dest_cr
= s
->dest
[2];
1859 dest_y
= s
->b_scratchpad
;
1860 dest_cb
= s
->b_scratchpad
+16*linesize
;
1861 dest_cr
= s
->b_scratchpad
+32*linesize
;
1865 /* motion handling */
1866 /* decoding or more than one mb_type (MC was already done otherwise) */
1869 h264_chroma_mc_func
*op_pix
= s
->dsp
.put_h264_chroma_pixels_tab
;
1871 if (s
->mv_dir
& MV_DIR_FORWARD
) {
1872 MPV_motion_lowres(s
, dest_y
, dest_cb
, dest_cr
, 0, s
->last_picture
.data
, op_pix
);
1873 op_pix
= s
->dsp
.avg_h264_chroma_pixels_tab
;
1875 if (s
->mv_dir
& MV_DIR_BACKWARD
) {
1876 MPV_motion_lowres(s
, dest_y
, dest_cb
, dest_cr
, 1, s
->next_picture
.data
, op_pix
);
1879 op_qpix
= s
->me
.qpel_put
;
1880 if ((!s
->no_rounding
) || s
->pict_type
==FF_B_TYPE
){
1881 op_pix
= s
->dsp
.put_pixels_tab
;
1883 op_pix
= s
->dsp
.put_no_rnd_pixels_tab
;
1885 if (s
->mv_dir
& MV_DIR_FORWARD
) {
1886 MPV_motion(s
, dest_y
, dest_cb
, dest_cr
, 0, s
->last_picture
.data
, op_pix
, op_qpix
);
1887 op_pix
= s
->dsp
.avg_pixels_tab
;
1888 op_qpix
= s
->me
.qpel_avg
;
1890 if (s
->mv_dir
& MV_DIR_BACKWARD
) {
1891 MPV_motion(s
, dest_y
, dest_cb
, dest_cr
, 1, s
->next_picture
.data
, op_pix
, op_qpix
);
1896 /* skip dequant / idct if we are really late ;) */
1897 if(s
->hurry_up
>1) goto skip_idct
;
1898 if(s
->avctx
->skip_idct
){
1899 if( (s
->avctx
->skip_idct
>= AVDISCARD_NONREF
&& s
->pict_type
== FF_B_TYPE
)
1900 ||(s
->avctx
->skip_idct
>= AVDISCARD_NONKEY
&& s
->pict_type
!= FF_I_TYPE
)
1901 || s
->avctx
->skip_idct
>= AVDISCARD_ALL
)
1905 /* add dct residue */
1906 if(s
->encoding
|| !( s
->h263_msmpeg4
|| s
->codec_id
==CODEC_ID_MPEG1VIDEO
|| s
->codec_id
==CODEC_ID_MPEG2VIDEO
1907 || (s
->codec_id
==CODEC_ID_MPEG4
&& !s
->mpeg_quant
))){
1908 add_dequant_dct(s
, block
[0], 0, dest_y
, dct_linesize
, s
->qscale
);
1909 add_dequant_dct(s
, block
[1], 1, dest_y
+ block_size
, dct_linesize
, s
->qscale
);
1910 add_dequant_dct(s
, block
[2], 2, dest_y
+ dct_offset
, dct_linesize
, s
->qscale
);
1911 add_dequant_dct(s
, block
[3], 3, dest_y
+ dct_offset
+ block_size
, dct_linesize
, s
->qscale
);
1913 if(!CONFIG_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
1914 if (s
->chroma_y_shift
){
1915 add_dequant_dct(s
, block
[4], 4, dest_cb
, uvlinesize
, s
->chroma_qscale
);
1916 add_dequant_dct(s
, block
[5], 5, dest_cr
, uvlinesize
, s
->chroma_qscale
);
1920 add_dequant_dct(s
, block
[4], 4, dest_cb
, dct_linesize
, s
->chroma_qscale
);
1921 add_dequant_dct(s
, block
[5], 5, dest_cr
, dct_linesize
, s
->chroma_qscale
);
1922 add_dequant_dct(s
, block
[6], 6, dest_cb
+ dct_offset
, dct_linesize
, s
->chroma_qscale
);
1923 add_dequant_dct(s
, block
[7], 7, dest_cr
+ dct_offset
, dct_linesize
, s
->chroma_qscale
);
1926 } else if(is_mpeg12
|| (s
->codec_id
!= CODEC_ID_WMV2
)){
1927 add_dct(s
, block
[0], 0, dest_y
, dct_linesize
);
1928 add_dct(s
, block
[1], 1, dest_y
+ block_size
, dct_linesize
);
1929 add_dct(s
, block
[2], 2, dest_y
+ dct_offset
, dct_linesize
);
1930 add_dct(s
, block
[3], 3, dest_y
+ dct_offset
+ block_size
, dct_linesize
);
1932 if(!CONFIG_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
1933 if(s
->chroma_y_shift
){//Chroma420
1934 add_dct(s
, block
[4], 4, dest_cb
, uvlinesize
);
1935 add_dct(s
, block
[5], 5, dest_cr
, uvlinesize
);
1938 dct_linesize
= uvlinesize
<< s
->interlaced_dct
;
1939 dct_offset
=(s
->interlaced_dct
)? uvlinesize
: uvlinesize
*8;
1941 add_dct(s
, block
[4], 4, dest_cb
, dct_linesize
);
1942 add_dct(s
, block
[5], 5, dest_cr
, dct_linesize
);
1943 add_dct(s
, block
[6], 6, dest_cb
+dct_offset
, dct_linesize
);
1944 add_dct(s
, block
[7], 7, dest_cr
+dct_offset
, dct_linesize
);
1945 if(!s
->chroma_x_shift
){//Chroma444
1946 add_dct(s
, block
[8], 8, dest_cb
+8, dct_linesize
);
1947 add_dct(s
, block
[9], 9, dest_cr
+8, dct_linesize
);
1948 add_dct(s
, block
[10], 10, dest_cb
+8+dct_offset
, dct_linesize
);
1949 add_dct(s
, block
[11], 11, dest_cr
+8+dct_offset
, dct_linesize
);
1954 else if (CONFIG_WMV2
) {
1955 ff_wmv2_add_mb(s
, block
, dest_y
, dest_cb
, dest_cr
);
1958 /* dct only in intra block */
1959 if(s
->encoding
|| !(s
->codec_id
==CODEC_ID_MPEG1VIDEO
|| s
->codec_id
==CODEC_ID_MPEG2VIDEO
)){
1960 put_dct(s
, block
[0], 0, dest_y
, dct_linesize
, s
->qscale
);
1961 put_dct(s
, block
[1], 1, dest_y
+ block_size
, dct_linesize
, s
->qscale
);
1962 put_dct(s
, block
[2], 2, dest_y
+ dct_offset
, dct_linesize
, s
->qscale
);
1963 put_dct(s
, block
[3], 3, dest_y
+ dct_offset
+ block_size
, dct_linesize
, s
->qscale
);
1965 if(!CONFIG_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
1966 if(s
->chroma_y_shift
){
1967 put_dct(s
, block
[4], 4, dest_cb
, uvlinesize
, s
->chroma_qscale
);
1968 put_dct(s
, block
[5], 5, dest_cr
, uvlinesize
, s
->chroma_qscale
);
1972 put_dct(s
, block
[4], 4, dest_cb
, dct_linesize
, s
->chroma_qscale
);
1973 put_dct(s
, block
[5], 5, dest_cr
, dct_linesize
, s
->chroma_qscale
);
1974 put_dct(s
, block
[6], 6, dest_cb
+ dct_offset
, dct_linesize
, s
->chroma_qscale
);
1975 put_dct(s
, block
[7], 7, dest_cr
+ dct_offset
, dct_linesize
, s
->chroma_qscale
);
1979 s
->dsp
.idct_put(dest_y
, dct_linesize
, block
[0]);
1980 s
->dsp
.idct_put(dest_y
+ block_size
, dct_linesize
, block
[1]);
1981 s
->dsp
.idct_put(dest_y
+ dct_offset
, dct_linesize
, block
[2]);
1982 s
->dsp
.idct_put(dest_y
+ dct_offset
+ block_size
, dct_linesize
, block
[3]);
1984 if(!CONFIG_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
1985 if(s
->chroma_y_shift
){
1986 s
->dsp
.idct_put(dest_cb
, uvlinesize
, block
[4]);
1987 s
->dsp
.idct_put(dest_cr
, uvlinesize
, block
[5]);
1990 dct_linesize
= uvlinesize
<< s
->interlaced_dct
;
1991 dct_offset
=(s
->interlaced_dct
)? uvlinesize
: uvlinesize
*8;
1993 s
->dsp
.idct_put(dest_cb
, dct_linesize
, block
[4]);
1994 s
->dsp
.idct_put(dest_cr
, dct_linesize
, block
[5]);
1995 s
->dsp
.idct_put(dest_cb
+ dct_offset
, dct_linesize
, block
[6]);
1996 s
->dsp
.idct_put(dest_cr
+ dct_offset
, dct_linesize
, block
[7]);
1997 if(!s
->chroma_x_shift
){//Chroma444
1998 s
->dsp
.idct_put(dest_cb
+ 8, dct_linesize
, block
[8]);
1999 s
->dsp
.idct_put(dest_cr
+ 8, dct_linesize
, block
[9]);
2000 s
->dsp
.idct_put(dest_cb
+ 8 + dct_offset
, dct_linesize
, block
[10]);
2001 s
->dsp
.idct_put(dest_cr
+ 8 + dct_offset
, dct_linesize
, block
[11]);
2009 s
->dsp
.put_pixels_tab
[0][0](s
->dest
[0], dest_y
, linesize
,16);
2010 s
->dsp
.put_pixels_tab
[s
->chroma_x_shift
][0](s
->dest
[1], dest_cb
, uvlinesize
,16 >> s
->chroma_y_shift
);
2011 s
->dsp
.put_pixels_tab
[s
->chroma_x_shift
][0](s
->dest
[2], dest_cr
, uvlinesize
,16 >> s
->chroma_y_shift
);
2016 void MPV_decode_mb(MpegEncContext
*s
, DCTELEM block
[12][64]){
2018 if(s
->out_format
== FMT_MPEG1
) {
2019 if(s
->avctx
->lowres
) MPV_decode_mb_internal(s
, block
, 1, 1);
2020 else MPV_decode_mb_internal(s
, block
, 0, 1);
2023 if(s
->avctx
->lowres
) MPV_decode_mb_internal(s
, block
, 1, 0);
2024 else MPV_decode_mb_internal(s
, block
, 0, 0);
/* Notify the user application that a horizontal band of the picture has been
 * decoded, via avctx->draw_horiz_band.
 * NOTE(review): this text is a fragmented extraction; several original source
 * lines (local declarations of src/offset, the offset-zeroing branch for
 * frame-coded B pictures, the field-picture y/h adjustment, the final else)
 * appear to be missing — verify against upstream before editing. */
2029 * @param h is the normal height, this will be reduced automatically if needed for the last row
2031 void ff_draw_horiz_band(MpegEncContext
*s
, int y
, int h
){
/* Only do any work when the application registered a callback. */
2032 if (s
->avctx
->draw_horiz_band
) {
/* Field pictures: skip the first field unless the app opted in to
 * per-field delivery via SLICE_FLAG_ALLOW_FIELD. */
2036 if(s
->picture_structure
!= PICT_FRAME
){
2039 if(s
->first_field
&& !(s
->avctx
->slice_flags
&SLICE_FLAG_ALLOW_FIELD
)) return;
/* Clamp the band height so it never extends past the picture bottom
 * (the last row may be shorter than the nominal band height). */
2042 h
= FFMIN(h
, s
->avctx
->height
- y
);
/* Pick the frame to hand out: B-frames, low-delay streams, and
 * coded-order delivery use the picture being decoded; otherwise the
 * previously decoded (display-order) picture is shown. */
2044 if(s
->pict_type
==FF_B_TYPE
|| s
->low_delay
|| (s
->avctx
->slice_flags
&SLICE_FLAG_CODED_ORDER
))
2045 src
= (AVFrame
*)s
->current_picture_ptr
;
2046 else if(s
->last_picture_ptr
)
2047 src
= (AVFrame
*)s
->last_picture_ptr
;
/* For frame-coded B pictures (except H.264) the data pointers already
 * address the band, so plane offsets differ from the general case —
 * NOTE(review): the zero-offset branch body is not visible here. */
2051 if(s
->pict_type
==FF_B_TYPE
&& s
->picture_structure
== PICT_FRAME
&& s
->out_format
!= FMT_H264
){
/* General case: byte offsets of row y in the luma and chroma planes. */
2057 offset
[0]= y
* s
->linesize
;
2059 offset
[2]= (y
>> s
->chroma_y_shift
) * s
->uvlinesize
;
/* Invoke the user callback with the band's position and height. */
2065 s
->avctx
->draw_horiz_band(s
->avctx
, src
, offset
,
2066 y
, s
->picture_structure
, h
);
2070 void ff_init_block_index(MpegEncContext
*s
){ //FIXME maybe rename
2071 const int linesize
= s
->current_picture
.linesize
[0]; //not s->linesize as this would be wrong for field pics
2072 const int uvlinesize
= s
->current_picture
.linesize
[1];
2073 const int mb_size
= 4 - s
->avctx
->lowres
;
2075 s
->block_index
[0]= s
->b8_stride
*(s
->mb_y
*2 ) - 2 + s
->mb_x
*2;
2076 s
->block_index
[1]= s
->b8_stride
*(s
->mb_y
*2 ) - 1 + s
->mb_x
*2;
2077 s
->block_index
[2]= s
->b8_stride
*(s
->mb_y
*2 + 1) - 2 + s
->mb_x
*2;
2078 s
->block_index
[3]= s
->b8_stride
*(s
->mb_y
*2 + 1) - 1 + s
->mb_x
*2;
2079 s
->block_index
[4]= s
->mb_stride
*(s
->mb_y
+ 1) + s
->b8_stride
*s
->mb_height
*2 + s
->mb_x
- 1;
2080 s
->block_index
[5]= s
->mb_stride
*(s
->mb_y
+ s
->mb_height
+ 2) + s
->b8_stride
*s
->mb_height
*2 + s
->mb_x
- 1;
2081 //block_index is not used by mpeg2, so it is not affected by chroma_format
2083 s
->dest
[0] = s
->current_picture
.data
[0] + ((s
->mb_x
- 1) << mb_size
);
2084 s
->dest
[1] = s
->current_picture
.data
[1] + ((s
->mb_x
- 1) << (mb_size
- s
->chroma_x_shift
));
2085 s
->dest
[2] = s
->current_picture
.data
[2] + ((s
->mb_x
- 1) << (mb_size
- s
->chroma_x_shift
));
2087 if(!(s
->pict_type
==FF_B_TYPE
&& s
->avctx
->draw_horiz_band
&& s
->picture_structure
==PICT_FRAME
))
2089 s
->dest
[0] += s
->mb_y
* linesize
<< mb_size
;
2090 s
->dest
[1] += s
->mb_y
* uvlinesize
<< (mb_size
- s
->chroma_y_shift
);
2091 s
->dest
[2] += s
->mb_y
* uvlinesize
<< (mb_size
- s
->chroma_y_shift
);
2095 void ff_mpeg_flush(AVCodecContext
*avctx
){
2097 MpegEncContext
*s
= avctx
->priv_data
;
2099 if(s
==NULL
|| s
->picture
==NULL
)
2102 for(i
=0; i
<MAX_PICTURE_COUNT
; i
++){
2103 if(s
->picture
[i
].data
[0] && ( s
->picture
[i
].type
== FF_BUFFER_TYPE_INTERNAL
2104 || s
->picture
[i
].type
== FF_BUFFER_TYPE_USER
))
2105 free_frame_buffer(s
, &s
->picture
[i
]);
2107 s
->current_picture_ptr
= s
->last_picture_ptr
= s
->next_picture_ptr
= NULL
;
2109 s
->mb_x
= s
->mb_y
= 0;
2111 s
->parse_context
.state
= -1;
2112 s
->parse_context
.frame_start_found
= 0;
2113 s
->parse_context
.overread
= 0;
2114 s
->parse_context
.overread_index
= 0;
2115 s
->parse_context
.index
= 0;
2116 s
->parse_context
.last_index
= 0;
2117 s
->bitstream_buffer_size
=0;
2121 static void dct_unquantize_mpeg1_intra_c(MpegEncContext
*s
,
2122 DCTELEM
*block
, int n
, int qscale
)
2124 int i
, level
, nCoeffs
;
2125 const uint16_t *quant_matrix
;
2127 nCoeffs
= s
->block_last_index
[n
];
2130 block
[0] = block
[0] * s
->y_dc_scale
;
2132 block
[0] = block
[0] * s
->c_dc_scale
;
2133 /* XXX: only mpeg1 */
2134 quant_matrix
= s
->intra_matrix
;
2135 for(i
=1;i
<=nCoeffs
;i
++) {
2136 int j
= s
->intra_scantable
.permutated
[i
];
2141 level
= (int)(level
* qscale
* quant_matrix
[j
]) >> 3;
2142 level
= (level
- 1) | 1;
2145 level
= (int)(level
* qscale
* quant_matrix
[j
]) >> 3;
2146 level
= (level
- 1) | 1;
2153 static void dct_unquantize_mpeg1_inter_c(MpegEncContext
*s
,
2154 DCTELEM
*block
, int n
, int qscale
)
2156 int i
, level
, nCoeffs
;
2157 const uint16_t *quant_matrix
;
2159 nCoeffs
= s
->block_last_index
[n
];
2161 quant_matrix
= s
->inter_matrix
;
2162 for(i
=0; i
<=nCoeffs
; i
++) {
2163 int j
= s
->intra_scantable
.permutated
[i
];
2168 level
= (((level
<< 1) + 1) * qscale
*
2169 ((int) (quant_matrix
[j
]))) >> 4;
2170 level
= (level
- 1) | 1;
2173 level
= (((level
<< 1) + 1) * qscale
*
2174 ((int) (quant_matrix
[j
]))) >> 4;
2175 level
= (level
- 1) | 1;
2182 static void dct_unquantize_mpeg2_intra_c(MpegEncContext
*s
,
2183 DCTELEM
*block
, int n
, int qscale
)
2185 int i
, level
, nCoeffs
;
2186 const uint16_t *quant_matrix
;
2188 if(s
->alternate_scan
) nCoeffs
= 63;
2189 else nCoeffs
= s
->block_last_index
[n
];
2192 block
[0] = block
[0] * s
->y_dc_scale
;
2194 block
[0] = block
[0] * s
->c_dc_scale
;
2195 quant_matrix
= s
->intra_matrix
;
2196 for(i
=1;i
<=nCoeffs
;i
++) {
2197 int j
= s
->intra_scantable
.permutated
[i
];
2202 level
= (int)(level
* qscale
* quant_matrix
[j
]) >> 3;
2205 level
= (int)(level
* qscale
* quant_matrix
[j
]) >> 3;
2212 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext
*s
,
2213 DCTELEM
*block
, int n
, int qscale
)
2215 int i
, level
, nCoeffs
;
2216 const uint16_t *quant_matrix
;
2219 if(s
->alternate_scan
) nCoeffs
= 63;
2220 else nCoeffs
= s
->block_last_index
[n
];
2223 block
[0] = block
[0] * s
->y_dc_scale
;
2225 block
[0] = block
[0] * s
->c_dc_scale
;
2226 quant_matrix
= s
->intra_matrix
;
2227 for(i
=1;i
<=nCoeffs
;i
++) {
2228 int j
= s
->intra_scantable
.permutated
[i
];
2233 level
= (int)(level
* qscale
* quant_matrix
[j
]) >> 3;
2236 level
= (int)(level
* qscale
* quant_matrix
[j
]) >> 3;
2245 static void dct_unquantize_mpeg2_inter_c(MpegEncContext
*s
,
2246 DCTELEM
*block
, int n
, int qscale
)
2248 int i
, level
, nCoeffs
;
2249 const uint16_t *quant_matrix
;
2252 if(s
->alternate_scan
) nCoeffs
= 63;
2253 else nCoeffs
= s
->block_last_index
[n
];
2255 quant_matrix
= s
->inter_matrix
;
2256 for(i
=0; i
<=nCoeffs
; i
++) {
2257 int j
= s
->intra_scantable
.permutated
[i
];
2262 level
= (((level
<< 1) + 1) * qscale
*
2263 ((int) (quant_matrix
[j
]))) >> 4;
2266 level
= (((level
<< 1) + 1) * qscale
*
2267 ((int) (quant_matrix
[j
]))) >> 4;
2276 static void dct_unquantize_h263_intra_c(MpegEncContext
*s
,
2277 DCTELEM
*block
, int n
, int qscale
)
2279 int i
, level
, qmul
, qadd
;
2282 assert(s
->block_last_index
[n
]>=0);
2288 block
[0] = block
[0] * s
->y_dc_scale
;
2290 block
[0] = block
[0] * s
->c_dc_scale
;
2291 qadd
= (qscale
- 1) | 1;
2298 nCoeffs
= s
->inter_scantable
.raster_end
[ s
->block_last_index
[n
] ];
2300 for(i
=1; i
<=nCoeffs
; i
++) {
2304 level
= level
* qmul
- qadd
;
2306 level
= level
* qmul
+ qadd
;
2313 static void dct_unquantize_h263_inter_c(MpegEncContext
*s
,
2314 DCTELEM
*block
, int n
, int qscale
)
2316 int i
, level
, qmul
, qadd
;
2319 assert(s
->block_last_index
[n
]>=0);
2321 qadd
= (qscale
- 1) | 1;
2324 nCoeffs
= s
->inter_scantable
.raster_end
[ s
->block_last_index
[n
] ];
2326 for(i
=0; i
<=nCoeffs
; i
++) {
2330 level
= level
* qmul
- qadd
;
2332 level
= level
* qmul
+ qadd
;
2340 * set qscale and update qscale dependent variables.
2342 void ff_set_qscale(MpegEncContext
* s
, int qscale
)
2346 else if (qscale
> 31)
2350 s
->chroma_qscale
= s
->chroma_qscale_table
[qscale
];
2352 s
->y_dc_scale
= s
->y_dc_scale_table
[ qscale
];
2353 s
->c_dc_scale
= s
->c_dc_scale_table
[ s
->chroma_qscale
];