2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * @file mpegvideo_common.h
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #ifndef AVCODEC_MPEGVIDEO_COMMON_H
31 #define AVCODEC_MPEGVIDEO_COMMON_H
35 #include "mpegvideo.h"
/* Quantize one 8x8 DCT coefficient block of component n at the given
 * qscale; *overflow is an output flag set by the implementation.
 * NOTE(review): return-value semantics (index of last nonzero coeff)
 * are not visible here -- confirm against the definition in mpegvideo.c. */
41 int dct_quantize_c(MpegEncContext
*s
, DCTELEM
*block
, int n
, int qscale
, int *overflow
);
/* Trellis (rate-distortion optimized) variant of dct_quantize_c; same
 * parameter contract as dct_quantize_c above. */
42 int dct_quantize_trellis_c(MpegEncContext
*s
, DCTELEM
*block
, int n
, int qscale
, int *overflow
);
/* Denoise one DCT coefficient block in place (C reference version). */
43 void denoise_dct_c(MpegEncContext
*s
, DCTELEM
*block
);
/* Allocate a Picture.
47 * The pixels are allocated/set by calling get_buffer() if shared=0.
 * NOTE(review): return value is presumably 0 on success / negative on
 * error per FFmpeg convention -- confirm against the definition. */
49 int alloc_picture(MpegEncContext
*s
, Picture
*pic
, int shared
);
/*
52 * Sets the given MpegEncContext to common defaults (same for encoding and decoding).
53 * The changed fields will not depend upon the prior state of the MpegEncContext.
 */
55 void MPV_common_defaults(MpegEncContext
*s
);
/* Global motion compensation for the single-warp-point case: reads
 * s->sprite_offset / s->sprite_warping_accuracy and writes one 16x16
 * luma block plus two 8x8 chroma blocks via s->dsp.gmc1 or the
 * put(_no_rnd)_pixels tables.
 * NOTE(review): this extraction is line-mangled and several original
 * lines are missing (braces, "motion_x=0;"-style clamp bodies, ptr
 * declaration -- see gaps in the embedded numbering). Code is left
 * byte-identical; do not edit logic from this view. */
57 static inline void gmc1_motion(MpegEncContext
*s
,
58 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
59 uint8_t **ref_picture
)
62 int offset
, src_x
, src_y
, linesize
, uvlinesize
;
63 int motion_x
, motion_y
;
/* Luma: integer source position plus sub-pel remainder in 1/16 units. */
66 motion_x
= s
->sprite_offset
[0][0];
67 motion_y
= s
->sprite_offset
[0][1];
68 src_x
= s
->mb_x
* 16 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
69 src_y
= s
->mb_y
* 16 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
70 motion_x
<<=(3-s
->sprite_warping_accuracy
);
71 motion_y
<<=(3-s
->sprite_warping_accuracy
);
/* Clamp the source position to the padded picture area. */
72 src_x
= av_clip(src_x
, -16, s
->width
);
73 if (src_x
== s
->width
)
75 src_y
= av_clip(src_y
, -16, s
->height
);
76 if (src_y
== s
->height
)
79 linesize
= s
->linesize
;
80 uvlinesize
= s
->uvlinesize
;
82 ptr
= ref_picture
[0] + (src_y
* linesize
) + src_x
;
/* Edge emulation: fall back to a 17x17 copied patch when the 16x16+1
 * read would leave the decoded picture area. */
84 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
85 if( (unsigned)src_x
>= s
->h_edge_pos
- 17
86 || (unsigned)src_y
>= s
->v_edge_pos
- 17){
87 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, linesize
, 17, 17, src_x
, src_y
, s
->h_edge_pos
, s
->v_edge_pos
);
88 ptr
= s
->edge_emu_buffer
;
/* Sub-pel remainder present: use the bilinear gmc1 kernel (two 8-wide
 * halves); otherwise a plain half-pel put suffices. */
92 if((motion_x
|motion_y
)&7){
93 s
->dsp
.gmc1(dest_y
, ptr
, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
94 s
->dsp
.gmc1(dest_y
+8, ptr
+8, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
98 dxy
= ((motion_x
>>3)&1) | ((motion_y
>>2)&2);
100 s
->dsp
.put_no_rnd_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
102 s
->dsp
.put_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
/* Gray-only decoding: skip chroma entirely. */
106 if(ENABLE_GRAY
&& s
->flags
&CODEC_FLAG_GRAY
) return;
/* Chroma: same scheme at half resolution (8x8 blocks, 9x9 emu patch). */
108 motion_x
= s
->sprite_offset
[1][0];
109 motion_y
= s
->sprite_offset
[1][1];
110 src_x
= s
->mb_x
* 8 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
111 src_y
= s
->mb_y
* 8 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
112 motion_x
<<=(3-s
->sprite_warping_accuracy
);
113 motion_y
<<=(3-s
->sprite_warping_accuracy
);
114 src_x
= av_clip(src_x
, -8, s
->width
>>1);
115 if (src_x
== s
->width
>>1)
117 src_y
= av_clip(src_y
, -8, s
->height
>>1);
118 if (src_y
== s
->height
>>1)
121 offset
= (src_y
* uvlinesize
) + src_x
;
122 ptr
= ref_picture
[1] + offset
;
123 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
124 if( (unsigned)src_x
>= (s
->h_edge_pos
>>1) - 9
125 || (unsigned)src_y
>= (s
->v_edge_pos
>>1) - 9){
126 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
127 ptr
= s
->edge_emu_buffer
;
131 s
->dsp
.gmc1(dest_cb
, ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
/* Cr plane reuses the offset computed for Cb. */
133 ptr
= ref_picture
[2] + offset
;
135 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
136 ptr
= s
->edge_emu_buffer
;
138 s
->dsp
.gmc1(dest_cr
, ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
/* Global motion compensation for the general (multi-warp-point) case:
 * computes per-block affine offsets ox/oy from s->sprite_offset and
 * s->sprite_delta and delegates pixel warping to s->dsp.gmc.
 * NOTE(review): extraction is line-mangled; some original lines (braces,
 * declarations of ptr/ox/oy, the gmc ox/oy argument lines 161-162 etc.)
 * are missing from this view. Code left byte-identical. */
143 static inline void gmc_motion(MpegEncContext
*s
,
144 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
145 uint8_t **ref_picture
)
148 int linesize
, uvlinesize
;
149 const int a
= s
->sprite_warping_accuracy
;
152 linesize
= s
->linesize
;
153 uvlinesize
= s
->uvlinesize
;
155 ptr
= ref_picture
[0];
/* Affine source position of this macroblock's top-left luma sample. */
157 ox
= s
->sprite_offset
[0][0] + s
->sprite_delta
[0][0]*s
->mb_x
*16 + s
->sprite_delta
[0][1]*s
->mb_y
*16;
158 oy
= s
->sprite_offset
[0][1] + s
->sprite_delta
[1][0]*s
->mb_x
*16 + s
->sprite_delta
[1][1]*s
->mb_y
*16;
/* Left 8-wide luma half. Rounding term (1<<(2a+1)) - no_rounding. */
160 s
->dsp
.gmc(dest_y
, ptr
, linesize
, 16,
163 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
164 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
165 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
166 s
->h_edge_pos
, s
->v_edge_pos
);
/* Right 8-wide luma half, origin advanced by 8 samples along the
 * horizontal delta. */
167 s
->dsp
.gmc(dest_y
+8, ptr
, linesize
, 16,
168 ox
+ s
->sprite_delta
[0][0]*8,
169 oy
+ s
->sprite_delta
[1][0]*8,
170 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
171 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
172 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
173 s
->h_edge_pos
, s
->v_edge_pos
);
175 if(ENABLE_GRAY
&& s
->flags
&CODEC_FLAG_GRAY
) return;
/* Chroma planes: same affine mapping at half resolution. */
177 ox
= s
->sprite_offset
[1][0] + s
->sprite_delta
[0][0]*s
->mb_x
*8 + s
->sprite_delta
[0][1]*s
->mb_y
*8;
178 oy
= s
->sprite_offset
[1][1] + s
->sprite_delta
[1][0]*s
->mb_x
*8 + s
->sprite_delta
[1][1]*s
->mb_y
*8;
180 ptr
= ref_picture
[1];
181 s
->dsp
.gmc(dest_cb
, ptr
, uvlinesize
, 8,
184 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
185 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
186 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
187 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
189 ptr
= ref_picture
[2];
190 s
->dsp
.gmc(dest_cr
, ptr
, uvlinesize
, 8,
193 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
194 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
195 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
196 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
/* Half-pel motion compensation of one w x h block: derives the
 * half-pel sub-position dxy from the MV low bits, clamps the source
 * position, optionally routes through the edge-emulation buffer, and
 * applies pix_op[dxy].
 * NOTE(review): declared int but the visible return statement is
 * missing from this mangled extraction (as are the clamp bodies);
 * code left byte-identical. */
199 static inline int hpel_motion(MpegEncContext
*s
,
200 uint8_t *dest
, uint8_t *src
,
201 int field_based
, int field_select
,
202 int src_x
, int src_y
,
203 int width
, int height
, int stride
,
204 int h_edge_pos
, int v_edge_pos
,
205 int w
, int h
, op_pixels_func
*pix_op
,
206 int motion_x
, int motion_y
)
211 dxy
= ((motion_y
& 1) << 1) | (motion_x
& 1);
212 src_x
+= motion_x
>> 1;
213 src_y
+= motion_y
>> 1;
215 /* WARNING: do not forget half pels */
216 src_x
= av_clip(src_x
, -16, width
); //FIXME unneeded for emu?
219 src_y
= av_clip(src_y
, -16, height
);
222 src
+= src_y
* stride
+ src_x
;
/* Unrestricted MVs may point outside the decoded area: emulate edges. */
224 if(s
->unrestricted_mv
&& (s
->flags
&CODEC_FLAG_EMU_EDGE
)){
225 if( (unsigned)src_x
> h_edge_pos
- (motion_x
&1) - w
226 || (unsigned)src_y
> v_edge_pos
- (motion_y
&1) - h
){
227 ff_emulated_edge_mc(s
->edge_emu_buffer
, src
, s
->linesize
, w
+1, (h
+1)<<field_based
,
228 src_x
, src_y
<<field_based
, h_edge_pos
, s
->v_edge_pos
);
229 src
= s
->edge_emu_buffer
;
235 pix_op
[dxy
](dest
, src
, stride
, h
);
/* Core half-pel motion compensation of one macroblock (luma + chroma),
 * shared by frame and field prediction; is_mpeg12 is a compile-time
 * constant in the callers so dead branches fold away.
 * NOTE(review): extraction is line-mangled; missing lines include the
 * quarter_sample assert body, several else branches, closing braces and
 * some my/uvsrc computations (see numbering gaps). Code left
 * byte-identical. */
239 static av_always_inline
240 void mpeg_motion_internal(MpegEncContext
*s
,
241 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
242 int field_based
, int bottom_field
, int field_select
,
243 uint8_t **ref_picture
, op_pixels_func (*pix_op
)[4],
244 int motion_x
, int motion_y
, int h
, int is_mpeg12
)
246 uint8_t *ptr_y
, *ptr_cb
, *ptr_cr
;
247 int dxy
, uvdxy
, mx
, my
, src_x
, src_y
,
248 uvsrc_x
, uvsrc_y
, v_edge_pos
, uvlinesize
, linesize
;
251 if(s
->quarter_sample
)
/* Field prediction halves the vertical range and doubles the stride. */
258 v_edge_pos
= s
->v_edge_pos
>> field_based
;
259 linesize
= s
->current_picture
.linesize
[0] << field_based
;
260 uvlinesize
= s
->current_picture
.linesize
[1] << field_based
;
262 dxy
= ((motion_y
& 1) << 1) | (motion_x
& 1);
263 src_x
= s
->mb_x
* 16 + (motion_x
>> 1);
264 src_y
=(s
->mb_y
<<(4-field_based
)) + (motion_y
>> 1);
/* Per-format chroma MV derivation. */
266 if (!is_mpeg12
&& s
->out_format
== FMT_H263
) {
267 if((s
->workaround_bugs
& FF_BUG_HPEL_CHROMA
) && field_based
){
268 mx
= (motion_x
>>1)|(motion_x
&1);
270 uvdxy
= ((my
& 1) << 1) | (mx
& 1);
271 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
272 uvsrc_y
= (s
->mb_y
<<(3-field_based
)) + (my
>> 1);
274 uvdxy
= dxy
| (motion_y
& 2) | ((motion_x
& 2) >> 1);
278 }else if(!is_mpeg12
&& s
->out_format
== FMT_H261
){//even chroma mv's are full pel in H261
282 uvsrc_x
= s
->mb_x
*8 + mx
;
283 uvsrc_y
= s
->mb_y
*8 + my
;
285 if(s
->chroma_y_shift
){
288 uvdxy
= ((my
& 1) << 1) | (mx
& 1);
289 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
290 uvsrc_y
= (s
->mb_y
<<(3-field_based
)) + (my
>> 1);
292 if(s
->chroma_x_shift
){
/* 4:2:2 case: chroma is subsampled horizontally only. */
295 uvdxy
= ((motion_y
& 1) << 1) | (mx
& 1);
296 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
307 ptr_y
= ref_picture
[0] + src_y
* linesize
+ src_x
;
308 ptr_cb
= ref_picture
[1] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
309 ptr_cr
= ref_picture
[2] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
/* Out-of-picture reference: log for MPEG-1/2 (spec forbids it) and
 * synthesize the missing samples via edge emulation. */
311 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&1) - 16
312 || (unsigned)src_y
> v_edge_pos
- (motion_y
&1) - h
){
313 if(is_mpeg12
|| s
->codec_id
== CODEC_ID_MPEG2VIDEO
||
314 s
->codec_id
== CODEC_ID_MPEG1VIDEO
){
315 av_log(s
->avctx
,AV_LOG_DEBUG
,
316 "MPEG motion vector out of boundary\n");
319 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr_y
, s
->linesize
,
321 src_x
, src_y
<<field_based
,
322 s
->h_edge_pos
, s
->v_edge_pos
);
323 ptr_y
= s
->edge_emu_buffer
;
324 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
325 uint8_t *uvbuf
= s
->edge_emu_buffer
+18*s
->linesize
;
326 ff_emulated_edge_mc(uvbuf
,
327 ptr_cb
, s
->uvlinesize
,
329 uvsrc_x
, uvsrc_y
<<field_based
,
330 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
331 ff_emulated_edge_mc(uvbuf
+16,
332 ptr_cr
, s
->uvlinesize
,
334 uvsrc_x
, uvsrc_y
<<field_based
,
335 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
/* Bottom field: shift destinations (and selected source field) down
 * one line. */
341 if(bottom_field
){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
342 dest_y
+= s
->linesize
;
343 dest_cb
+= s
->uvlinesize
;
344 dest_cr
+= s
->uvlinesize
;
348 ptr_y
+= s
->linesize
;
349 ptr_cb
+= s
->uvlinesize
;
350 ptr_cr
+= s
->uvlinesize
;
353 pix_op
[0][dxy
](dest_y
, ptr_y
, linesize
, h
);
355 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
356 pix_op
[s
->chroma_x_shift
][uvdxy
]
357 (dest_cb
, ptr_cb
, uvlinesize
, h
>> s
->chroma_y_shift
);
358 pix_op
[s
->chroma_x_shift
][uvdxy
]
359 (dest_cr
, ptr_cr
, uvlinesize
, h
>> s
->chroma_y_shift
);
/* H.261 applies its in-loop filter right after MC. */
361 if(!is_mpeg12
&& (ENABLE_H261_ENCODER
|| ENABLE_H261_DECODER
) &&
362 s
->out_format
== FMT_H261
){
363 ff_h261_loop_filter(s
);
366 /* Apply one MPEG motion vector to the three components.
 * Thin dispatcher: passes a compile-time-constant is_mpeg12 flag to
 * mpeg_motion_internal so the compiler specializes both paths. */
367 static av_always_inline
368 void mpeg_motion(MpegEncContext
*s
,
369 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
370 int field_based
, int bottom_field
, int field_select
,
371 uint8_t **ref_picture
, op_pixels_func (*pix_op
)[4],
372 int motion_x
, int motion_y
, int h
)
375 if(s
->out_format
== FMT_MPEG1
)
376 mpeg_motion_internal(s
, dest_y
, dest_cb
, dest_cr
, field_based
,
377 bottom_field
, field_select
, ref_picture
, pix_op
,
378 motion_x
, motion_y
, h
, 1);
/* NOTE(review): the else keyword between the two calls is missing from
 * this mangled extraction. */
381 mpeg_motion_internal(s
, dest_y
, dest_cb
, dest_cr
, field_based
,
382 bottom_field
, field_select
, ref_picture
, pix_op
,
383 motion_x
, motion_y
, h
, 0);
386 //FIXME move to dsputil, avg variant, 16x16 version
/* Overlapped block motion compensation blend of one 8x8 block: mixes
 * the mid prediction with the top/left/right/bottom neighbour
 * predictions using fixed weights summing to 8 (the +4 >>3 rounds).
 * NOTE(review): the per-row loop headers / dst advances between the
 * OBMC_FILTER groups are missing in this mangled extraction; the
 * weight table itself is complete. Code left byte-identical. */
387 static inline void put_obmc(uint8_t *dst
, uint8_t *src
[5], int stride
){
389 uint8_t * const top
= src
[1];
390 uint8_t * const left
= src
[2];
391 uint8_t * const mid
= src
[0];
392 uint8_t * const right
= src
[3];
393 uint8_t * const bottom
= src
[4];
394 #define OBMC_FILTER(x, t, l, m, r, b)\
395 dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
396 #define OBMC_FILTER4(x, t, l, m, r, b)\
397 OBMC_FILTER(x , t, l, m, r, b);\
398 OBMC_FILTER(x+1 , t, l, m, r, b);\
399 OBMC_FILTER(x +stride, t, l, m, r, b);\
400 OBMC_FILTER(x+1+stride, t, l, m, r, b);
/* Top rows: top neighbour contributes. */
403 OBMC_FILTER (x
, 2, 2, 4, 0, 0);
404 OBMC_FILTER (x
+1, 2, 1, 5, 0, 0);
405 OBMC_FILTER4(x
+2, 2, 1, 5, 0, 0);
406 OBMC_FILTER4(x
+4, 2, 0, 5, 1, 0);
407 OBMC_FILTER (x
+6, 2, 0, 5, 1, 0);
408 OBMC_FILTER (x
+7, 2, 0, 4, 2, 0);
410 OBMC_FILTER (x
, 1, 2, 5, 0, 0);
411 OBMC_FILTER (x
+1, 1, 2, 5, 0, 0);
412 OBMC_FILTER (x
+6, 1, 0, 5, 2, 0);
413 OBMC_FILTER (x
+7, 1, 0, 5, 2, 0);
415 OBMC_FILTER4(x
, 1, 2, 5, 0, 0);
416 OBMC_FILTER4(x
+2, 1, 1, 6, 0, 0);
417 OBMC_FILTER4(x
+4, 1, 0, 6, 1, 0);
418 OBMC_FILTER4(x
+6, 1, 0, 5, 2, 0);
/* Lower rows: bottom neighbour starts contributing. */
420 OBMC_FILTER4(x
, 0, 2, 5, 0, 1);
421 OBMC_FILTER4(x
+2, 0, 1, 6, 0, 1);
422 OBMC_FILTER4(x
+4, 0, 0, 6, 1, 1);
423 OBMC_FILTER4(x
+6, 0, 0, 5, 2, 1);
425 OBMC_FILTER (x
, 0, 2, 5, 0, 1);
426 OBMC_FILTER (x
+1, 0, 2, 5, 0, 1);
427 OBMC_FILTER4(x
+2, 0, 1, 5, 0, 2);
428 OBMC_FILTER4(x
+4, 0, 0, 5, 1, 2);
429 OBMC_FILTER (x
+6, 0, 0, 5, 2, 1);
430 OBMC_FILTER (x
+7, 0, 0, 5, 2, 1);
432 OBMC_FILTER (x
, 0, 2, 4, 0, 2);
433 OBMC_FILTER (x
+1, 0, 1, 5, 0, 2);
434 OBMC_FILTER (x
+6, 0, 0, 5, 1, 2);
435 OBMC_FILTER (x
+7, 0, 0, 4, 2, 2);
438 /* OBMC for one 8x8 luma block: renders the mid prediction and its four
 * neighbour predictions (reusing identical MVs where possible) into
 * s->obmc_scratchpad, then blends them with put_obmc.
 * NOTE(review): the loop header over i, the ptr[] declaration, the
 * MID-reuse branch body and several hpel_motion arguments are missing
 * from this mangled extraction. Code left byte-identical. */
439 static inline void obmc_motion(MpegEncContext
*s
,
440 uint8_t *dest
, uint8_t *src
,
441 int src_x
, int src_y
,
442 op_pixels_func
*pix_op
,
443 int16_t mv
[5][2]/* mid top left right bottom*/)
449 assert(s
->quarter_sample
==0);
/* Reuse the mid prediction when a neighbour MV is identical. */
452 if(i
&& mv
[i
][0]==mv
[MID
][0] && mv
[i
][1]==mv
[MID
][1]){
455 ptr
[i
]= s
->obmc_scratchpad
+ 8*(i
&1) + s
->linesize
*8*(i
>>1);
456 hpel_motion(s
, ptr
[i
], src
, 0, 0,
458 s
->width
, s
->height
, s
->linesize
,
459 s
->h_edge_pos
, s
->v_edge_pos
,
465 put_obmc(dest
, ptr
, s
->linesize
);
/* Quarter-pel motion compensation of one macroblock. Luma uses the
 * 16-entry qpix_op table; chroma is derived at half-pel precision with
 * optional bug-workaround roundings (FF_BUG_QPEL_CHROMA*).
 * NOTE(review): extraction is line-mangled; the default chroma
 * derivation branch, bottom_field handling around line 538, and various
 * braces are missing. Code left byte-identical. */
468 static inline void qpel_motion(MpegEncContext
*s
,
469 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
470 int field_based
, int bottom_field
, int field_select
,
471 uint8_t **ref_picture
, op_pixels_func (*pix_op
)[4],
472 qpel_mc_func (*qpix_op
)[16],
473 int motion_x
, int motion_y
, int h
)
475 uint8_t *ptr_y
, *ptr_cb
, *ptr_cr
;
476 int dxy
, uvdxy
, mx
, my
, src_x
, src_y
, uvsrc_x
, uvsrc_y
, v_edge_pos
, linesize
, uvlinesize
;
478 dxy
= ((motion_y
& 3) << 2) | (motion_x
& 3);
479 src_x
= s
->mb_x
* 16 + (motion_x
>> 2);
480 src_y
= s
->mb_y
* (16 >> field_based
) + (motion_y
>> 2);
482 v_edge_pos
= s
->v_edge_pos
>> field_based
;
483 linesize
= s
->linesize
<< field_based
;
484 uvlinesize
= s
->uvlinesize
<< field_based
;
/* Chroma MV rounding, including encoder-bug workarounds. */
489 }else if(s
->workaround_bugs
&FF_BUG_QPEL_CHROMA2
){
490 static const int rtab
[8]= {0,0,1,1,0,0,0,1};
491 mx
= (motion_x
>>1) + rtab
[motion_x
&7];
492 my
= (motion_y
>>1) + rtab
[motion_y
&7];
493 }else if(s
->workaround_bugs
&FF_BUG_QPEL_CHROMA
){
494 mx
= (motion_x
>>1)|(motion_x
&1);
495 my
= (motion_y
>>1)|(motion_y
&1);
503 uvdxy
= (mx
&1) | ((my
&1)<<1);
507 uvsrc_x
= s
->mb_x
* 8 + mx
;
508 uvsrc_y
= s
->mb_y
* (8 >> field_based
) + my
;
510 ptr_y
= ref_picture
[0] + src_y
* linesize
+ src_x
;
511 ptr_cb
= ref_picture
[1] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
512 ptr_cr
= ref_picture
[2] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
/* Edge emulation: luma needs a 17x17 patch for qpel interpolation. */
514 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&3) - 16
515 || (unsigned)src_y
> v_edge_pos
- (motion_y
&3) - h
){
516 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr_y
, s
->linesize
,
517 17, 17+field_based
, src_x
, src_y
<<field_based
,
518 s
->h_edge_pos
, s
->v_edge_pos
);
519 ptr_y
= s
->edge_emu_buffer
;
520 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
521 uint8_t *uvbuf
= s
->edge_emu_buffer
+ 18*s
->linesize
;
522 ff_emulated_edge_mc(uvbuf
, ptr_cb
, s
->uvlinesize
,
524 uvsrc_x
, uvsrc_y
<<field_based
,
525 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
526 ff_emulated_edge_mc(uvbuf
+ 16, ptr_cr
, s
->uvlinesize
,
528 uvsrc_x
, uvsrc_y
<<field_based
,
529 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
/* Frame (non-field) path: a single 16x16 qpel op. */
536 qpix_op
[0][dxy
](dest_y
, ptr_y
, linesize
);
539 dest_y
+= s
->linesize
;
540 dest_cb
+= s
->uvlinesize
;
541 dest_cr
+= s
->uvlinesize
;
545 ptr_y
+= s
->linesize
;
546 ptr_cb
+= s
->uvlinesize
;
547 ptr_cr
+= s
->uvlinesize
;
549 //damn interlaced mode
550 //FIXME boundary mirroring is not exactly correct here
551 qpix_op
[1][dxy
](dest_y
, ptr_y
, linesize
);
552 qpix_op
[1][dxy
](dest_y
+8, ptr_y
+8, linesize
);
554 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
555 pix_op
[1][uvdxy
](dest_cr
, ptr_cr
, uvlinesize
, h
>> 1);
556 pix_op
[1][uvdxy
](dest_cb
, ptr_cb
, uvlinesize
, h
>> 1);
/*
561 * H.263 chroma 4MV motion compensation: averages the four luma MVs
 * into a single rounded chroma MV and renders both 8x8 chroma blocks.
 * NOTE(review): mangled extraction; the mx/my parameters, the clamp
 * bodies after the av_clip/if pairs, the emu flag checks and closing
 * braces are missing from this view. Code left byte-identical.
 */
563 static inline void chroma_4mv_motion(MpegEncContext
*s
,
564 uint8_t *dest_cb
, uint8_t *dest_cr
,
565 uint8_t **ref_picture
,
566 op_pixels_func
*pix_op
,
568 int dxy
, emu
=0, src_x
, src_y
, offset
;
571 /* In case of 8X8, we construct a single chroma motion vector
572 with a special rounding */
573 mx
= ff_h263_round_chroma(mx
);
574 my
= ff_h263_round_chroma(my
);
576 dxy
= ((my
& 1) << 1) | (mx
& 1);
580 src_x
= s
->mb_x
* 8 + mx
;
581 src_y
= s
->mb_y
* 8 + my
;
582 src_x
= av_clip(src_x
, -8, s
->width
/2);
583 if (src_x
== s
->width
/2)
585 src_y
= av_clip(src_y
, -8, s
->height
/2);
586 if (src_y
== s
->height
/2)
589 offset
= (src_y
* (s
->uvlinesize
)) + src_x
;
590 ptr
= ref_picture
[1] + offset
;
591 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
592 if( (unsigned)src_x
> (s
->h_edge_pos
>>1) - (dxy
&1) - 8
593 || (unsigned)src_y
> (s
->v_edge_pos
>>1) - (dxy
>>1) - 8){
594 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->uvlinesize
,
596 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
597 ptr
= s
->edge_emu_buffer
;
601 pix_op
[dxy
](dest_cb
, ptr
, s
->uvlinesize
, 8);
/* Cr reuses the offset computed for Cb. */
603 ptr
= ref_picture
[2] + offset
;
605 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->uvlinesize
,
607 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
608 ptr
= s
->edge_emu_buffer
;
610 pix_op
[dxy
](dest_cr
, ptr
, s
->uvlinesize
, 8);
/* Speculatively prefetch reference pixels a few macroblocks ahead of
 * the current MV, for the direction dir (0 forward / 1 backward). */
613 static inline void prefetch_motion(MpegEncContext
*s
, uint8_t **pix
, int dir
){
614 /* fetch pixels for estimated mv 4 macroblocks ahead
615 * optimized for 64byte cache lines */
616 const int shift
= s
->quarter_sample
? 2 : 1;
617 const int mx
= (s
->mv
[dir
][0][0]>>shift
) + 16*s
->mb_x
+ 8;
618 const int my
= (s
->mv
[dir
][0][1]>>shift
) + 16*s
->mb_y
;
/* Luma: 4 lines starting 64 bytes ahead of the estimated position. */
619 int off
= mx
+ (my
+ (s
->mb_x
&3)*4)*s
->linesize
+ 64;
620 s
->dsp
.prefetch(pix
[0]+off
, s
->linesize
, 4);
/* Chroma: Cb and Cr are prefetched together via their plane distance. */
621 off
= (mx
>>1) + ((my
>>1) + (s
->mb_x
&7))*s
->uvlinesize
+ 64;
622 s
->dsp
.prefetch(pix
[1]+off
, pix
[2]-pix
[1], 2);
/*
626 * Motion compensation of a single macroblock.
628 * @param dest_y luma destination pointer
629 * @param dest_cb chroma cb/u destination pointer
630 * @param dest_cr chroma cr/v destination pointer
631 * @param dir direction (0->forward, 1->backward)
632 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
633 * @param pix_op halfpel motion compensation function (average or put normally)
634 * @param qpix_op qpel motion compensation function (average or put normally)
635 * The motion vectors are taken from s->mv and the MV type from s->mv_type.
 *
 * NOTE(review): mangled extraction -- the mv_type switch statement,
 * loop headers over i/j, mb_x/mb_y declarations and many braces are
 * missing (see numbering gaps). Code left byte-identical.
 */
637 static av_always_inline
void MPV_motion_internal(MpegEncContext
*s
,
638 uint8_t *dest_y
, uint8_t *dest_cb
,
639 uint8_t *dest_cr
, int dir
,
640 uint8_t **ref_picture
,
641 op_pixels_func (*pix_op
)[4],
642 qpel_mc_func (*qpix_op
)[16], int is_mpeg12
)
644 int dxy
, mx
, my
, src_x
, src_y
, motion_x
, motion_y
;
651 prefetch_motion(s
, ref_picture
, dir
);
/* OBMC path (H.263 Annex F, P-frames only): build a 4x4 MV cache with
 * the current macroblock's 4 MVs in the center and neighbour MVs (or
 * duplicates at picture borders / intra neighbours) around them. */
653 if(!is_mpeg12
&& s
->obmc
&& s
->pict_type
!= FF_B_TYPE
){
654 int16_t mv_cache
[4][4][2];
655 const int xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
656 const int mot_stride
= s
->b8_stride
;
657 const int mot_xy
= mb_x
*2 + mb_y
*2*mot_stride
;
659 assert(!s
->mb_skipped
);
661 memcpy(mv_cache
[1][1], s
->current_picture
.motion_val
[0][mot_xy
], sizeof(int16_t)*4);
/* Row below is not decoded yet, so row 3 duplicates row 2. */
662 memcpy(mv_cache
[2][1], s
->current_picture
.motion_val
[0][mot_xy
+mot_stride
], sizeof(int16_t)*4);
663 memcpy(mv_cache
[3][1], s
->current_picture
.motion_val
[0][mot_xy
+mot_stride
], sizeof(int16_t)*4);
665 if(mb_y
==0 || IS_INTRA(s
->current_picture
.mb_type
[xy
-s
->mb_stride
])){
666 memcpy(mv_cache
[0][1], mv_cache
[1][1], sizeof(int16_t)*4);
668 memcpy(mv_cache
[0][1], s
->current_picture
.motion_val
[0][mot_xy
-mot_stride
], sizeof(int16_t)*4);
671 if(mb_x
==0 || IS_INTRA(s
->current_picture
.mb_type
[xy
-1])){
672 *(int32_t*)mv_cache
[1][0]= *(int32_t*)mv_cache
[1][1];
673 *(int32_t*)mv_cache
[2][0]= *(int32_t*)mv_cache
[2][1];
675 *(int32_t*)mv_cache
[1][0]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
-1];
676 *(int32_t*)mv_cache
[2][0]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
-1+mot_stride
];
679 if(mb_x
+1>=s
->mb_width
|| IS_INTRA(s
->current_picture
.mb_type
[xy
+1])){
680 *(int32_t*)mv_cache
[1][3]= *(int32_t*)mv_cache
[1][2];
681 *(int32_t*)mv_cache
[2][3]= *(int32_t*)mv_cache
[2][2];
683 *(int32_t*)mv_cache
[1][3]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
+2];
684 *(int32_t*)mv_cache
[2][3]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
+2+mot_stride
];
/* Render each of the 4 luma 8x8 blocks with its mid + 4 neighbour MVs. */
690 const int x
= (i
&1)+1;
691 const int y
= (i
>>1)+1;
693 {mv_cache
[y
][x
][0], mv_cache
[y
][x
][1]},
694 {mv_cache
[y
-1][x
][0], mv_cache
[y
-1][x
][1]},
695 {mv_cache
[y
][x
-1][0], mv_cache
[y
][x
-1][1]},
696 {mv_cache
[y
][x
+1][0], mv_cache
[y
][x
+1][1]},
697 {mv_cache
[y
+1][x
][0], mv_cache
[y
+1][x
][1]}};
699 obmc_motion(s
, dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
,
701 mb_x
* 16 + (i
& 1) * 8, mb_y
* 16 + (i
>>1) * 8,
708 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
))
709 chroma_4mv_motion(s
, dest_cb
, dest_cr
, ref_picture
, pix_op
[1], mx
, my
);
/* MV_TYPE_16X16: GMC, qpel, WMV2 mspel, or plain half-pel. */
717 if(s
->real_sprite_warping_points
==1){
718 gmc1_motion(s
, dest_y
, dest_cb
, dest_cr
,
721 gmc_motion(s
, dest_y
, dest_cb
, dest_cr
,
724 }else if(!is_mpeg12
&& s
->quarter_sample
){
725 qpel_motion(s
, dest_y
, dest_cb
, dest_cr
,
727 ref_picture
, pix_op
, qpix_op
,
728 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
729 }else if(!is_mpeg12
&& ENABLE_WMV2
&& s
->mspel
){
730 ff_mspel_motion(s
, dest_y
, dest_cb
, dest_cr
,
732 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
735 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
738 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
/* MV_TYPE_8X8: one MV per 8x8 luma block; mx/my accumulate the four
 * MVs for the averaged chroma vector. */
745 if(s
->quarter_sample
){
747 motion_x
= s
->mv
[dir
][i
][0];
748 motion_y
= s
->mv
[dir
][i
][1];
750 dxy
= ((motion_y
& 3) << 2) | (motion_x
& 3);
751 src_x
= mb_x
* 16 + (motion_x
>> 2) + (i
& 1) * 8;
752 src_y
= mb_y
* 16 + (motion_y
>> 2) + (i
>>1) * 8;
754 /* WARNING: do not forget half pels */
755 src_x
= av_clip(src_x
, -16, s
->width
);
756 if (src_x
== s
->width
)
758 src_y
= av_clip(src_y
, -16, s
->height
);
759 if (src_y
== s
->height
)
762 ptr
= ref_picture
[0] + (src_y
* s
->linesize
) + (src_x
);
763 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
764 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&3) - 8
765 || (unsigned)src_y
> s
->v_edge_pos
- (motion_y
&3) - 8 ){
766 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
,
769 s
->h_edge_pos
, s
->v_edge_pos
);
770 ptr
= s
->edge_emu_buffer
;
773 dest
= dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
;
774 qpix_op
[1][dxy
](dest
, ptr
, s
->linesize
);
776 mx
+= s
->mv
[dir
][i
][0]/2;
777 my
+= s
->mv
[dir
][i
][1]/2;
781 hpel_motion(s
, dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
,
782 ref_picture
[0], 0, 0,
783 mb_x
* 16 + (i
& 1) * 8, mb_y
* 16 + (i
>>1) * 8,
784 s
->width
, s
->height
, s
->linesize
,
785 s
->h_edge_pos
, s
->v_edge_pos
,
787 s
->mv
[dir
][i
][0], s
->mv
[dir
][i
][1]);
789 mx
+= s
->mv
[dir
][i
][0];
790 my
+= s
->mv
[dir
][i
][1];
794 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
))
795 chroma_4mv_motion(s
, dest_cb
, dest_cr
, ref_picture
, pix_op
[1], mx
, my
);
/* MV_TYPE_FIELD: in a frame picture, predict top and bottom fields
 * separately; in a field picture, a single field prediction, possibly
 * referencing the current picture when the opposite parity belongs to
 * the same frame. */
799 if (s
->picture_structure
== PICT_FRAME
) {
800 if(!is_mpeg12
&& s
->quarter_sample
){
802 qpel_motion(s
, dest_y
, dest_cb
, dest_cr
,
803 1, i
, s
->field_select
[dir
][i
],
804 ref_picture
, pix_op
, qpix_op
,
805 s
->mv
[dir
][i
][0], s
->mv
[dir
][i
][1], 8);
809 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
810 1, 0, s
->field_select
[dir
][0],
812 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 8);
814 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
815 1, 1, s
->field_select
[dir
][1],
817 s
->mv
[dir
][1][0], s
->mv
[dir
][1][1], 8);
820 if(s
->picture_structure
!= s
->field_select
[dir
][0] + 1 && s
->pict_type
!= FF_B_TYPE
&& !s
->first_field
){
821 ref_picture
= s
->current_picture_ptr
->data
;
824 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
825 0, 0, s
->field_select
[dir
][0],
827 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
/* MV_TYPE_16X8: two 16x8 partitions, each selecting its own field. */
832 uint8_t ** ref2picture
;
834 if(s
->picture_structure
== s
->field_select
[dir
][i
] + 1
835 || s
->pict_type
== FF_B_TYPE
|| s
->first_field
){
836 ref2picture
= ref_picture
;
838 ref2picture
= s
->current_picture_ptr
->data
;
841 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
842 0, 0, s
->field_select
[dir
][i
],
844 s
->mv
[dir
][i
][0], s
->mv
[dir
][i
][1] + 16*i
, 8);
846 dest_y
+= 16*s
->linesize
;
847 dest_cb
+= (16>>s
->chroma_y_shift
)*s
->uvlinesize
;
848 dest_cr
+= (16>>s
->chroma_y_shift
)*s
->uvlinesize
;
/* MV_TYPE_DMV: dual-prime -- put the first prediction, then average
 * the opposite-parity prediction on top. */
852 if(s
->picture_structure
== PICT_FRAME
){
856 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
859 s
->mv
[dir
][2*i
+ j
][0], s
->mv
[dir
][2*i
+ j
][1], 8);
861 pix_op
= s
->dsp
.avg_pixels_tab
;
865 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
866 0, 0, s
->picture_structure
!= i
+1,
868 s
->mv
[dir
][2*i
][0],s
->mv
[dir
][2*i
][1],16);
870 // after put we make avg of the same block
871 pix_op
=s
->dsp
.avg_pixels_tab
;
873 //opposite parity is always in the same frame if this is second field
875 ref_picture
= s
->current_picture_ptr
->data
;
/* Public entry point for macroblock motion compensation: dispatches to
 * MPV_motion_internal with a compile-time-constant is_mpeg12 flag so
 * both specializations are generated without runtime overhead.
 * NOTE(review): the else between the two calls is missing in this
 * mangled extraction. */
884 static inline void MPV_motion(MpegEncContext
*s
,
885 uint8_t *dest_y
, uint8_t *dest_cb
,
886 uint8_t *dest_cr
, int dir
,
887 uint8_t **ref_picture
,
888 op_pixels_func (*pix_op
)[4],
889 qpel_mc_func (*qpix_op
)[16])
892 if(s
->out_format
== FMT_MPEG1
)
893 MPV_motion_internal(s
, dest_y
, dest_cb
, dest_cr
, dir
,
894 ref_picture
, pix_op
, qpix_op
, 1);
897 MPV_motion_internal(s
, dest_y
, dest_cb
, dest_cr
, dir
,
898 ref_picture
, pix_op
, qpix_op
, 0);
900 #endif /* AVCODEC_MPEGVIDEO_COMMON_H */