2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 * @file mpegvideo_common.h
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #ifndef FFMPEG_MPEGVIDEO_COMMON_H
31 #define FFMPEG_MPEGVIDEO_COMMON_H
35 #include "mpegvideo.h"
41 int dct_quantize_c(MpegEncContext
*s
, DCTELEM
*block
, int n
, int qscale
, int *overflow
);
42 int dct_quantize_trellis_c(MpegEncContext
*s
, DCTELEM
*block
, int n
, int qscale
, int *overflow
);
43 void denoise_dct_c(MpegEncContext
*s
, DCTELEM
*block
);
44 void copy_picture(Picture
*dst
, Picture
*src
);
48 * The pixels are allocated/set by calling get_buffer() if shared=0
50 int alloc_picture(MpegEncContext
*s
, Picture
*pic
, int shared
);
53 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
54 * the changed fields will not depend upon the prior state of the MpegEncContext.
56 void MPV_common_defaults(MpegEncContext
*s
);
58 static inline void gmc1_motion(MpegEncContext
*s
,
59 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
60 uint8_t **ref_picture
)
63 int offset
, src_x
, src_y
, linesize
, uvlinesize
;
64 int motion_x
, motion_y
;
67 motion_x
= s
->sprite_offset
[0][0];
68 motion_y
= s
->sprite_offset
[0][1];
69 src_x
= s
->mb_x
* 16 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
70 src_y
= s
->mb_y
* 16 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
71 motion_x
<<=(3-s
->sprite_warping_accuracy
);
72 motion_y
<<=(3-s
->sprite_warping_accuracy
);
73 src_x
= av_clip(src_x
, -16, s
->width
);
74 if (src_x
== s
->width
)
76 src_y
= av_clip(src_y
, -16, s
->height
);
77 if (src_y
== s
->height
)
80 linesize
= s
->linesize
;
81 uvlinesize
= s
->uvlinesize
;
83 ptr
= ref_picture
[0] + (src_y
* linesize
) + src_x
;
85 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
86 if( (unsigned)src_x
>= s
->h_edge_pos
- 17
87 || (unsigned)src_y
>= s
->v_edge_pos
- 17){
88 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, linesize
, 17, 17, src_x
, src_y
, s
->h_edge_pos
, s
->v_edge_pos
);
89 ptr
= s
->edge_emu_buffer
;
93 if((motion_x
|motion_y
)&7){
94 s
->dsp
.gmc1(dest_y
, ptr
, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
95 s
->dsp
.gmc1(dest_y
+8, ptr
+8, linesize
, 16, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
99 dxy
= ((motion_x
>>3)&1) | ((motion_y
>>2)&2);
101 s
->dsp
.put_no_rnd_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
103 s
->dsp
.put_pixels_tab
[0][dxy
](dest_y
, ptr
, linesize
, 16);
107 if(ENABLE_GRAY
&& s
->flags
&CODEC_FLAG_GRAY
) return;
109 motion_x
= s
->sprite_offset
[1][0];
110 motion_y
= s
->sprite_offset
[1][1];
111 src_x
= s
->mb_x
* 8 + (motion_x
>> (s
->sprite_warping_accuracy
+1));
112 src_y
= s
->mb_y
* 8 + (motion_y
>> (s
->sprite_warping_accuracy
+1));
113 motion_x
<<=(3-s
->sprite_warping_accuracy
);
114 motion_y
<<=(3-s
->sprite_warping_accuracy
);
115 src_x
= av_clip(src_x
, -8, s
->width
>>1);
116 if (src_x
== s
->width
>>1)
118 src_y
= av_clip(src_y
, -8, s
->height
>>1);
119 if (src_y
== s
->height
>>1)
122 offset
= (src_y
* uvlinesize
) + src_x
;
123 ptr
= ref_picture
[1] + offset
;
124 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
125 if( (unsigned)src_x
>= (s
->h_edge_pos
>>1) - 9
126 || (unsigned)src_y
>= (s
->v_edge_pos
>>1) - 9){
127 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
128 ptr
= s
->edge_emu_buffer
;
132 s
->dsp
.gmc1(dest_cb
, ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
134 ptr
= ref_picture
[2] + offset
;
136 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
137 ptr
= s
->edge_emu_buffer
;
139 s
->dsp
.gmc1(dest_cr
, ptr
, uvlinesize
, 8, motion_x
&15, motion_y
&15, 128 - s
->no_rounding
);
144 static inline void gmc_motion(MpegEncContext
*s
,
145 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
146 uint8_t **ref_picture
)
149 int linesize
, uvlinesize
;
150 const int a
= s
->sprite_warping_accuracy
;
153 linesize
= s
->linesize
;
154 uvlinesize
= s
->uvlinesize
;
156 ptr
= ref_picture
[0];
158 ox
= s
->sprite_offset
[0][0] + s
->sprite_delta
[0][0]*s
->mb_x
*16 + s
->sprite_delta
[0][1]*s
->mb_y
*16;
159 oy
= s
->sprite_offset
[0][1] + s
->sprite_delta
[1][0]*s
->mb_x
*16 + s
->sprite_delta
[1][1]*s
->mb_y
*16;
161 s
->dsp
.gmc(dest_y
, ptr
, linesize
, 16,
164 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
165 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
166 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
167 s
->h_edge_pos
, s
->v_edge_pos
);
168 s
->dsp
.gmc(dest_y
+8, ptr
, linesize
, 16,
169 ox
+ s
->sprite_delta
[0][0]*8,
170 oy
+ s
->sprite_delta
[1][0]*8,
171 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
172 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
173 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
174 s
->h_edge_pos
, s
->v_edge_pos
);
176 if(ENABLE_GRAY
&& s
->flags
&CODEC_FLAG_GRAY
) return;
178 ox
= s
->sprite_offset
[1][0] + s
->sprite_delta
[0][0]*s
->mb_x
*8 + s
->sprite_delta
[0][1]*s
->mb_y
*8;
179 oy
= s
->sprite_offset
[1][1] + s
->sprite_delta
[1][0]*s
->mb_x
*8 + s
->sprite_delta
[1][1]*s
->mb_y
*8;
181 ptr
= ref_picture
[1];
182 s
->dsp
.gmc(dest_cb
, ptr
, uvlinesize
, 8,
185 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
186 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
187 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
188 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
190 ptr
= ref_picture
[2];
191 s
->dsp
.gmc(dest_cr
, ptr
, uvlinesize
, 8,
194 s
->sprite_delta
[0][0], s
->sprite_delta
[0][1],
195 s
->sprite_delta
[1][0], s
->sprite_delta
[1][1],
196 a
+1, (1<<(2*a
+1)) - s
->no_rounding
,
197 s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
200 static inline int hpel_motion(MpegEncContext
*s
,
201 uint8_t *dest
, uint8_t *src
,
202 int field_based
, int field_select
,
203 int src_x
, int src_y
,
204 int width
, int height
, int stride
,
205 int h_edge_pos
, int v_edge_pos
,
206 int w
, int h
, op_pixels_func
*pix_op
,
207 int motion_x
, int motion_y
)
212 dxy
= ((motion_y
& 1) << 1) | (motion_x
& 1);
213 src_x
+= motion_x
>> 1;
214 src_y
+= motion_y
>> 1;
216 /* WARNING: do no forget half pels */
217 src_x
= av_clip(src_x
, -16, width
); //FIXME unneeded for emu?
220 src_y
= av_clip(src_y
, -16, height
);
223 src
+= src_y
* stride
+ src_x
;
225 if(s
->unrestricted_mv
&& (s
->flags
&CODEC_FLAG_EMU_EDGE
)){
226 if( (unsigned)src_x
> h_edge_pos
- (motion_x
&1) - w
227 || (unsigned)src_y
> v_edge_pos
- (motion_y
&1) - h
){
228 ff_emulated_edge_mc(s
->edge_emu_buffer
, src
, s
->linesize
, w
+1, (h
+1)<<field_based
,
229 src_x
, src_y
<<field_based
, h_edge_pos
, s
->v_edge_pos
);
230 src
= s
->edge_emu_buffer
;
236 pix_op
[dxy
](dest
, src
, stride
, h
);
240 /* apply one mpeg motion vector to the three components */
241 static av_always_inline
void mpeg_motion(MpegEncContext
*s
,
242 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
243 int field_based
, int bottom_field
, int field_select
,
244 uint8_t **ref_picture
, op_pixels_func (*pix_op
)[4],
245 int motion_x
, int motion_y
, int h
)
247 uint8_t *ptr_y
, *ptr_cb
, *ptr_cr
;
248 int dxy
, uvdxy
, mx
, my
, src_x
, src_y
, uvsrc_x
, uvsrc_y
, v_edge_pos
, uvlinesize
, linesize
;
251 if(s
->quarter_sample
)
258 v_edge_pos
= s
->v_edge_pos
>> field_based
;
259 linesize
= s
->current_picture
.linesize
[0] << field_based
;
260 uvlinesize
= s
->current_picture
.linesize
[1] << field_based
;
262 dxy
= ((motion_y
& 1) << 1) | (motion_x
& 1);
263 src_x
= s
->mb_x
* 16 + (motion_x
>> 1);
264 src_y
=(s
->mb_y
<<(4-field_based
)) + (motion_y
>> 1);
266 if (s
->out_format
== FMT_H263
) {
267 if((s
->workaround_bugs
& FF_BUG_HPEL_CHROMA
) && field_based
){
268 mx
= (motion_x
>>1)|(motion_x
&1);
270 uvdxy
= ((my
& 1) << 1) | (mx
& 1);
271 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
272 uvsrc_y
= (s
->mb_y
<<(3-field_based
)) + (my
>> 1);
274 uvdxy
= dxy
| (motion_y
& 2) | ((motion_x
& 2) >> 1);
278 }else if(s
->out_format
== FMT_H261
){//even chroma mv's are full pel in H261
282 uvsrc_x
= s
->mb_x
*8 + mx
;
283 uvsrc_y
= s
->mb_y
*8 + my
;
285 if(s
->chroma_y_shift
){
288 uvdxy
= ((my
& 1) << 1) | (mx
& 1);
289 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
290 uvsrc_y
= (s
->mb_y
<<(3-field_based
)) + (my
>> 1);
292 if(s
->chroma_x_shift
){
295 uvdxy
= ((motion_y
& 1) << 1) | (mx
& 1);
296 uvsrc_x
= s
->mb_x
* 8 + (mx
>> 1);
307 ptr_y
= ref_picture
[0] + src_y
* linesize
+ src_x
;
308 ptr_cb
= ref_picture
[1] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
309 ptr_cr
= ref_picture
[2] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
311 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&1) - 16
312 || (unsigned)src_y
> v_edge_pos
- (motion_y
&1) - h
){
313 if(s
->codec_id
== CODEC_ID_MPEG2VIDEO
||
314 s
->codec_id
== CODEC_ID_MPEG1VIDEO
){
315 av_log(s
->avctx
,AV_LOG_DEBUG
,"MPEG motion vector out of boundary\n");
318 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr_y
, s
->linesize
, 17, 17+field_based
,
319 src_x
, src_y
<<field_based
, s
->h_edge_pos
, s
->v_edge_pos
);
320 ptr_y
= s
->edge_emu_buffer
;
321 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
322 uint8_t *uvbuf
= s
->edge_emu_buffer
+18*s
->linesize
;
323 ff_emulated_edge_mc(uvbuf
, ptr_cb
, s
->uvlinesize
, 9, 9+field_based
,
324 uvsrc_x
, uvsrc_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
325 ff_emulated_edge_mc(uvbuf
+16, ptr_cr
, s
->uvlinesize
, 9, 9+field_based
,
326 uvsrc_x
, uvsrc_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
332 if(bottom_field
){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
333 dest_y
+= s
->linesize
;
334 dest_cb
+= s
->uvlinesize
;
335 dest_cr
+= s
->uvlinesize
;
339 ptr_y
+= s
->linesize
;
340 ptr_cb
+= s
->uvlinesize
;
341 ptr_cr
+= s
->uvlinesize
;
344 pix_op
[0][dxy
](dest_y
, ptr_y
, linesize
, h
);
346 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
347 pix_op
[s
->chroma_x_shift
][uvdxy
](dest_cb
, ptr_cb
, uvlinesize
, h
>> s
->chroma_y_shift
);
348 pix_op
[s
->chroma_x_shift
][uvdxy
](dest_cr
, ptr_cr
, uvlinesize
, h
>> s
->chroma_y_shift
);
350 if((ENABLE_H261_ENCODER
|| ENABLE_H261_DECODER
) && s
->out_format
== FMT_H261
){
351 ff_h261_loop_filter(s
);
//FIXME move to dsputil, avg variant, 16x16 version
/**
 * Blend the five 8x8 predictions (mid, top, left, right, bottom) into dst
 * with position-dependent 1/8 weights; the weights at each pixel sum to 8.
 * NOTE(review): the row-advance pattern between weight groups was
 * reconstructed from the visible weight table — verify against upstream.
 */
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
    int x;
    uint8_t * const top    = src[1];
    uint8_t * const left   = src[2];
    uint8_t * const mid    = src[0];
    uint8_t * const right  = src[3];
    uint8_t * const bottom = src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x = 0;
    /* top rows */
    OBMC_FILTER (x  , 2, 2, 4, 0, 0);
    OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
    x += stride;
    OBMC_FILTER (x  , 1, 2, 5, 0, 0);
    OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
    x += stride;
    /* upper middle rows */
    OBMC_FILTER4(x  , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
    x += 2*stride;
    /* lower middle rows */
    OBMC_FILTER4(x  , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
    x += 2*stride;
    /* bottom rows */
    OBMC_FILTER (x  , 0, 2, 5, 0, 1);
    OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
    x += stride;
    OBMC_FILTER (x  , 0, 2, 4, 0, 2);
    OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
}
407 /* obmc for 1 8x8 luma block */
408 static inline void obmc_motion(MpegEncContext
*s
,
409 uint8_t *dest
, uint8_t *src
,
410 int src_x
, int src_y
,
411 op_pixels_func
*pix_op
,
412 int16_t mv
[5][2]/* mid top left right bottom*/)
418 assert(s
->quarter_sample
==0);
421 if(i
&& mv
[i
][0]==mv
[MID
][0] && mv
[i
][1]==mv
[MID
][1]){
424 ptr
[i
]= s
->obmc_scratchpad
+ 8*(i
&1) + s
->linesize
*8*(i
>>1);
425 hpel_motion(s
, ptr
[i
], src
, 0, 0,
427 s
->width
, s
->height
, s
->linesize
,
428 s
->h_edge_pos
, s
->v_edge_pos
,
434 put_obmc(dest
, ptr
, s
->linesize
);
437 static inline void qpel_motion(MpegEncContext
*s
,
438 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
439 int field_based
, int bottom_field
, int field_select
,
440 uint8_t **ref_picture
, op_pixels_func (*pix_op
)[4],
441 qpel_mc_func (*qpix_op
)[16],
442 int motion_x
, int motion_y
, int h
)
444 uint8_t *ptr_y
, *ptr_cb
, *ptr_cr
;
445 int dxy
, uvdxy
, mx
, my
, src_x
, src_y
, uvsrc_x
, uvsrc_y
, v_edge_pos
, linesize
, uvlinesize
;
447 dxy
= ((motion_y
& 3) << 2) | (motion_x
& 3);
448 src_x
= s
->mb_x
* 16 + (motion_x
>> 2);
449 src_y
= s
->mb_y
* (16 >> field_based
) + (motion_y
>> 2);
451 v_edge_pos
= s
->v_edge_pos
>> field_based
;
452 linesize
= s
->linesize
<< field_based
;
453 uvlinesize
= s
->uvlinesize
<< field_based
;
458 }else if(s
->workaround_bugs
&FF_BUG_QPEL_CHROMA2
){
459 static const int rtab
[8]= {0,0,1,1,0,0,0,1};
460 mx
= (motion_x
>>1) + rtab
[motion_x
&7];
461 my
= (motion_y
>>1) + rtab
[motion_y
&7];
462 }else if(s
->workaround_bugs
&FF_BUG_QPEL_CHROMA
){
463 mx
= (motion_x
>>1)|(motion_x
&1);
464 my
= (motion_y
>>1)|(motion_y
&1);
472 uvdxy
= (mx
&1) | ((my
&1)<<1);
476 uvsrc_x
= s
->mb_x
* 8 + mx
;
477 uvsrc_y
= s
->mb_y
* (8 >> field_based
) + my
;
479 ptr_y
= ref_picture
[0] + src_y
* linesize
+ src_x
;
480 ptr_cb
= ref_picture
[1] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
481 ptr_cr
= ref_picture
[2] + uvsrc_y
* uvlinesize
+ uvsrc_x
;
483 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&3) - 16
484 || (unsigned)src_y
> v_edge_pos
- (motion_y
&3) - h
){
485 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr_y
, s
->linesize
, 17, 17+field_based
,
486 src_x
, src_y
<<field_based
, s
->h_edge_pos
, s
->v_edge_pos
);
487 ptr_y
= s
->edge_emu_buffer
;
488 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
489 uint8_t *uvbuf
= s
->edge_emu_buffer
+ 18*s
->linesize
;
490 ff_emulated_edge_mc(uvbuf
, ptr_cb
, s
->uvlinesize
, 9, 9 + field_based
,
491 uvsrc_x
, uvsrc_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
492 ff_emulated_edge_mc(uvbuf
+ 16, ptr_cr
, s
->uvlinesize
, 9, 9 + field_based
,
493 uvsrc_x
, uvsrc_y
<<field_based
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
500 qpix_op
[0][dxy
](dest_y
, ptr_y
, linesize
);
503 dest_y
+= s
->linesize
;
504 dest_cb
+= s
->uvlinesize
;
505 dest_cr
+= s
->uvlinesize
;
509 ptr_y
+= s
->linesize
;
510 ptr_cb
+= s
->uvlinesize
;
511 ptr_cr
+= s
->uvlinesize
;
513 //damn interlaced mode
514 //FIXME boundary mirroring is not exactly correct here
515 qpix_op
[1][dxy
](dest_y
, ptr_y
, linesize
);
516 qpix_op
[1][dxy
](dest_y
+8, ptr_y
+8, linesize
);
518 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
)){
519 pix_op
[1][uvdxy
](dest_cr
, ptr_cr
, uvlinesize
, h
>> 1);
520 pix_op
[1][uvdxy
](dest_cb
, ptr_cb
, uvlinesize
, h
>> 1);
525 * h263 chroma 4mv motion compensation.
527 static inline void chroma_4mv_motion(MpegEncContext
*s
,
528 uint8_t *dest_cb
, uint8_t *dest_cr
,
529 uint8_t **ref_picture
,
530 op_pixels_func
*pix_op
,
532 int dxy
, emu
=0, src_x
, src_y
, offset
;
535 /* In case of 8X8, we construct a single chroma motion vector
536 with a special rounding */
537 mx
= ff_h263_round_chroma(mx
);
538 my
= ff_h263_round_chroma(my
);
540 dxy
= ((my
& 1) << 1) | (mx
& 1);
544 src_x
= s
->mb_x
* 8 + mx
;
545 src_y
= s
->mb_y
* 8 + my
;
546 src_x
= av_clip(src_x
, -8, s
->width
/2);
547 if (src_x
== s
->width
/2)
549 src_y
= av_clip(src_y
, -8, s
->height
/2);
550 if (src_y
== s
->height
/2)
553 offset
= (src_y
* (s
->uvlinesize
)) + src_x
;
554 ptr
= ref_picture
[1] + offset
;
555 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
556 if( (unsigned)src_x
> (s
->h_edge_pos
>>1) - (dxy
&1) - 8
557 || (unsigned)src_y
> (s
->v_edge_pos
>>1) - (dxy
>>1) - 8){
558 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
559 ptr
= s
->edge_emu_buffer
;
563 pix_op
[dxy
](dest_cb
, ptr
, s
->uvlinesize
, 8);
565 ptr
= ref_picture
[2] + offset
;
567 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->uvlinesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
>>1, s
->v_edge_pos
>>1);
568 ptr
= s
->edge_emu_buffer
;
570 pix_op
[dxy
](dest_cr
, ptr
, s
->uvlinesize
, 8);
573 static inline void prefetch_motion(MpegEncContext
*s
, uint8_t **pix
, int dir
){
574 /* fetch pixels for estimated mv 4 macroblocks ahead
575 * optimized for 64byte cache lines */
576 const int shift
= s
->quarter_sample
? 2 : 1;
577 const int mx
= (s
->mv
[dir
][0][0]>>shift
) + 16*s
->mb_x
+ 8;
578 const int my
= (s
->mv
[dir
][0][1]>>shift
) + 16*s
->mb_y
;
579 int off
= mx
+ (my
+ (s
->mb_x
&3)*4)*s
->linesize
+ 64;
580 s
->dsp
.prefetch(pix
[0]+off
, s
->linesize
, 4);
581 off
= (mx
>>1) + ((my
>>1) + (s
->mb_x
&7))*s
->uvlinesize
+ 64;
582 s
->dsp
.prefetch(pix
[1]+off
, pix
[2]-pix
[1], 2);
586 * motion compensation of a single macroblock
588 * @param dest_y luma destination pointer
589 * @param dest_cb chroma cb/u destination pointer
590 * @param dest_cr chroma cr/v destination pointer
591 * @param dir direction (0->forward, 1->backward)
592 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
593 * @param pic_op halfpel motion compensation function (average or put normally)
594 * @param pic_op qpel motion compensation function (average or put normally)
595 * the motion vectors are taken from s->mv and the MV type from s->mv_type
597 static inline void MPV_motion(MpegEncContext
*s
,
598 uint8_t *dest_y
, uint8_t *dest_cb
, uint8_t *dest_cr
,
599 int dir
, uint8_t **ref_picture
,
600 op_pixels_func (*pix_op
)[4], qpel_mc_func (*qpix_op
)[16])
602 int dxy
, mx
, my
, src_x
, src_y
, motion_x
, motion_y
;
609 prefetch_motion(s
, ref_picture
, dir
);
611 if(s
->obmc
&& s
->pict_type
!= B_TYPE
){
612 int16_t mv_cache
[4][4][2];
613 const int xy
= s
->mb_x
+ s
->mb_y
*s
->mb_stride
;
614 const int mot_stride
= s
->b8_stride
;
615 const int mot_xy
= mb_x
*2 + mb_y
*2*mot_stride
;
617 assert(!s
->mb_skipped
);
619 memcpy(mv_cache
[1][1], s
->current_picture
.motion_val
[0][mot_xy
], sizeof(int16_t)*4);
620 memcpy(mv_cache
[2][1], s
->current_picture
.motion_val
[0][mot_xy
+mot_stride
], sizeof(int16_t)*4);
621 memcpy(mv_cache
[3][1], s
->current_picture
.motion_val
[0][mot_xy
+mot_stride
], sizeof(int16_t)*4);
623 if(mb_y
==0 || IS_INTRA(s
->current_picture
.mb_type
[xy
-s
->mb_stride
])){
624 memcpy(mv_cache
[0][1], mv_cache
[1][1], sizeof(int16_t)*4);
626 memcpy(mv_cache
[0][1], s
->current_picture
.motion_val
[0][mot_xy
-mot_stride
], sizeof(int16_t)*4);
629 if(mb_x
==0 || IS_INTRA(s
->current_picture
.mb_type
[xy
-1])){
630 *(int32_t*)mv_cache
[1][0]= *(int32_t*)mv_cache
[1][1];
631 *(int32_t*)mv_cache
[2][0]= *(int32_t*)mv_cache
[2][1];
633 *(int32_t*)mv_cache
[1][0]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
-1];
634 *(int32_t*)mv_cache
[2][0]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
-1+mot_stride
];
637 if(mb_x
+1>=s
->mb_width
|| IS_INTRA(s
->current_picture
.mb_type
[xy
+1])){
638 *(int32_t*)mv_cache
[1][3]= *(int32_t*)mv_cache
[1][2];
639 *(int32_t*)mv_cache
[2][3]= *(int32_t*)mv_cache
[2][2];
641 *(int32_t*)mv_cache
[1][3]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
+2];
642 *(int32_t*)mv_cache
[2][3]= *(int32_t*)s
->current_picture
.motion_val
[0][mot_xy
+2+mot_stride
];
648 const int x
= (i
&1)+1;
649 const int y
= (i
>>1)+1;
651 {mv_cache
[y
][x
][0], mv_cache
[y
][x
][1]},
652 {mv_cache
[y
-1][x
][0], mv_cache
[y
-1][x
][1]},
653 {mv_cache
[y
][x
-1][0], mv_cache
[y
][x
-1][1]},
654 {mv_cache
[y
][x
+1][0], mv_cache
[y
][x
+1][1]},
655 {mv_cache
[y
+1][x
][0], mv_cache
[y
+1][x
][1]}};
657 obmc_motion(s
, dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
,
659 mb_x
* 16 + (i
& 1) * 8, mb_y
* 16 + (i
>>1) * 8,
666 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
))
667 chroma_4mv_motion(s
, dest_cb
, dest_cr
, ref_picture
, pix_op
[1], mx
, my
);
675 if(s
->real_sprite_warping_points
==1){
676 gmc1_motion(s
, dest_y
, dest_cb
, dest_cr
,
679 gmc_motion(s
, dest_y
, dest_cb
, dest_cr
,
682 }else if(s
->quarter_sample
){
683 qpel_motion(s
, dest_y
, dest_cb
, dest_cr
,
685 ref_picture
, pix_op
, qpix_op
,
686 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
687 }else if(ENABLE_WMV2
&& s
->mspel
){
688 ff_mspel_motion(s
, dest_y
, dest_cb
, dest_cr
,
690 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
693 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
696 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
702 if(s
->quarter_sample
){
704 motion_x
= s
->mv
[dir
][i
][0];
705 motion_y
= s
->mv
[dir
][i
][1];
707 dxy
= ((motion_y
& 3) << 2) | (motion_x
& 3);
708 src_x
= mb_x
* 16 + (motion_x
>> 2) + (i
& 1) * 8;
709 src_y
= mb_y
* 16 + (motion_y
>> 2) + (i
>>1) * 8;
711 /* WARNING: do no forget half pels */
712 src_x
= av_clip(src_x
, -16, s
->width
);
713 if (src_x
== s
->width
)
715 src_y
= av_clip(src_y
, -16, s
->height
);
716 if (src_y
== s
->height
)
719 ptr
= ref_picture
[0] + (src_y
* s
->linesize
) + (src_x
);
720 if(s
->flags
&CODEC_FLAG_EMU_EDGE
){
721 if( (unsigned)src_x
> s
->h_edge_pos
- (motion_x
&3) - 8
722 || (unsigned)src_y
> s
->v_edge_pos
- (motion_y
&3) - 8 ){
723 ff_emulated_edge_mc(s
->edge_emu_buffer
, ptr
, s
->linesize
, 9, 9, src_x
, src_y
, s
->h_edge_pos
, s
->v_edge_pos
);
724 ptr
= s
->edge_emu_buffer
;
727 dest
= dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
;
728 qpix_op
[1][dxy
](dest
, ptr
, s
->linesize
);
730 mx
+= s
->mv
[dir
][i
][0]/2;
731 my
+= s
->mv
[dir
][i
][1]/2;
735 hpel_motion(s
, dest_y
+ ((i
& 1) * 8) + (i
>> 1) * 8 * s
->linesize
,
736 ref_picture
[0], 0, 0,
737 mb_x
* 16 + (i
& 1) * 8, mb_y
* 16 + (i
>>1) * 8,
738 s
->width
, s
->height
, s
->linesize
,
739 s
->h_edge_pos
, s
->v_edge_pos
,
741 s
->mv
[dir
][i
][0], s
->mv
[dir
][i
][1]);
743 mx
+= s
->mv
[dir
][i
][0];
744 my
+= s
->mv
[dir
][i
][1];
748 if(!ENABLE_GRAY
|| !(s
->flags
&CODEC_FLAG_GRAY
))
749 chroma_4mv_motion(s
, dest_cb
, dest_cr
, ref_picture
, pix_op
[1], mx
, my
);
752 if (s
->picture_structure
== PICT_FRAME
) {
753 if(s
->quarter_sample
){
755 qpel_motion(s
, dest_y
, dest_cb
, dest_cr
,
756 1, i
, s
->field_select
[dir
][i
],
757 ref_picture
, pix_op
, qpix_op
,
758 s
->mv
[dir
][i
][0], s
->mv
[dir
][i
][1], 8);
762 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
763 1, 0, s
->field_select
[dir
][0],
765 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 8);
767 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
768 1, 1, s
->field_select
[dir
][1],
770 s
->mv
[dir
][1][0], s
->mv
[dir
][1][1], 8);
773 if(s
->picture_structure
!= s
->field_select
[dir
][0] + 1 && s
->pict_type
!= B_TYPE
&& !s
->first_field
){
774 ref_picture
= s
->current_picture_ptr
->data
;
777 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
778 0, 0, s
->field_select
[dir
][0],
780 s
->mv
[dir
][0][0], s
->mv
[dir
][0][1], 16);
785 uint8_t ** ref2picture
;
787 if(s
->picture_structure
== s
->field_select
[dir
][i
] + 1 || s
->pict_type
== B_TYPE
|| s
->first_field
){
788 ref2picture
= ref_picture
;
790 ref2picture
= s
->current_picture_ptr
->data
;
793 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
794 0, 0, s
->field_select
[dir
][i
],
796 s
->mv
[dir
][i
][0], s
->mv
[dir
][i
][1] + 16*i
, 8);
798 dest_y
+= 16*s
->linesize
;
799 dest_cb
+= (16>>s
->chroma_y_shift
)*s
->uvlinesize
;
800 dest_cr
+= (16>>s
->chroma_y_shift
)*s
->uvlinesize
;
804 if(s
->picture_structure
== PICT_FRAME
){
808 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
811 s
->mv
[dir
][2*i
+ j
][0], s
->mv
[dir
][2*i
+ j
][1], 8);
813 pix_op
= s
->dsp
.avg_pixels_tab
;
817 mpeg_motion(s
, dest_y
, dest_cb
, dest_cr
,
818 0, 0, s
->picture_structure
!= i
+1,
820 s
->mv
[dir
][2*i
][0],s
->mv
[dir
][2*i
][1],16);
822 // after put we make avg of the same block
823 pix_op
=s
->dsp
.avg_pixels_tab
;
825 //opposite parity is always in the same frame if this is second field
827 ref_picture
= s
->current_picture_ptr
->data
;
836 #endif /* FFMPEG_MPEGVIDEO_COMMON_H */