libmpcodecs/vf_mcdeint.c
/*
 * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of MPlayer.
 *
 * MPlayer is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * MPlayer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with MPlayer; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
/*
Known Issues:
* The motion estimation is somewhat at the mercy of the input; if the input
  frames are created purely based on spatial interpolation, then for example
  a thin black line or another random, non-interpolatable pattern
  will cause problems.
  Note: completely ignoring the "unavailable" lines during motion estimation
  didn't look any better, so the most obvious solution would be to improve
  tfields or penalize problematic motion vectors ...

* If non-iterative ME is used then snow currently ignores the OBMC window
  and as a result sometimes creates artifacts.

* Only past frames are used; ideally we should use future frames too. Something
  like filtering the whole movie first forward and then backward seems
  like an interesting idea, but the current filter framework is FAR from
  supporting such things.

* Combining the motion compensated image with the input image also isn't
  as trivial as it seems: simply blindly taking even lines from one and
  odd ones from the other doesn't work at all, as ME/MC sometimes simply
  has nothing in the previous frames which matches the current. The current
  algorithm has been found by trial and error and almost certainly can be
  improved ...
*/
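/*
 * Rough usage notes (added; derived from the code below and the known issues
 * above, not from the original source): the filter options are parsed in
 * vf_open() as "mcdeint=mode:parity:qp", and the filter is meant to run after
 * something like tfields that has already turned each field into a full,
 * spatially interpolated frame, since mcdeint only replaces the lines of the
 * "missing" field with a motion compensated estimate. Higher mode values
 * enable progressively more expensive motion estimation in config(); see the
 * switch() fall-through there.
 */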
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <math.h>

#include "mp_msg.h"
#include "cpudetect.h"

#include "libavutil/intreadwrite.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"

#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
#include "vd_ffmpeg.h"

#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
//===========================================================================//

struct vf_priv_s {
    int mode;
    int qp;
    int parity;
#if 0
    int temp_stride[3];
    uint8_t *src[3];
    int16_t *temp[3];
#endif
    int outbuf_size;
    uint8_t *outbuf;
    AVCodecContext *avctx_enc;
    AVFrame *frame;
    AVFrame *frame_dec;
};
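/*
 * Overview of filter() (comment added for readability, not in the original):
 * the source frame is fed to the Snow encoder configured in config(); since
 * that encoder runs with CODEC_FLAG2_MEMC_ONLY, its coded_frame is essentially
 * a motion compensated prediction built from previously seen frames. Lines of
 * the known field parity are then copied straight from the source, while the
 * lines of the missing field are taken from that prediction and corrected
 * towards the neighbouring source lines (see the CHECK() logic below).
 */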
static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height){
    int x, y, i;
    int out_size;

    for(i=0; i<3; i++){
        p->frame->data[i]= src[i];
        p->frame->linesize[i]= src_stride[i];
    }

    p->avctx_enc->me_cmp=
    p->avctx_enc->me_sub_cmp= FF_CMP_SAD /*| (p->parity ? FF_CMP_ODD : FF_CMP_EVEN)*/;
    p->frame->quality= p->qp*FF_QP2LAMBDA;
    out_size = avcodec_encode_video(p->avctx_enc, p->outbuf, p->outbuf_size, p->frame);
    p->frame_dec = p->avctx_enc->coded_frame;
    for(i=0; i<3; i++){
        int is_chroma= !!i;
        int w= width >>is_chroma;
        int h= height>>is_chroma;
        int fils= p->frame_dec->linesize[i];
        int srcs= src_stride[i];

        for(y=0; y<h; y++){
            if((y ^ p->parity) & 1){
                for(x=0; x<w; x++){
                    if((x-2)+(y-1)*w>=0 && (x+2)+(y+1)*w<w*h){ //FIXME either alloc larger images or optimize this
                        uint8_t *filp= &p->frame_dec->data[i][x + y*fils];
                        uint8_t *srcp= &src[i][x + y*srcs];
                        int diff0= filp[-fils] - srcp[-srcs];
                        int diff1= filp[+fils] - srcp[+srcs];
                        int spatial_score= ABS(srcp[-srcs-1] - srcp[+srcs-1])
                                          +ABS(srcp[-srcs  ] - srcp[+srcs  ])
                                          +ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;
                        int temp= filp[0];
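/* Explanatory note (added, not in the original): CHECK(j) probes the diagonal
 * direction j between the source line above and the source line below, much
 * like an edge-directed interpolator. If the diagonal pair matches better than
 * the current spatial_score, diff0/diff1 are recomputed along that direction,
 * so the correction applied to the motion compensated pixel follows the local
 * edge. The macro deliberately leaves its braces open; they are closed by the
 * "}}" pairs on the two invocation lines below. */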
#define CHECK(j)\
    {   int score= ABS(srcp[-srcs-1+j] - srcp[+srcs-1-j])\
                 + ABS(srcp[-srcs  +j] - srcp[+srcs  -j])\
                 + ABS(srcp[-srcs+1+j] - srcp[+srcs+1-j]);\
        if(score < spatial_score){\
            spatial_score= score;\
            diff0= filp[-fils+j] - srcp[-srcs+j];\
            diff1= filp[+fils-j] - srcp[+srcs-j];

                        CHECK(-1) CHECK(-2) }} }}
                        CHECK( 1) CHECK( 2) }} }}
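/* Explanatory note (added, not in the original): diff0/diff1 measure how far
 * the motion compensated prediction is from the source line above and below.
 * The enabled branch below subtracts a damped average of the two from the
 * predicted pixel: when both differences agree the full average is removed,
 * and the more they disagree in magnitude the smaller the correction, which
 * avoids overshooting when only one neighbour matches. */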
#if 0
                        if((diff0 ^ diff1) > 0){
                            int mindiff= ABS(diff0) > ABS(diff1) ? diff1 : diff0;
                            temp-= mindiff;
                        }
#elif 1
                        if(diff0 + diff1 > 0)
                            temp-= (diff0 + diff1 - ABS( ABS(diff0) - ABS(diff1) )/2)/2;
                        else
                            temp-= (diff0 + diff1 + ABS( ABS(diff0) - ABS(diff1) )/2)/2;
#else
                        temp-= (diff0 + diff1)/2;
#endif
#if 1
                        // clamp to 0..255 and also write the result back into the reference frame
                        filp[0]=
                        dst[i][x + y*dst_stride[i]]= temp > 255U ? ~(temp>>31) : temp;
#else
                        dst[i][x + y*dst_stride[i]]= filp[0];
                        filp[0]= temp > 255U ? ~(temp>>31) : temp;
#endif
                    }else
                        dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
                }
            }
        }
        for(y=0; y<h; y++){
            if(!((y ^ p->parity) & 1)){
                for(x=0; x<w; x++){
#if 1
                    p->frame_dec->data[i][x + y*fils]=
                    dst[i][x + y*dst_stride[i]]= src[i][x + y*srcs];
#else
                    dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
                    p->frame_dec->data[i][x + y*fils]= src[i][x + y*srcs];
#endif
                }
            }
        }
    }
    p->parity ^= 1;
}
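/*
 * Explanatory note (added, not in the original): config() sets up a Snow
 * encoder whose only job is motion estimation/compensation
 * (CODEC_FLAG2_MEMC_ONLY). The switch() below intentionally falls through:
 * each higher mode keeps everything the lower modes enable and adds more
 * expensive motion estimation on top (qpel always, then 4MV and dia_size=2,
 * then iterative ME, then 3 reference frames).
 */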
static int config(struct vf_instance *vf,
        int width, int height, int d_width, int d_height,
        unsigned int flags, unsigned int outfmt){
    int i;
    AVCodec *enc= avcodec_find_encoder(CODEC_ID_SNOW);

    for(i=0; i<3; i++){
        AVCodecContext *avctx_enc;
#if 0
        int is_chroma= !!i;
        int w= ((width  + 31) & (~31))>>is_chroma;
        int h= ((height + 31) & (~31))>>is_chroma;

        vf->priv->temp_stride[i]= w;
        vf->priv->temp[i]= malloc(vf->priv->temp_stride[i]*h*sizeof(int16_t));
        vf->priv->src [i]= malloc(vf->priv->temp_stride[i]*h*sizeof(uint8_t));
#endif
        avctx_enc=
        vf->priv->avctx_enc= avcodec_alloc_context();

        avctx_enc->width = width;
        avctx_enc->height = height;
        avctx_enc->time_base= (AVRational){1,25};  // meaningless
        avctx_enc->gop_size = 300;
        avctx_enc->max_b_frames= 0;
        avctx_enc->pix_fmt = PIX_FMT_YUV420P;
        avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
        avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
        avctx_enc->global_quality= 1;
        avctx_enc->flags2= CODEC_FLAG2_MEMC_ONLY;
        avctx_enc->me_cmp=
        avctx_enc->me_sub_cmp= FF_CMP_SAD; //SSE;
        avctx_enc->mb_cmp= FF_CMP_SSE;

        switch(vf->priv->mode){
        case 3:
            avctx_enc->refs= 3;
        case 2:
            avctx_enc->me_method= ME_ITER;
        case 1:
            avctx_enc->flags |= CODEC_FLAG_4MV;
            avctx_enc->dia_size=2;
//            avctx_enc->mb_decision = MB_DECISION_RD;
        case 0:
            avctx_enc->flags |= CODEC_FLAG_QPEL;
        }

        avcodec_open(avctx_enc, enc);
    }
    vf->priv->frame= avcodec_alloc_frame();

    vf->priv->outbuf_size= width*height*10;
    vf->priv->outbuf= malloc(vf->priv->outbuf_size);

    return vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
}
static void get_image(struct vf_instance *vf, mp_image_t *mpi){
    if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
    return; //caused problems, dunno why
    // ok, we can do pp in-place (or pp disabled):
    vf->dmpi=vf_get_image(vf->next,mpi->imgfmt,
        mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
    mpi->planes[0]=vf->dmpi->planes[0];
    mpi->stride[0]=vf->dmpi->stride[0];
    mpi->width=vf->dmpi->width;
    if(mpi->flags&MP_IMGFLAG_PLANAR){
        mpi->planes[1]=vf->dmpi->planes[1];
        mpi->planes[2]=vf->dmpi->planes[2];
        mpi->stride[1]=vf->dmpi->stride[1];
        mpi->stride[2]=vf->dmpi->stride[2];
    }
    mpi->flags|=MP_IMGFLAG_DIRECT;
}
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
    mp_image_t *dmpi;

    if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
        // no DR, so get a new image! hope we'll get DR buffer:
        dmpi=vf_get_image(vf->next,mpi->imgfmt,
            MP_IMGTYPE_TEMP,
            MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
            mpi->width,mpi->height);
        vf_clone_mpi_attributes(dmpi, mpi);
    }else{
        dmpi=vf->dmpi;
    }

    filter(vf->priv, dmpi->planes, mpi->planes, dmpi->stride, mpi->stride, mpi->w, mpi->h);

    return vf_next_put_image(vf,dmpi, pts);
}
static void uninit(struct vf_instance *vf){
    if(!vf->priv) return;

#if 0
    for(i=0; i<3; i++){
        if(vf->priv->temp[i]) free(vf->priv->temp[i]);
        vf->priv->temp[i]= NULL;
        if(vf->priv->src[i]) free(vf->priv->src[i]);
        vf->priv->src[i]= NULL;
    }
#endif
    if (vf->priv->avctx_enc) {
        avcodec_close(vf->priv->avctx_enc);
        av_freep(&vf->priv->avctx_enc);
    }

    free(vf->priv->outbuf);
    free(vf->priv);
    vf->priv=NULL;
}
//===========================================================================//
static int query_format(struct vf_instance *vf, unsigned int fmt){
    switch(fmt){
    case IMGFMT_YV12:
    case IMGFMT_I420:
    case IMGFMT_IYUV:
    case IMGFMT_Y800:
    case IMGFMT_Y8:
        return vf_next_query_format(vf,fmt);
    }
    return 0;
}
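/*
 * Note (added): vf_open() defaults to mode=0, parity=-1, qp=1 and lets the
 * colon-separated argument string override them in that order. Since filter()
 * flips p->parity after every frame, the parity option effectively selects
 * only the starting field.
 */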
static int vf_open(vf_instance_t *vf, char *args){

    vf->config=config;
    vf->put_image=put_image;
    vf->get_image=get_image;
    vf->query_format=query_format;
    vf->uninit=uninit;
    vf->priv=malloc(sizeof(struct vf_priv_s));
    memset(vf->priv, 0, sizeof(struct vf_priv_s));

    init_avcodec();

    vf->priv->mode=0;
    vf->priv->parity= -1;
    vf->priv->qp=1;

    if (args) sscanf(args, "%d:%d:%d", &vf->priv->mode, &vf->priv->parity, &vf->priv->qp);

    return 1;
}
const vf_info_t vf_info_mcdeint = {
    "motion compensating deinterlacer",
    "mcdeint",
    "Michael Niedermayer",
    "",
    vf_open,
    NULL
};