/*
 * Copyright (C) 2006-2010 Michael Niedermayer <michaelni@gmx.at>
 *               2010      James Darnley <james.darnley@gmail.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Libav; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/cpu.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "yadif.h"

#undef NDEBUG
#include <assert.h>
#define PERM_RWP (AV_PERM_WRITE | AV_PERM_PRESERVE | AV_PERM_REUSE)
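
/* CHECK(j) scores the edge-directed candidate at offset j: it sums the
 * absolute differences along that diagonal and, if the score beats the
 * current spatial_score, takes the average along the diagonal as the new
 * spatial prediction.  The braces it opens are deliberately left unclosed
 * here; FILTER() closes them after the CHECK() calls. */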
#define CHECK(j)\
    {   int score = FFABS(cur[mrefs + off_left + (j)] - cur[prefs + off_left - (j)])\
                  + FFABS(cur[mrefs + (j)] - cur[prefs - (j)])\
                  + FFABS(cur[mrefs + off_right + (j)] - cur[prefs + off_right - (j)]);\
        if (score < spatial_score) {\
            spatial_score = score;\
            spatial_pred  = (cur[mrefs + (j)] + cur[prefs - (j)]) >> 1;\
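
/* FILTER(start, end) is the per-pixel yadif kernel: it combines a temporal
 * prediction (the average of the previous and next frames at this pixel)
 * with an edge-directed spatial prediction from the current field, then
 * clamps the spatial value into a range derived from the temporal
 * differences. */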
#define FILTER(start, end) \
    for (x = start; x < end; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0])>>1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
        int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
        int spatial_pred = (c + e) >> 1; \
        int off_right = (x < w - 1) ? 1 : -1;\
        int off_left  = x ? -1 : 1;\
        int spatial_score = FFABS(cur[mrefs + off_left]  - cur[prefs + off_left]) + FFABS(c - e) \
                          + FFABS(cur[mrefs + off_right] - cur[prefs + off_right]) - 1; \
 \
        if (x > 2 && x < w - 3) {\
            CHECK(-1) CHECK(-2) }} }} \
            CHECK( 1) CHECK( 2) }} }} \
        }\
 \
        if (mode < 2) { \
            int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
            int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
            int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
            int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
 \
            diff = FFMAX3(diff, min, -max); \
        } \
 \
        if (spatial_pred > d + diff) \
            spatial_pred = d + diff; \
        else if (spatial_pred < d - diff) \
            spatial_pred = d - diff; \
 \
        dst[0] = spatial_pred; \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }
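
/* Process one line of 8-bit samples.  prefs and mrefs are the offsets (in
 * bytes) to the lines below and above the one being reconstructed; parity
 * selects which of the surrounding frames provides the second temporal
 * reference. */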
static void filter_line_c(void *dst1,
                          void *prev1, void *cur1, void *next1,
                          int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur;
    uint8_t *next2 = parity ? cur  : next;

    FILTER(0, w)
}
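
/* Same as filter_line_c(), but only for the first l_edge pixels and the last
 * 3 pixels of the line, where the aligned/SIMD line filter must not run
 * because the spatial check reads up to 3 samples to either side. */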
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
                         int w, int prefs, int mrefs, int parity, int mode,
                         int l_edge)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur;
    uint8_t *next2 = parity ? cur  : next;

    FILTER(0, l_edge)

    dst   = (uint8_t*)dst1  + w - 3;
    prev  = (uint8_t*)prev1 + w - 3;
    cur   = (uint8_t*)cur1  + w - 3;
    next  = (uint8_t*)next1 + w - 3;
    prev2 = (uint8_t*)(parity ? prev : cur);
    next2 = (uint8_t*)(parity ? cur  : next);

    FILTER(w - 3, w)
}
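
/* 16-bit variants of the line and edge filters: identical logic, but the
 * samples are uint16_t, so the byte strides passed in are halved before
 * being used as element offsets. */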
static void filter_line_c_16bit(void *dst1,
                                void *prev1, void *cur1, void *next1,
                                int w, int prefs, int mrefs, int parity,
                                int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur;
    uint16_t *next2 = parity ? cur  : next;
    mrefs /= 2;
    prefs /= 2;

    FILTER(0, w)
}
static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
                               int w, int prefs, int mrefs, int parity, int mode,
                               int l_edge)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur;
    uint16_t *next2 = parity ? cur  : next;
    /* strides arrive in bytes; convert them to uint16_t units, mirroring
     * filter_line_c_16bit() */
    mrefs /= 2;
    prefs /= 2;

    FILTER(0, l_edge)

    dst   = (uint16_t*)dst1  + w - 3;
    prev  = (uint16_t*)prev1 + w - 3;
    cur   = (uint16_t*)cur1  + w - 3;
    next  = (uint16_t*)next1 + w - 3;
    prev2 = (uint16_t*)(parity ? prev : cur);
    next2 = (uint16_t*)(parity ? cur  : next);

    FILTER(w - 3, w)
}
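
/* Deinterlace one output picture: for every plane, the lines of the field
 * being reconstructed (selected by parity) are interpolated with
 * filter_line/filter_edges, while the remaining lines are copied verbatim
 * from the current input frame. */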
static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    int y, i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->video->w;
        int h = dstpic->video->h;
        int refs = yadif->cur->linesize[i];
        int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8;
        int l_edge, l_edge_pix;

        if (i == 1 || i == 2) {
            /* Why is this not part of the per-plane description thing? */
            w >>= yadif->csp->log2_chroma_w;
            h >>= yadif->csp->log2_chroma_h;
        }

        /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
         * we need to call the c variant which avoids this for border pixels
         */
        l_edge     = yadif->req_align;
        l_edge_pix = l_edge / df;

        for (y = 0; y < h; y++) {
            if ((y ^ parity) & 1) {
                uint8_t *prev = &yadif->prev->data[i][y * refs];
                uint8_t *cur  = &yadif->cur ->data[i][y * refs];
                uint8_t *next = &yadif->next->data[i][y * refs];
                uint8_t *dst  = &dstpic->data[i][y * dstpic->linesize[i]];
                int     mode  = y == 1 || y + 2 == h ? 2 : yadif->mode;
                if (yadif->req_align) {
                    yadif->filter_line(dst + l_edge, prev + l_edge, cur + l_edge,
                                       next + l_edge, w - l_edge_pix - 3,
                                       y + 1 < h ? refs : -refs,
                                       y ? -refs : refs,
                                       parity ^ tff, mode);
                    yadif->filter_edges(dst, prev, cur, next, w,
                                        y + 1 < h ? refs : -refs,
                                        y ? -refs : refs,
                                        parity ^ tff, mode, l_edge_pix);
                } else {
                    yadif->filter_line(dst, prev, cur, next + l_edge, w,
                                       y + 1 < h ? refs : -refs,
                                       y ? -refs : refs,
                                       parity ^ tff, mode);
                }
            } else {
                memcpy(&dstpic->data[i][y * dstpic->linesize[i]],
                       &yadif->cur->data[i][y * refs], w * df);
            }
        }
    }

    emms_c();
}
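
/* Allocate an input buffer whose width and height are padded and aligned so
 * the filter can safely over-read, and shift the plane pointers down by one
 * line so that line -1 is addressable. */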
static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms,
                                           int w, int h)
{
    AVFilterBufferRef *picref;
    int width  = FFALIGN(w,     32);
    int height = FFALIGN(h + 2, 32);
    int i;

    picref = ff_default_get_video_buffer(link, perms, width, height);

    picref->video->w = w;
    picref->video->h = h;

    for (i = 0; i < 3; i++)
        picref->data[i] += picref->linesize[i];

    return picref;
}
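
/* Build and send one deinterlaced frame.  For the second field of an input
 * frame (is_second) a fresh output buffer is allocated and its pts is placed
 * between the current and next input frames; with mode&1 each input frame
 * produces one output frame per field, leaving the second field pending. */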
static int return_frame(AVFilterContext *ctx, int is_second)
{
    YADIFContext *yadif = ctx->priv;
    AVFilterLink *link  = ctx->outputs[0];
    int tff, ret;

    if (yadif->parity == -1) {
        tff = yadif->cur->video->interlaced ?
              yadif->cur->video->top_field_first : 1;
    } else {
        tff = yadif->parity ^ 1;
    }

    if (is_second) {
        yadif->out = ff_get_video_buffer(link, PERM_RWP, link->w, link->h);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
        yadif->out->video->interlaced = 0;
    }

    filter(ctx, yadif->out, tff ^ !is_second, tff);

    if (is_second) {
        int64_t cur_pts  = yadif->cur->pts;
        int64_t next_pts = yadif->next->pts;

        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            yadif->out->pts = cur_pts + next_pts;
        } else {
            yadif->out->pts = AV_NOPTS_VALUE;
        }
    }
    ret = ff_filter_frame(ctx->outputs[0], yadif->out);

    yadif->frame_pending = (yadif->mode&1) && !is_second;

    return ret;
}
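
/* Input callback: rotate the prev/cur/next frame references, pass frames
 * through (with their pts rescaled) when auto_enable is set and the input is
 * progressive, otherwise double the pts to match the finer output time base
 * and emit the first field via return_frame(). */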
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = link->dst;
    YADIFContext *yadif = ctx->priv;

    if (yadif->frame_pending)
        return_frame(ctx, 1);

    if (yadif->prev)
        avfilter_unref_buffer(yadif->prev);
    yadif->prev = yadif->cur;
    yadif->cur  = yadif->next;
    yadif->next = picref;

    if (!yadif->cur)
        return 0;

    if (yadif->auto_enable && !yadif->cur->video->interlaced) {
        yadif->out = avfilter_ref_buffer(yadif->cur, AV_PERM_READ);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        avfilter_unref_bufferp(&yadif->prev);
        if (yadif->out->pts != AV_NOPTS_VALUE)
            yadif->out->pts *= 2;
        return ff_filter_frame(ctx->outputs[0], yadif->out);
    }

    if (!yadif->prev &&
        !(yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ)))
        return AVERROR(ENOMEM);

    yadif->out = ff_get_video_buffer(ctx->outputs[0], PERM_RWP,
                                     link->w, link->h);
    if (!yadif->out)
        return AVERROR(ENOMEM);

    avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
    yadif->out->video->interlaced = 0;

    if (yadif->out->pts != AV_NOPTS_VALUE)
        yadif->out->pts *= 2;

    return return_frame(ctx, 0);
}
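
/* Output request callback: flush a pending second field if there is one,
 * otherwise pull frames from the input until output can be produced.  On
 * EOF the last frame is fed through once more with an extrapolated pts so
 * its second field is not lost. */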
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *yadif = ctx->priv;

    if (yadif->frame_pending) {
        return_frame(ctx, 1);
        return 0;
    }

    do {
        int ret;

        if (yadif->eof)
            return AVERROR_EOF;

        ret = ff_request_frame(link->src->inputs[0]);

        if (ret == AVERROR_EOF && yadif->next) {
            AVFilterBufferRef *next =
                avfilter_ref_buffer(yadif->next, AV_PERM_READ);

            if (!next)
                return AVERROR(ENOMEM);

            next->pts = yadif->next->pts * 2 - yadif->cur->pts;

            filter_frame(link->src->inputs[0], next);
            yadif->eof = 1;
        } else if (ret < 0) {
            return ret;
        }
    } while (!yadif->cur);

    return 0;
}
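
/* Report how many frames can be produced without further input; in field
 * mode (mode&1) every input frame yields two output frames. */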
static int poll_frame(AVFilterLink *link)
{
    YADIFContext *yadif = link->src->priv;
    int ret, val;

    if (yadif->frame_pending)
        return 1;

    val = ff_poll_frame(link->src->inputs[0]);
    if (val <= 0)
        return val;

    //FIXME change API to not require this red tape
    if (val == 1 && !yadif->next) {
        if ((ret = ff_request_frame(link->src->inputs[0])) < 0)
            return ret;
        val = ff_poll_frame(link->src->inputs[0]);
        if (val <= 0)
            return val;
    }
    assert(yadif->next || !val);

    if (yadif->auto_enable && yadif->next && !yadif->next->video->interlaced)
        return val;

    return val * ((yadif->mode&1)+1);
}
static av_cold void uninit(AVFilterContext *ctx)
{
    YADIFContext *yadif = ctx->priv;

    if (yadif->prev) avfilter_unref_bufferp(&yadif->prev);
    if (yadif->cur ) avfilter_unref_bufferp(&yadif->cur );
    if (yadif->next) avfilter_unref_bufferp(&yadif->next);
}
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ444P,
        AV_NE( AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE ),
        AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ440P,
        AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
        AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
        AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
        AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
        AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
        AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
        AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}
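
/* Options are passed as a "mode:parity:auto_enable" string, e.g. "1:-1:0";
 * a parity of -1 means the field order is taken from the frames. */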
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    YADIFContext *yadif = ctx->priv;

    yadif->mode        = 0;
    yadif->parity      = -1;
    yadif->auto_enable = 0;

    if (args)
        sscanf(args, "%d:%d:%d",
               &yadif->mode, &yadif->parity, &yadif->auto_enable);

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d parity:%d auto_enable:%d\n",
           yadif->mode, yadif->parity, yadif->auto_enable);

    return 0;
}
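
/* The output time base is twice as fine as the input's (den * 2) so that
 * per-field timestamps are representable; the sample depth selects the 8-
 * or 16-bit C filters, and for 8-bit input x86 SIMD versions may replace
 * them. */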
static int config_props(AVFilterLink *link)
{
    YADIFContext *s = link->src->priv;

    link->time_base.num = link->src->inputs[0]->time_base.num;
    link->time_base.den = link->src->inputs[0]->time_base.den * 2;
    link->w             = link->src->inputs[0]->w;
    link->h             = link->src->inputs[0]->h;

    s->csp = av_pix_fmt_desc_get(link->format);
    if (s->csp->comp[0].depth_minus1 / 8 == 1) {
        s->filter_line  = filter_line_c_16bit;
        s->filter_edges = filter_edges_16bit;
    } else {
        s->filter_line  = filter_line_c;
        s->filter_edges = filter_edges;

        if (ARCH_X86)
            ff_yadif_init_x86(s);
    }

    return 0;
}
static const AVFilterPad avfilter_vf_yadif_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .get_video_buffer = get_video_buffer,
        .filter_frame     = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_yadif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .poll_frame    = poll_frame,
        .request_frame = request_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter avfilter_vf_yadif = {
    .name          = "yadif",
    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image"),

    .priv_size     = sizeof(YADIFContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs        = avfilter_vf_yadif_inputs,

    .outputs       = avfilter_vf_yadif_outputs,
};