libavfilter/vf_select.c
/*
 * Copyright (c) 2011 Stefano Sabatini
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * filter for selecting which frame passes in the filterchain
 */
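/*
 * Illustrative use of the filter (not taken from this file; the expression
 * syntax is the one accepted by libavutil/eval and the constants declared in
 * var_names below). For example, a filtergraph such as
 *
 *     select=eq(pict_type\,I)
 *
 * passes only intra frames, while
 *
 *     select=not(mod(n\,10))
 *
 * passes every 10th frame, since the frame counter n starts at zero. The
 * backslash keeps the filtergraph parser from treating the comma as a
 * filter separator.
 */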
#include "libavutil/eval.h"
#include "libavutil/fifo.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
static const char *const var_names[] = {
    "E",                 ///< Euler number
    "PHI",               ///< golden ratio
    "PI",                ///< greek pi

    "TB",                ///< timebase

    "pts",               ///< original pts in the file of the frame
    "start_pts",         ///< first PTS in the stream, expressed in TB units
    "prev_pts",          ///< previous frame PTS
    "prev_selected_pts", ///< previous selected frame PTS

    "t",                 ///< timestamp of the frame expressed in seconds
    "start_t",           ///< first PTS in the stream, expressed in seconds
    "prev_t",            ///< previous frame time
    "prev_selected_t",   ///< previously selected time

    "pict_type",         ///< the type of picture in the movie
    "I",
    "P",
    "B",
    "S",
    "SI",
    "SP",
    "BI",

    "interlace_type",    ///< the frame interlace type
    "PROGRESSIVE",
    "TOPFIRST",
    "BOTTOMFIRST",

    "n",                 ///< frame number (starting from zero)
    "selected_n",        ///< selected frame number (starting from zero)
    "prev_selected_n",   ///< number of the last selected frame

    "key",               ///< tell if the frame is a key frame
    "pos",               ///< original position in the file of the frame

    NULL
};
enum var_name {
    VAR_E,
    VAR_PHI,
    VAR_PI,

    VAR_TB,

    VAR_PTS,
    VAR_START_PTS,
    VAR_PREV_PTS,
    VAR_PREV_SELECTED_PTS,

    VAR_T,
    VAR_START_T,
    VAR_PREV_T,
    VAR_PREV_SELECTED_T,

    VAR_PICT_TYPE,
    VAR_PICT_TYPE_I,
    VAR_PICT_TYPE_P,
    VAR_PICT_TYPE_B,
    VAR_PICT_TYPE_S,
    VAR_PICT_TYPE_SI,
    VAR_PICT_TYPE_SP,
    VAR_PICT_TYPE_BI,

    VAR_INTERLACE_TYPE,
    VAR_INTERLACE_TYPE_P,
    VAR_INTERLACE_TYPE_T,
    VAR_INTERLACE_TYPE_B,

    VAR_N,
    VAR_SELECTED_N,
    VAR_PREV_SELECTED_N,

    VAR_KEY,
    VAR_POS,

    VAR_VARS_NB
};
#define FIFO_SIZE 8

typedef struct {
    AVExpr *expr;
    double var_values[VAR_VARS_NB];
    double select;
    int cache_frames;
    AVFifoBuffer *pending_frames; ///< FIFO buffer of video frames
} SelectContext;
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    SelectContext *select = ctx->priv;
    int ret;

    if ((ret = av_expr_parse(&select->expr, args ? args : "1",
                             var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", args);
        return ret;
    }

    select->pending_frames = av_fifo_alloc(FIFO_SIZE*sizeof(AVFilterBufferRef*));
    if (!select->pending_frames) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate pending frames buffer.\n");
        return AVERROR(ENOMEM);
    }
    return 0;
}
#define INTERLACE_TYPE_P 0
#define INTERLACE_TYPE_T 1
#define INTERLACE_TYPE_B 2

static int config_input(AVFilterLink *inlink)
{
    SelectContext *select = inlink->dst->priv;

    select->var_values[VAR_E]   = M_E;
    select->var_values[VAR_PHI] = M_PHI;
    select->var_values[VAR_PI]  = M_PI;

    select->var_values[VAR_N]          = 0.0;
    select->var_values[VAR_SELECTED_N] = 0.0;

    select->var_values[VAR_TB] = av_q2d(inlink->time_base);

    select->var_values[VAR_PREV_PTS]          = NAN;
    select->var_values[VAR_PREV_T]            = NAN;
    select->var_values[VAR_PREV_SELECTED_PTS] = NAN;
    select->var_values[VAR_PREV_SELECTED_T]   = NAN;
    select->var_values[VAR_START_PTS]         = NAN;
    select->var_values[VAR_START_T]           = NAN;

    select->var_values[VAR_PICT_TYPE_I]  = AV_PICTURE_TYPE_I;
    select->var_values[VAR_PICT_TYPE_P]  = AV_PICTURE_TYPE_P;
    select->var_values[VAR_PICT_TYPE_B]  = AV_PICTURE_TYPE_B;
    select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
    select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;

    select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
    select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
    select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;

    return 0;
}
#define D2TS(d)  (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
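/*
 * Worked example (illustrative, not part of the original source): with a
 * stream time base TB = 1/25 and picref->pts = 50, TS2D(50) gives 50.0 and
 * t = 50 * (1/25) = 2.0 seconds. A pts of AV_NOPTS_VALUE maps to NAN, which
 * is why start_pts/start_t are initialized to NAN in config_input() above and
 * only latched in select_frame() once a frame carries a valid timestamp.
 */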
static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *picref)
{
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double res;

    if (isnan(select->var_values[VAR_START_PTS]))
        select->var_values[VAR_START_PTS] = TS2D(picref->pts);
    if (isnan(select->var_values[VAR_START_T]))
        select->var_values[VAR_START_T] = TS2D(picref->pts) * av_q2d(inlink->time_base);

    select->var_values[VAR_PTS] = TS2D(picref->pts);
    select->var_values[VAR_T  ] = TS2D(picref->pts) * av_q2d(inlink->time_base);
    select->var_values[VAR_POS] = picref->pos == -1 ? NAN : picref->pos;

    select->var_values[VAR_INTERLACE_TYPE] =
        !picref->video->interlaced     ? INTERLACE_TYPE_P :
        picref->video->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
    select->var_values[VAR_PICT_TYPE] = picref->video->pict_type;

    res = av_expr_eval(select->expr, select->var_values, NULL);
    av_log(inlink->dst, AV_LOG_DEBUG,
           "n:%d pts:%d t:%f pos:%d interlace_type:%c key:%d pict_type:%c "
           "-> select:%f\n",
           (int)select->var_values[VAR_N],
           (int)select->var_values[VAR_PTS],
           select->var_values[VAR_T],
           (int)select->var_values[VAR_POS],
           select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_P ? 'P' :
           select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_T ? 'T' :
           select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_B ? 'B' : '?',
           (int)select->var_values[VAR_KEY],
           av_get_picture_type_char(select->var_values[VAR_PICT_TYPE]),
           res);

    select->var_values[VAR_N] += 1.0;

    if (res) {
        select->var_values[VAR_PREV_SELECTED_N]   = select->var_values[VAR_N];
        select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
        select->var_values[VAR_PREV_SELECTED_T]   = select->var_values[VAR_T];
        select->var_values[VAR_SELECTED_N] += 1.0;
    }

    /* update prev_pts/prev_t only after the expression has been evaluated,
     * so that during evaluation they refer to the previous frame, as
     * documented in var_names */
    select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
    select->var_values[VAR_PREV_T]   = select->var_values[VAR_T];

    return res;
}
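/*
 * The nonzero/zero result of select_frame() is stored in select->select by
 * filter_frame() below: selected frames are either forwarded immediately or,
 * while poll_frame() is probing the input (cache_frames set), parked in the
 * pending_frames FIFO until request_frame() drains them.
 */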
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
{
    SelectContext *select = inlink->dst->priv;

    select->select = select_frame(inlink->dst, frame);
    if (select->select) {
        /* frame was requested through poll_frame */
        if (select->cache_frames) {
            if (!av_fifo_space(select->pending_frames)) {
                av_log(inlink->dst, AV_LOG_ERROR,
                       "Buffering limit reached, cannot cache more frames\n");
                avfilter_unref_bufferp(&frame);
            } else
                av_fifo_generic_write(select->pending_frames, &frame,
                                      sizeof(frame), NULL);
            return 0;
        }
        return ff_filter_frame(inlink->dst->outputs[0], frame);
    }

    avfilter_unref_bufferp(&frame);
    return 0;
}
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    select->select = 0;

    if (av_fifo_size(select->pending_frames)) {
        AVFilterBufferRef *picref;

        av_fifo_generic_read(select->pending_frames, &picref, sizeof(picref), NULL);
        return ff_filter_frame(outlink, picref);
    }

    while (!select->select) {
        int ret = ff_request_frame(inlink);
        if (ret < 0)
            return ret;
    }

    return 0;
}
static int poll_frame(AVFilterLink *outlink)
{
    SelectContext *select = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    int count, ret;

    if (!av_fifo_size(select->pending_frames)) {
        if ((count = ff_poll_frame(inlink)) <= 0)
            return count;
        /* request frame from input, and apply select condition to it */
        select->cache_frames = 1;
        while (count-- && av_fifo_space(select->pending_frames)) {
            ret = ff_request_frame(inlink);
            if (ret < 0)
                break;
        }
        select->cache_frames = 0;
    }

    return av_fifo_size(select->pending_frames)/sizeof(AVFilterBufferRef *);
}
static av_cold void uninit(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    AVFilterBufferRef *picref;

    av_expr_free(select->expr);
    select->expr = NULL;

    while (select->pending_frames &&
           av_fifo_generic_read(select->pending_frames, &picref, sizeof(picref), NULL) == sizeof(picref))
        avfilter_unref_buffer(picref);
    av_fifo_free(select->pending_frames);
    select->pending_frames = NULL;
}
static const AVFilterPad avfilter_vf_select_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .get_video_buffer = ff_null_get_video_buffer,
        .config_props     = config_input,
        .filter_frame     = filter_frame,
    },
    { NULL }
};
static const AVFilterPad avfilter_vf_select_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .poll_frame    = poll_frame,
        .request_frame = request_frame,
    },
    { NULL }
};
AVFilter avfilter_vf_select = {
    .name        = "select",
    .description = NULL_IF_CONFIG_SMALL("Select frames to pass in output."),
    .init        = init,
    .uninit      = uninit,

    .priv_size   = sizeof(SelectContext),

    .inputs      = avfilter_vf_select_inputs,
    .outputs     = avfilter_vf_select_outputs,
};
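/*
 * Example invocation (illustrative; it assumes the standard avconv/ffmpeg
 * -vf option and filtergraph escaping rules rather than anything defined in
 * this file): keep only the frames whose timestamp falls between 10 and 20
 * seconds and drop the rest:
 *
 *     -vf "select=gte(t\,10)*lte(t\,20)"
 *
 * select_frame() evaluates the expression once per frame; any nonzero result
 * lets the frame through.
 */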