[ffmpeg-lucabe.git] / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
31 #include "cmdutils.h"
33 #include <SDL.h>
34 #include <SDL_thread.h>
36 #ifdef __MINGW32__
37 #undef main /* We don't want SDL to override our main() */
38 #endif
40 #undef exit
42 const char program_name[] = "FFplay";
43 const int program_birth_year = 2003;
45 //#define DEBUG_SYNC
47 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
48 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
49 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
51 /* SDL audio buffer size, in samples. Should be small to have precise
52 A/V sync as SDL does not have hardware buffer fullness info. */
53 #define SDL_AUDIO_BUFFER_SIZE 1024
55 /* no AV sync correction is done if below the AV sync threshold */
56 #define AV_SYNC_THRESHOLD 0.01
57 /* no AV correction is done if the error is too big */
58 #define AV_NOSYNC_THRESHOLD 10.0
60 /* maximum audio speed change to get correct sync */
61 #define SAMPLE_CORRECTION_PERCENT_MAX 10
63 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
64 #define AUDIO_DIFF_AVG_NB 20
66 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
67 #define SAMPLE_ARRAY_SIZE (2*65536)
69 static int sws_flags = SWS_BICUBIC;
71 typedef struct PacketQueue {
72 AVPacketList *first_pkt, *last_pkt;
73 int nb_packets;
74 int size;
75 int abort_request;
76 SDL_mutex *mutex;
77 SDL_cond *cond;
78 } PacketQueue;
80 #define VIDEO_PICTURE_QUEUE_SIZE 1
81 #define SUBPICTURE_QUEUE_SIZE 4
83 typedef struct VideoPicture {
84 double pts; ///<presentation time stamp for this picture
85 SDL_Overlay *bmp;
86 int width, height; /* source height & width */
87 int allocated;
88 } VideoPicture;
90 typedef struct SubPicture {
91 double pts; /* presentation time stamp for this picture */
92 AVSubtitle sub;
93 } SubPicture;
95 enum {
96 AV_SYNC_AUDIO_MASTER, /* default choice */
97 AV_SYNC_VIDEO_MASTER,
98 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
101 typedef struct VideoState {
102 SDL_Thread *parse_tid;
103 SDL_Thread *video_tid;
104 AVInputFormat *iformat;
105 int no_background;
106 int abort_request;
107 int paused;
108 int last_paused;
109 int seek_req;
110 int seek_flags;
111 int64_t seek_pos;
112 AVFormatContext *ic;
113 int dtg_active_format;
115 int audio_stream;
117 int av_sync_type;
118 double external_clock; /* external clock base */
119 int64_t external_clock_time;
121 double audio_clock;
122 double audio_diff_cum; /* used for AV difference average computation */
123 double audio_diff_avg_coef;
124 double audio_diff_threshold;
125 int audio_diff_avg_count;
126 AVStream *audio_st;
127 PacketQueue audioq;
128 int audio_hw_buf_size;
129 /* samples output by the codec. we reserve more space for avsync
130 compensation */
131 DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
132 DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
133 uint8_t *audio_buf;
134 unsigned int audio_buf_size; /* in bytes */
135 int audio_buf_index; /* in bytes */
136 AVPacket audio_pkt;
137 uint8_t *audio_pkt_data;
138 int audio_pkt_size;
139 enum SampleFormat audio_src_fmt;
140 AVAudioConvert *reformat_ctx;
142 int show_audio; /* if true, display audio samples */
143 int16_t sample_array[SAMPLE_ARRAY_SIZE];
144 int sample_array_index;
145 int last_i_start;
147 SDL_Thread *subtitle_tid;
148 int subtitle_stream;
149 int subtitle_stream_changed;
150 AVStream *subtitle_st;
151 PacketQueue subtitleq;
152 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
153 int subpq_size, subpq_rindex, subpq_windex;
154 SDL_mutex *subpq_mutex;
155 SDL_cond *subpq_cond;
157 double frame_timer;
158 double frame_last_pts;
159 double frame_last_delay;
160 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
161 int video_stream;
162 AVStream *video_st;
163 PacketQueue videoq;
164 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
165 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
166 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
167 int pictq_size, pictq_rindex, pictq_windex;
168 SDL_mutex *pictq_mutex;
169 SDL_cond *pictq_cond;
171 // QETimer *video_timer;
172 char filename[1024];
173 int width, height, xleft, ytop;
174 } VideoState;
176 static void show_help(void);
177 static int audio_write_get_buf_size(VideoState *is);
179 /* options specified by the user */
180 static AVInputFormat *file_iformat;
181 static const char *input_filename;
182 static int fs_screen_width;
183 static int fs_screen_height;
184 static int screen_width = 0;
185 static int screen_height = 0;
186 static int frame_width = 0;
187 static int frame_height = 0;
188 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
189 static int audio_disable;
190 static int video_disable;
191 static int wanted_audio_stream= 0;
192 static int wanted_video_stream= 0;
193 static int seek_by_bytes;
194 static int display_disable;
195 static int show_status;
196 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
197 static int64_t start_time = AV_NOPTS_VALUE;
198 static int debug = 0;
199 static int debug_mv = 0;
200 static int step = 0;
201 static int thread_count = 1;
202 static int workaround_bugs = 1;
203 static int fast = 0;
204 static int genpts = 0;
205 static int lowres = 0;
206 static int idct = FF_IDCT_AUTO;
207 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
208 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
209 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
210 static int error_resilience = FF_ER_CAREFUL;
211 static int error_concealment = 3;
212 static int decoder_reorder_pts= 0;
214 /* current context */
215 static int is_full_screen;
216 static VideoState *cur_stream;
217 static int64_t audio_callback_time;
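/* special packet put into the queues after a seek; the decoder threads
   flush their codec buffers when they dequeue it */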
219 AVPacket flush_pkt;
221 #define FF_ALLOC_EVENT (SDL_USEREVENT)
222 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
223 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
225 SDL_Surface *screen;
227 /* packet queue handling */
228 static void packet_queue_init(PacketQueue *q)
230 memset(q, 0, sizeof(PacketQueue));
231 q->mutex = SDL_CreateMutex();
232 q->cond = SDL_CreateCond();
235 static void packet_queue_flush(PacketQueue *q)
237 AVPacketList *pkt, *pkt1;
239 SDL_LockMutex(q->mutex);
240 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
241 pkt1 = pkt->next;
242 av_free_packet(&pkt->pkt);
243 av_freep(&pkt);
245 q->last_pkt = NULL;
246 q->first_pkt = NULL;
247 q->nb_packets = 0;
248 q->size = 0;
249 SDL_UnlockMutex(q->mutex);
252 static void packet_queue_end(PacketQueue *q)
254 packet_queue_flush(q);
255 SDL_DestroyMutex(q->mutex);
256 SDL_DestroyCond(q->cond);
259 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
261 AVPacketList *pkt1;
263 /* duplicate the packet */
264 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
265 return -1;
267 pkt1 = av_malloc(sizeof(AVPacketList));
268 if (!pkt1)
269 return -1;
270 pkt1->pkt = *pkt;
271 pkt1->next = NULL;
274 SDL_LockMutex(q->mutex);
276 if (!q->last_pkt)
278 q->first_pkt = pkt1;
279 else
280 q->last_pkt->next = pkt1;
281 q->last_pkt = pkt1;
282 q->nb_packets++;
283 q->size += pkt1->pkt.size;
284 /* XXX: should duplicate packet data in DV case */
285 SDL_CondSignal(q->cond);
287 SDL_UnlockMutex(q->mutex);
288 return 0;
291 static void packet_queue_abort(PacketQueue *q)
293 SDL_LockMutex(q->mutex);
295 q->abort_request = 1;
297 SDL_CondSignal(q->cond);
299 SDL_UnlockMutex(q->mutex);
302 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
303 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
305 AVPacketList *pkt1;
306 int ret;
308 SDL_LockMutex(q->mutex);
310 for(;;) {
311 if (q->abort_request) {
312 ret = -1;
313 break;
316 pkt1 = q->first_pkt;
317 if (pkt1) {
318 q->first_pkt = pkt1->next;
319 if (!q->first_pkt)
320 q->last_pkt = NULL;
321 q->nb_packets--;
322 q->size -= pkt1->pkt.size;
323 *pkt = pkt1->pkt;
324 av_free(pkt1);
325 ret = 1;
326 break;
327 } else if (!block) {
328 ret = 0;
329 break;
330 } else {
331 SDL_CondWait(q->cond, q->mutex);
334 SDL_UnlockMutex(q->mutex);
335 return ret;
338 static inline void fill_rectangle(SDL_Surface *screen,
339 int x, int y, int w, int h, int color)
341 SDL_Rect rect;
342 rect.x = x;
343 rect.y = y;
344 rect.w = w;
345 rect.h = h;
346 SDL_FillRect(screen, &rect, color);
349 #if 0
350 /* draw only the border of a rectangle */
351 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
353 int w1, w2, h1, h2;
355 /* fill the background */
356 w1 = x;
357 if (w1 < 0)
358 w1 = 0;
359 w2 = s->width - (x + w);
360 if (w2 < 0)
361 w2 = 0;
362 h1 = y;
363 if (h1 < 0)
364 h1 = 0;
365 h2 = s->height - (y + h);
366 if (h2 < 0)
367 h2 = 0;
368 fill_rectangle(screen,
369 s->xleft, s->ytop,
370 w1, s->height,
371 color);
372 fill_rectangle(screen,
373 s->xleft + s->width - w2, s->ytop,
374 w2, s->height,
375 color);
376 fill_rectangle(screen,
377 s->xleft + w1, s->ytop,
378 s->width - w1 - w2, h1,
379 color);
380 fill_rectangle(screen,
381 s->xleft + w1, s->ytop + s->height - h2,
382 s->width - w1 - w2, h2,
383 color);
385 #endif
389 #define SCALEBITS 10
390 #define ONE_HALF (1 << (SCALEBITS - 1))
391 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
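/* RGB -> YCbCr conversion in SCALEBITS-bit fixed point, using CCIR 601
   coefficients scaled to studio range (Y: 16..235, Cb/Cr: 16..240) */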
393 #define RGB_TO_Y_CCIR(r, g, b) \
394 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
395 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
397 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
398 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
399 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
401 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
402 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
403 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
405 #define ALPHA_BLEND(a, oldp, newp, s)\
406 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
408 #define RGBA_IN(r, g, b, a, s)\
410 unsigned int v = ((const uint32_t *)(s))[0];\
411 a = (v >> 24) & 0xff;\
412 r = (v >> 16) & 0xff;\
413 g = (v >> 8) & 0xff;\
414 b = v & 0xff;\
417 #define YUVA_IN(y, u, v, a, s, pal)\
419 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
420 a = (val >> 24) & 0xff;\
421 y = (val >> 16) & 0xff;\
422 u = (val >> 8) & 0xff;\
423 v = val & 0xff;\
426 #define YUVA_OUT(d, y, u, v, a)\
428 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
432 #define BPP 1
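/* blend_subrect(): blend a paletted subtitle rectangle (8-bit indices into a
   palette already converted to YUVA) onto a YUV420P picture; the chroma planes
   are subsampled 2x2, so chroma and alpha contributions are accumulated over
   each 2x2 block of luma samples before blending */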
434 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
436 int wrap, wrap3, width2, skip2;
437 int y, u, v, a, u1, v1, a1, w, h;
438 uint8_t *lum, *cb, *cr;
439 const uint8_t *p;
440 const uint32_t *pal;
441 int dstx, dsty, dstw, dsth;
443 dstx = FFMIN(FFMAX(rect->x, 0), imgw);
444 dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
445 dsty = FFMIN(FFMAX(rect->y, 0), imgh);
446 dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
447 lum = dst->data[0] + dsty * dst->linesize[0];
448 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
449 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
451 width2 = (dstw + 1) >> 1;
452 skip2 = dstx >> 1;
453 wrap = dst->linesize[0];
454 wrap3 = rect->linesize;
455 p = rect->bitmap;
456 pal = rect->rgba_palette; /* Now in YCrCb! */
458 if (dsty & 1) {
459 lum += dstx;
460 cb += skip2;
461 cr += skip2;
463 if (dstx & 1) {
464 YUVA_IN(y, u, v, a, p, pal);
465 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
466 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
467 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
468 cb++;
469 cr++;
470 lum++;
471 p += BPP;
473 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
474 YUVA_IN(y, u, v, a, p, pal);
475 u1 = u;
476 v1 = v;
477 a1 = a;
478 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
480 YUVA_IN(y, u, v, a, p + BPP, pal);
481 u1 += u;
482 v1 += v;
483 a1 += a;
484 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
485 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
486 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
487 cb++;
488 cr++;
489 p += 2 * BPP;
490 lum += 2;
492 if (w) {
493 YUVA_IN(y, u, v, a, p, pal);
494 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
495 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
496 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
498 p += wrap3 + (wrap3 - dstw * BPP);
499 lum += wrap + (wrap - dstw - dstx);
500 cb += dst->linesize[1] - width2 - skip2;
501 cr += dst->linesize[2] - width2 - skip2;
503 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
504 lum += dstx;
505 cb += skip2;
506 cr += skip2;
508 if (dstx & 1) {
509 YUVA_IN(y, u, v, a, p, pal);
510 u1 = u;
511 v1 = v;
512 a1 = a;
513 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514 p += wrap3;
515 lum += wrap;
516 YUVA_IN(y, u, v, a, p, pal);
517 u1 += u;
518 v1 += v;
519 a1 += a;
520 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
522 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
523 cb++;
524 cr++;
525 p += -wrap3 + BPP;
526 lum += -wrap + 1;
528 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
529 YUVA_IN(y, u, v, a, p, pal);
530 u1 = u;
531 v1 = v;
532 a1 = a;
533 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535 YUVA_IN(y, u, v, a, p, pal);
536 u1 += u;
537 v1 += v;
538 a1 += a;
539 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540 p += wrap3;
541 lum += wrap;
543 YUVA_IN(y, u, v, a, p, pal);
544 u1 += u;
545 v1 += v;
546 a1 += a;
547 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
549 YUVA_IN(y, u, v, a, p, pal);
550 u1 += u;
551 v1 += v;
552 a1 += a;
553 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
555 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
556 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
558 cb++;
559 cr++;
560 p += -wrap3 + 2 * BPP;
561 lum += -wrap + 2;
563 if (w) {
564 YUVA_IN(y, u, v, a, p, pal);
565 u1 = u;
566 v1 = v;
567 a1 = a;
568 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569 p += wrap3;
570 lum += wrap;
571 YUVA_IN(y, u, v, a, p, pal);
572 u1 += u;
573 v1 += v;
574 a1 += a;
575 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
577 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
578 cb++;
579 cr++;
580 p += -wrap3 + BPP;
581 lum += -wrap + 1;
583 p += wrap3 + (wrap3 - dstw * BPP);
584 lum += wrap + (wrap - dstw - dstx);
585 cb += dst->linesize[1] - width2 - skip2;
586 cr += dst->linesize[2] - width2 - skip2;
588 /* handle odd height */
589 if (h) {
590 lum += dstx;
591 cb += skip2;
592 cr += skip2;
594 if (dstx & 1) {
595 YUVA_IN(y, u, v, a, p, pal);
596 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
598 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
599 cb++;
600 cr++;
601 lum++;
602 p += BPP;
604 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
605 YUVA_IN(y, u, v, a, p, pal);
606 u1 = u;
607 v1 = v;
608 a1 = a;
609 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611 YUVA_IN(y, u, v, a, p + BPP, pal);
612 u1 += u;
613 v1 += v;
614 a1 += a;
615 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
616 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
617 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
618 cb++;
619 cr++;
620 p += 2 * BPP;
621 lum += 2;
623 if (w) {
624 YUVA_IN(y, u, v, a, p, pal);
625 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
627 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632 static void free_subpicture(SubPicture *sp)
634 int i;
636 for (i = 0; i < sp->sub.num_rects; i++)
638 av_free(sp->sub.rects[i].bitmap);
639 av_free(sp->sub.rects[i].rgba_palette);
642 av_free(sp->sub.rects);
644 memset(&sp->sub, 0, sizeof(AVSubtitle));
647 static void video_image_display(VideoState *is)
649 VideoPicture *vp;
650 SubPicture *sp;
651 AVPicture pict;
652 float aspect_ratio;
653 int width, height, x, y;
654 SDL_Rect rect;
655 int i;
657 vp = &is->pictq[is->pictq_rindex];
658 if (vp->bmp) {
659 /* XXX: use variable in the frame */
660 if (is->video_st->codec->sample_aspect_ratio.num == 0)
661 aspect_ratio = 0;
662 else
663 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
664 * is->video_st->codec->width / is->video_st->codec->height;
665 if (aspect_ratio <= 0.0)
666 aspect_ratio = (float)is->video_st->codec->width /
667 (float)is->video_st->codec->height;
668 /* if an active format is indicated, then it overrides the
669 mpeg format */
670 #if 0
671 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
672 is->dtg_active_format = is->video_st->codec->dtg_active_format;
673 printf("dtg_active_format=%d\n", is->dtg_active_format);
675 #endif
676 #if 0
677 switch(is->video_st->codec->dtg_active_format) {
678 case FF_DTG_AFD_SAME:
679 default:
680 /* nothing to do */
681 break;
682 case FF_DTG_AFD_4_3:
683 aspect_ratio = 4.0 / 3.0;
684 break;
685 case FF_DTG_AFD_16_9:
686 aspect_ratio = 16.0 / 9.0;
687 break;
688 case FF_DTG_AFD_14_9:
689 aspect_ratio = 14.0 / 9.0;
690 break;
691 case FF_DTG_AFD_4_3_SP_14_9:
692 aspect_ratio = 14.0 / 9.0;
693 break;
694 case FF_DTG_AFD_16_9_SP_14_9:
695 aspect_ratio = 14.0 / 9.0;
696 break;
697 case FF_DTG_AFD_SP_4_3:
698 aspect_ratio = 4.0 / 3.0;
699 break;
701 #endif
703 if (is->subtitle_st)
705 if (is->subpq_size > 0)
707 sp = &is->subpq[is->subpq_rindex];
709 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
711 SDL_LockYUVOverlay (vp->bmp);
713 pict.data[0] = vp->bmp->pixels[0];
714 pict.data[1] = vp->bmp->pixels[2];
715 pict.data[2] = vp->bmp->pixels[1];
717 pict.linesize[0] = vp->bmp->pitches[0];
718 pict.linesize[1] = vp->bmp->pitches[2];
719 pict.linesize[2] = vp->bmp->pitches[1];
721 for (i = 0; i < sp->sub.num_rects; i++)
722 blend_subrect(&pict, &sp->sub.rects[i],
723 vp->bmp->w, vp->bmp->h);
725 SDL_UnlockYUVOverlay (vp->bmp);
731 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
732 height = is->height;
733 width = ((int)rint(height * aspect_ratio)) & -3;
734 if (width > is->width) {
735 width = is->width;
736 height = ((int)rint(width / aspect_ratio)) & -3;
738 x = (is->width - width) / 2;
739 y = (is->height - height) / 2;
740 if (!is->no_background) {
741 /* fill the background */
742 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
743 } else {
744 is->no_background = 0;
746 rect.x = is->xleft + x;
747 rect.y = is->ytop + y;
748 rect.w = width;
749 rect.h = height;
750 SDL_DisplayYUVOverlay(vp->bmp, &rect);
751 } else {
752 #if 0
753 fill_rectangle(screen,
754 is->xleft, is->ytop, is->width, is->height,
755 QERGB(0x00, 0x00, 0x00));
756 #endif
760 static inline int compute_mod(int a, int b)
762 a = a % b;
763 if (a >= 0)
764 return a;
765 else
766 return a + b;
769 static void video_audio_display(VideoState *s)
771 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
772 int ch, channels, h, h2, bgcolor, fgcolor;
773 int64_t time_diff; /* av_gettime() differences are in microseconds; a 16-bit variable would overflow */
775 /* compute display index: center on currently output samples */
776 channels = s->audio_st->codec->channels;
777 nb_display_channels = channels;
778 if (!s->paused) {
779 n = 2 * channels;
780 delay = audio_write_get_buf_size(s);
781 delay /= n;
783 /* to be more precise, we take into account the time spent since
784 the last buffer computation */
785 if (audio_callback_time) {
786 time_diff = av_gettime() - audio_callback_time;
787 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
790 delay -= s->width / 2;
791 if (delay < s->width)
792 delay = s->width;
794 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
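/* oscilloscope-style trigger: scan backwards for a zero crossing (samples b and c
   have opposite signs) with the largest downward step (a - d), so that the
   displayed waveform stays roughly stationary between refreshes */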
796 h= INT_MIN;
797 for(i=0; i<1000; i+=channels){
798 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
799 int a= s->sample_array[idx];
800 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
801 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
802 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
803 int score= a-d;
804 if(h<score && (b^c)<0){
805 h= score;
806 i_start= idx;
810 s->last_i_start = i_start;
811 } else {
812 i_start = s->last_i_start;
815 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
816 fill_rectangle(screen,
817 s->xleft, s->ytop, s->width, s->height,
818 bgcolor);
820 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
822 /* total height for one channel */
823 h = s->height / nb_display_channels;
824 /* graph height / 2 */
825 h2 = (h * 9) / 20;
826 for(ch = 0;ch < nb_display_channels; ch++) {
827 i = i_start + ch;
828 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
829 for(x = 0; x < s->width; x++) {
830 y = (s->sample_array[i] * h2) >> 15;
831 if (y < 0) {
832 y = -y;
833 ys = y1 - y;
834 } else {
835 ys = y1;
837 fill_rectangle(screen,
838 s->xleft + x, ys, 1, y,
839 fgcolor);
840 i += channels;
841 if (i >= SAMPLE_ARRAY_SIZE)
842 i -= SAMPLE_ARRAY_SIZE;
846 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
848 for(ch = 1;ch < nb_display_channels; ch++) {
849 y = s->ytop + ch * h;
850 fill_rectangle(screen,
851 s->xleft, y, s->width, 1,
852 fgcolor);
854 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
857 static int video_open(VideoState *is){
858 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
859 int w,h;
861 if(is_full_screen) flags |= SDL_FULLSCREEN;
862 else flags |= SDL_RESIZABLE;
864 if (is_full_screen && fs_screen_width) {
865 w = fs_screen_width;
866 h = fs_screen_height;
867 } else if(!is_full_screen && screen_width){
868 w = screen_width;
869 h = screen_height;
870 }else if (is->video_st && is->video_st->codec->width){
871 w = is->video_st->codec->width;
872 h = is->video_st->codec->height;
873 } else {
874 w = 640;
875 h = 480;
877 #ifndef __APPLE__
878 screen = SDL_SetVideoMode(w, h, 0, flags);
879 #else
880 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
881 screen = SDL_SetVideoMode(w, h, 24, flags);
882 #endif
883 if (!screen) {
884 fprintf(stderr, "SDL: could not set video mode - exiting\n");
885 return -1;
887 SDL_WM_SetCaption("FFplay", "FFplay");
889 is->width = screen->w;
890 is->height = screen->h;
892 return 0;
895 /* display the current picture, if any */
896 static void video_display(VideoState *is)
898 if(!screen)
899 video_open(cur_stream);
900 if (is->audio_st && is->show_audio)
901 video_audio_display(is);
902 else if (is->video_st)
903 video_image_display(is);
906 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
908 SDL_Event event;
909 event.type = FF_REFRESH_EVENT;
910 event.user.data1 = opaque;
911 SDL_PushEvent(&event);
912 return 0; /* 0 means stop timer */
915 /* schedule a video refresh in 'delay' ms */
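/* the SDL timer callback runs in its own thread and only pushes an
   FF_REFRESH_EVENT; the actual drawing is done by the thread running the SDL event loop */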
916 static void schedule_refresh(VideoState *is, int delay)
918 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
921 /* get the current audio clock value */
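/* i.e. the pts of the last decoded audio data minus the playing time of the
   bytes still waiting in the output buffer */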
922 static double get_audio_clock(VideoState *is)
924 double pts;
925 int hw_buf_size, bytes_per_sec;
926 pts = is->audio_clock;
927 hw_buf_size = audio_write_get_buf_size(is);
928 bytes_per_sec = 0;
929 if (is->audio_st) {
930 bytes_per_sec = is->audio_st->codec->sample_rate *
931 2 * is->audio_st->codec->channels;
933 if (bytes_per_sec)
934 pts -= (double)hw_buf_size / bytes_per_sec;
935 return pts;
938 /* get the current video clock value */
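/* extrapolated: pts of the currently displayed picture plus the wall-clock time
   elapsed since it was shown (no extrapolation while paused) */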
939 static double get_video_clock(VideoState *is)
941 double delta;
942 if (is->paused) {
943 delta = 0;
944 } else {
945 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
947 return is->video_current_pts + delta;
950 /* get the current external clock value */
951 static double get_external_clock(VideoState *is)
953 int64_t ti;
954 ti = av_gettime();
955 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
958 /* get the current master clock value */
959 static double get_master_clock(VideoState *is)
961 double val;
963 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
964 if (is->video_st)
965 val = get_video_clock(is);
966 else
967 val = get_audio_clock(is);
968 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
969 if (is->audio_st)
970 val = get_audio_clock(is);
971 else
972 val = get_video_clock(is);
973 } else {
974 val = get_external_clock(is);
976 return val;
979 /* seek in the stream */
980 static void stream_seek(VideoState *is, int64_t pos, int rel)
982 if (!is->seek_req) {
983 is->seek_pos = pos;
984 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
985 if (seek_by_bytes)
986 is->seek_flags |= AVSEEK_FLAG_BYTE;
987 is->seek_req = 1;
991 /* pause or resume the video */
992 static void stream_pause(VideoState *is)
994 is->paused = !is->paused;
995 if (!is->paused) {
996 is->video_current_pts = get_video_clock(is);
997 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
1001 /* called to display each frame */
1002 static void video_refresh_timer(void *opaque)
1004 VideoState *is = opaque;
1005 VideoPicture *vp;
1006 double actual_delay, delay, sync_threshold, ref_clock, diff;
1008 SubPicture *sp, *sp2;
1010 if (is->video_st) {
1011 if (is->pictq_size == 0) {
1012 /* if no picture, need to wait */
1013 schedule_refresh(is, 1);
1014 } else {
1015 /* dequeue the picture */
1016 vp = &is->pictq[is->pictq_rindex];
1018 /* update current video pts */
1019 is->video_current_pts = vp->pts;
1020 is->video_current_pts_time = av_gettime();
1022 /* compute nominal delay */
1023 delay = vp->pts - is->frame_last_pts;
1024 if (delay <= 0 || delay >= 2.0) {
1025 /* if incorrect delay, use previous one */
1026 delay = is->frame_last_delay;
1028 is->frame_last_delay = delay;
1029 is->frame_last_pts = vp->pts;
1031 /* update delay to follow master synchronisation source */
1032 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1033 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1034 /* if video is slave, we try to correct big delays by
1035 duplicating or deleting a frame */
1036 ref_clock = get_master_clock(is);
1037 diff = vp->pts - ref_clock;
1039 /* skip or repeat frame. We take the
1040 delay into account to compute the threshold. It is still unclear
1041 whether this is the best guess */
1042 sync_threshold = AV_SYNC_THRESHOLD;
1043 if (delay > sync_threshold)
1044 sync_threshold = delay;
1045 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1046 if (diff <= -sync_threshold)
1047 delay = 0;
1048 else if (diff >= sync_threshold)
1049 delay = 2 * delay;
1053 is->frame_timer += delay;
1054 /* compute the REAL delay (we need to do this to avoid
1055 long-term errors) */
1056 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1057 if (actual_delay < 0.010) {
1058 /* XXX: should skip picture */
1059 actual_delay = 0.010;
1061 /* launch timer for next picture */
1062 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1064 #if defined(DEBUG_SYNC)
1065 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1066 delay, actual_delay, vp->pts, -diff);
1067 #endif
1069 if(is->subtitle_st) {
1070 if (is->subtitle_stream_changed) {
1071 SDL_LockMutex(is->subpq_mutex);
1073 while (is->subpq_size) {
1074 free_subpicture(&is->subpq[is->subpq_rindex]);
1076 /* update queue size and signal for next picture */
1077 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1078 is->subpq_rindex = 0;
1080 is->subpq_size--;
1082 is->subtitle_stream_changed = 0;
1084 SDL_CondSignal(is->subpq_cond);
1085 SDL_UnlockMutex(is->subpq_mutex);
1086 } else {
1087 if (is->subpq_size > 0) {
1088 sp = &is->subpq[is->subpq_rindex];
1090 if (is->subpq_size > 1)
1091 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1092 else
1093 sp2 = NULL;
1095 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1096 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1098 free_subpicture(sp);
1100 /* update queue size and signal for next picture */
1101 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1102 is->subpq_rindex = 0;
1104 SDL_LockMutex(is->subpq_mutex);
1105 is->subpq_size--;
1106 SDL_CondSignal(is->subpq_cond);
1107 SDL_UnlockMutex(is->subpq_mutex);
1113 /* display picture */
1114 video_display(is);
1116 /* update queue size and signal for next picture */
1117 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1118 is->pictq_rindex = 0;
1120 SDL_LockMutex(is->pictq_mutex);
1121 is->pictq_size--;
1122 SDL_CondSignal(is->pictq_cond);
1123 SDL_UnlockMutex(is->pictq_mutex);
1125 } else if (is->audio_st) {
1126 /* draw the next audio frame */
1128 schedule_refresh(is, 40);
1130 /* if there is only an audio stream, display the audio bars (better
1131 than nothing, just to test the implementation) */
1133 /* display picture */
1134 video_display(is);
1135 } else {
1136 schedule_refresh(is, 100);
1138 if (show_status) {
1139 static int64_t last_time;
1140 int64_t cur_time;
1141 int aqsize, vqsize, sqsize;
1142 double av_diff;
1144 cur_time = av_gettime();
1145 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1146 aqsize = 0;
1147 vqsize = 0;
1148 sqsize = 0;
1149 if (is->audio_st)
1150 aqsize = is->audioq.size;
1151 if (is->video_st)
1152 vqsize = is->videoq.size;
1153 if (is->subtitle_st)
1154 sqsize = is->subtitleq.size;
1155 av_diff = 0;
1156 if (is->audio_st && is->video_st)
1157 av_diff = get_audio_clock(is) - get_video_clock(is);
1158 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1159 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1160 fflush(stdout);
1161 last_time = cur_time;
1166 /* allocate a picture (this must be done in the main thread to avoid
1167 potential locking problems) */
1168 static void alloc_picture(void *opaque)
1170 VideoState *is = opaque;
1171 VideoPicture *vp;
1173 vp = &is->pictq[is->pictq_windex];
1175 if (vp->bmp)
1176 SDL_FreeYUVOverlay(vp->bmp);
1178 #if 0
1179 /* XXX: use generic function */
1180 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1181 switch(is->video_st->codec->pix_fmt) {
1182 case PIX_FMT_YUV420P:
1183 case PIX_FMT_YUV422P:
1184 case PIX_FMT_YUV444P:
1185 case PIX_FMT_YUYV422:
1186 case PIX_FMT_YUV410P:
1187 case PIX_FMT_YUV411P:
1188 is_yuv = 1;
1189 break;
1190 default:
1191 is_yuv = 0;
1192 break;
1194 #endif
1195 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1196 is->video_st->codec->height,
1197 SDL_YV12_OVERLAY,
1198 screen);
1199 vp->width = is->video_st->codec->width;
1200 vp->height = is->video_st->codec->height;
1202 SDL_LockMutex(is->pictq_mutex);
1203 vp->allocated = 1;
1204 SDL_CondSignal(is->pictq_cond);
1205 SDL_UnlockMutex(is->pictq_mutex);
1210 * @param pts the dts of the pkt / pts of the frame, guessed if not known
1212 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1214 VideoPicture *vp;
1215 int dst_pix_fmt;
1216 AVPicture pict;
1217 static struct SwsContext *img_convert_ctx;
1219 /* wait until we have space to put a new picture */
1220 SDL_LockMutex(is->pictq_mutex);
1221 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1222 !is->videoq.abort_request) {
1223 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1225 SDL_UnlockMutex(is->pictq_mutex);
1227 if (is->videoq.abort_request)
1228 return -1;
1230 vp = &is->pictq[is->pictq_windex];
1232 /* alloc or resize hardware picture buffer */
1233 if (!vp->bmp ||
1234 vp->width != is->video_st->codec->width ||
1235 vp->height != is->video_st->codec->height) {
1236 SDL_Event event;
1238 vp->allocated = 0;
1240 /* the allocation must be done in the main thread to avoid
1241 locking problems */
1242 event.type = FF_ALLOC_EVENT;
1243 event.user.data1 = is;
1244 SDL_PushEvent(&event);
1246 /* wait until the picture is allocated */
1247 SDL_LockMutex(is->pictq_mutex);
1248 while (!vp->allocated && !is->videoq.abort_request) {
1249 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1251 SDL_UnlockMutex(is->pictq_mutex);
1253 if (is->videoq.abort_request)
1254 return -1;
1257 /* if the frame is not skipped, then display it */
1258 if (vp->bmp) {
1259 /* get a pointer to the bitmap */
1260 SDL_LockYUVOverlay (vp->bmp);
1262 dst_pix_fmt = PIX_FMT_YUV420P;
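/* SDL YV12 overlays store the V plane before the U plane, hence pixels[2] and
   pixels[1] are swapped relative to the YUV420P plane order of pict.data */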
1263 pict.data[0] = vp->bmp->pixels[0];
1264 pict.data[1] = vp->bmp->pixels[2];
1265 pict.data[2] = vp->bmp->pixels[1];
1267 pict.linesize[0] = vp->bmp->pitches[0];
1268 pict.linesize[1] = vp->bmp->pitches[2];
1269 pict.linesize[2] = vp->bmp->pitches[1];
1270 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1271 is->video_st->codec->width, is->video_st->codec->height,
1272 is->video_st->codec->pix_fmt,
1273 is->video_st->codec->width, is->video_st->codec->height,
1274 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1275 if (img_convert_ctx == NULL) {
1276 fprintf(stderr, "Cannot initialize the conversion context\n");
1277 exit(1);
1279 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1280 0, is->video_st->codec->height, pict.data, pict.linesize);
1281 /* update the bitmap content */
1282 SDL_UnlockYUVOverlay(vp->bmp);
1284 vp->pts = pts;
1286 /* now we can update the picture count */
1287 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1288 is->pictq_windex = 0;
1289 SDL_LockMutex(is->pictq_mutex);
1290 is->pictq_size++;
1291 SDL_UnlockMutex(is->pictq_mutex);
1293 return 0;
1297 * compute the exact PTS for the picture if it is omitted in the stream
1298 * @param pts1 the dts of the pkt / pts of the frame
1300 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1302 double frame_delay, pts;
1304 pts = pts1;
1306 if (pts != 0) {
1307 /* update video clock with pts, if present */
1308 is->video_clock = pts;
1309 } else {
1310 pts = is->video_clock;
1312 /* update video clock for next frame */
1313 frame_delay = av_q2d(is->video_st->codec->time_base);
1314 /* for MPEG2, the frame can be repeated, so we update the
1315 clock accordingly */
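/* repeat_pict counts extra half-frame periods the picture must be shown,
   so each unit adds frame_delay / 2 */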
1316 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1317 is->video_clock += frame_delay;
1319 #if defined(DEBUG_SYNC) && 0
1321 int ftype;
1322 if (src_frame->pict_type == FF_B_TYPE)
1323 ftype = 'B';
1324 else if (src_frame->pict_type == FF_I_TYPE)
1325 ftype = 'I';
1326 else
1327 ftype = 'P';
1328 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1329 ftype, pts, pts1);
1331 #endif
1332 return queue_picture(is, src_frame, pts);
1335 static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
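/* the decoder reorders frames, so the pts of the packet that starts a frame is
   stored in frame->opaque when the decoder allocates the frame buffer; video_thread()
   can then recover the presentation timestamp of reordered pictures */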
1337 static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1338 int ret= avcodec_default_get_buffer(c, pic);
1339 uint64_t *pts= av_malloc(sizeof(uint64_t));
1340 *pts= global_video_pkt_pts;
1341 pic->opaque= pts;
1342 return ret;
1345 static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1346 if(pic) av_freep(&pic->opaque);
1347 avcodec_default_release_buffer(c, pic);
1350 static int video_thread(void *arg)
1352 VideoState *is = arg;
1353 AVPacket pkt1, *pkt = &pkt1;
1354 int len1, got_picture;
1355 AVFrame *frame= avcodec_alloc_frame();
1356 double pts;
1358 for(;;) {
1359 while (is->paused && !is->videoq.abort_request) {
1360 SDL_Delay(10);
1362 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1363 break;
1365 if(pkt->data == flush_pkt.data){
1366 avcodec_flush_buffers(is->video_st->codec);
1367 continue;
1370 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1371 this packet, if any */
1372 global_video_pkt_pts= pkt->pts;
1373 len1 = avcodec_decode_video(is->video_st->codec,
1374 frame, &got_picture,
1375 pkt->data, pkt->size);
1377 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1378 && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
1379 pts= *(uint64_t*)frame->opaque;
1380 else if(pkt->dts != AV_NOPTS_VALUE)
1381 pts= pkt->dts;
1382 else
1383 pts= 0;
1384 pts *= av_q2d(is->video_st->time_base);
1386 // if (len1 < 0)
1387 // break;
1388 if (got_picture) {
1389 if (output_picture2(is, frame, pts) < 0)
1390 goto the_end;
1392 av_free_packet(pkt);
1393 if (step)
1394 if (cur_stream)
1395 stream_pause(cur_stream);
1397 the_end:
1398 av_free(frame);
1399 return 0;
1402 static int subtitle_thread(void *arg)
1404 VideoState *is = arg;
1405 SubPicture *sp;
1406 AVPacket pkt1, *pkt = &pkt1;
1407 int len1, got_subtitle;
1408 double pts;
1409 int i, j;
1410 int r, g, b, y, u, v, a;
1412 for(;;) {
1413 while (is->paused && !is->subtitleq.abort_request) {
1414 SDL_Delay(10);
1416 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1417 break;
1419 if(pkt->data == flush_pkt.data){
1420 avcodec_flush_buffers(is->subtitle_st->codec);
1421 continue;
1423 SDL_LockMutex(is->subpq_mutex);
1424 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1425 !is->subtitleq.abort_request) {
1426 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1428 SDL_UnlockMutex(is->subpq_mutex);
1430 if (is->subtitleq.abort_request)
1431 goto the_end;
1433 sp = &is->subpq[is->subpq_windex];
1435 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1436 this packet, if any */
1437 pts = 0;
1438 if (pkt->pts != AV_NOPTS_VALUE)
1439 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1441 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1442 &sp->sub, &got_subtitle,
1443 pkt->data, pkt->size);
1444 // if (len1 < 0)
1445 // break;
1446 if (got_subtitle && sp->sub.format == 0) {
1447 sp->pts = pts;
1449 for (i = 0; i < sp->sub.num_rects; i++)
1451 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1453 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1454 y = RGB_TO_Y_CCIR(r, g, b);
1455 u = RGB_TO_U_CCIR(r, g, b, 0);
1456 v = RGB_TO_V_CCIR(r, g, b, 0);
1457 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1461 /* now we can update the picture count */
1462 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1463 is->subpq_windex = 0;
1464 SDL_LockMutex(is->subpq_mutex);
1465 is->subpq_size++;
1466 SDL_UnlockMutex(is->subpq_mutex);
1468 av_free_packet(pkt);
1469 // if (step)
1470 // if (cur_stream)
1471 // stream_pause(cur_stream);
1473 the_end:
1474 return 0;
1477 /* copy samples for viewing in editor window */
1478 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1480 int size, len, channels;
1482 channels = is->audio_st->codec->channels;
1484 size = samples_size / sizeof(short);
1485 while (size > 0) {
1486 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1487 if (len > size)
1488 len = size;
1489 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1490 samples += len;
1491 is->sample_array_index += len;
1492 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1493 is->sample_array_index = 0;
1494 size -= len;
1498 /* return the new audio buffer size (samples can be added or deleted
1499 to get better sync if video or an external clock is the master) */
1500 static int synchronize_audio(VideoState *is, short *samples,
1501 int samples_size1, double pts)
1503 int n, samples_size;
1504 double ref_clock;
1506 n = 2 * is->audio_st->codec->channels;
1507 samples_size = samples_size1;
1509 /* if the audio clock is not the master, try to remove or add samples to correct it */
1510 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1511 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1512 double diff, avg_diff;
1513 int wanted_size, min_size, max_size, nb_samples;
1515 ref_clock = get_master_clock(is);
1516 diff = get_audio_clock(is) - ref_clock;
1518 if (diff < AV_NOSYNC_THRESHOLD) {
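/* audio_diff_cum is an exponentially weighted sum: cum = diff + coef * cum;
   multiplying by (1 - coef) below yields the weighted average of recent A-V differences */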
1519 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1520 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1521 /* not enough measurements yet for a reliable estimate */
1522 is->audio_diff_avg_count++;
1523 } else {
1524 /* estimate the A-V difference */
1525 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1527 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1528 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1529 nb_samples = samples_size / n;
1531 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1532 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1533 if (wanted_size < min_size)
1534 wanted_size = min_size;
1535 else if (wanted_size > max_size)
1536 wanted_size = max_size;
1538 /* add or remove samples to correct the synchronization */
1539 if (wanted_size < samples_size) {
1540 /* remove samples */
1541 samples_size = wanted_size;
1542 } else if (wanted_size > samples_size) {
1543 uint8_t *samples_end, *q;
1544 int nb;
1546 /* add samples */
1547 nb = wanted_size - samples_size; /* number of bytes to add by repeating the last sample */
1548 samples_end = (uint8_t *)samples + samples_size - n;
1549 q = samples_end + n;
1550 while (nb > 0) {
1551 memcpy(q, samples_end, n);
1552 q += n;
1553 nb -= n;
1555 samples_size = wanted_size;
1558 #if 0
1559 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1560 diff, avg_diff, samples_size - samples_size1,
1561 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1562 #endif
1564 } else {
1565 /* the difference is too big: it may be caused by initial PTS errors, so
1566 reset the A-V filter */
1567 is->audio_diff_avg_count = 0;
1568 is->audio_diff_cum = 0;
1572 return samples_size;
1575 /* decode one audio frame and return its uncompressed size */
1576 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1578 AVPacket *pkt = &is->audio_pkt;
1579 AVCodecContext *dec= is->audio_st->codec;
1580 int n, len1, data_size;
1581 double pts;
1583 for(;;) {
1584 /* NOTE: the audio packet can contain several frames */
1585 while (is->audio_pkt_size > 0) {
1586 data_size = sizeof(is->audio_buf1);
1587 len1 = avcodec_decode_audio2(dec,
1588 (int16_t *)is->audio_buf1, &data_size,
1589 is->audio_pkt_data, is->audio_pkt_size);
1590 if (len1 < 0) {
1591 /* if error, we skip the frame */
1592 is->audio_pkt_size = 0;
1593 break;
1596 is->audio_pkt_data += len1;
1597 is->audio_pkt_size -= len1;
1598 if (data_size <= 0)
1599 continue;
1601 if (dec->sample_fmt != is->audio_src_fmt) {
1602 if (is->reformat_ctx)
1603 av_audio_convert_free(is->reformat_ctx);
1604 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1605 dec->sample_fmt, 1, NULL, 0);
1606 if (!is->reformat_ctx) {
1607 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1608 avcodec_get_sample_fmt_name(dec->sample_fmt),
1609 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1610 break;
1612 is->audio_src_fmt= dec->sample_fmt;
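/* convert the decoded samples to native-order signed 16-bit (SAMPLE_FMT_S16),
   which is what the SDL audio output and the rest of the player expect */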
1615 if (is->reformat_ctx) {
1616 const void *ibuf[6]= {is->audio_buf1};
1617 void *obuf[6]= {is->audio_buf2};
1618 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1619 int ostride[6]= {2};
1620 int len= data_size/istride[0];
1621 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1622 printf("av_audio_convert() failed\n");
1623 break;
1625 is->audio_buf= is->audio_buf2;
1626 /* FIXME: the existing code assumes that data_size equals framesize*channels*2;
1627 remove this legacy cruft */
1628 data_size= len*2;
1629 }else{
1630 is->audio_buf= is->audio_buf1;
1633 /* if no pts, then compute it */
1634 pts = is->audio_clock;
1635 *pts_ptr = pts;
1636 n = 2 * dec->channels;
1637 is->audio_clock += (double)data_size /
1638 (double)(n * dec->sample_rate);
1639 #if defined(DEBUG_SYNC)
1641 static double last_clock;
1642 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1643 is->audio_clock - last_clock,
1644 is->audio_clock, pts);
1645 last_clock = is->audio_clock;
1647 #endif
1648 return data_size;
1651 /* free the current packet */
1652 if (pkt->data)
1653 av_free_packet(pkt);
1655 if (is->paused || is->audioq.abort_request) {
1656 return -1;
1659 /* read next packet */
1660 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1661 return -1;
1662 if(pkt->data == flush_pkt.data){
1663 avcodec_flush_buffers(dec);
1664 continue;
1667 is->audio_pkt_data = pkt->data;
1668 is->audio_pkt_size = pkt->size;
1670 /* update the audio clock with the packet pts, if available */
1671 if (pkt->pts != AV_NOPTS_VALUE) {
1672 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1677 /* get the current audio output buffer size, in bytes. With SDL, we
1678 cannot get precise information */
1679 static int audio_write_get_buf_size(VideoState *is)
1681 return is->audio_buf_size - is->audio_buf_index;
1685 /* prepare a new audio buffer */
1686 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1688 VideoState *is = opaque;
1689 int audio_size, len1;
1690 double pts;
1692 audio_callback_time = av_gettime();
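/* record when SDL requested data; video_audio_display() uses this to estimate
   how much of the buffer has already been played */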
1694 while (len > 0) {
1695 if (is->audio_buf_index >= is->audio_buf_size) {
1696 audio_size = audio_decode_frame(is, &pts);
1697 if (audio_size < 0) {
1698 /* if error, just output silence */
1699 is->audio_buf_size = 1024;
1700 memset(is->audio_buf, 0, is->audio_buf_size);
1701 } else {
1702 if (is->show_audio)
1703 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1704 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1705 pts);
1706 is->audio_buf_size = audio_size;
1708 is->audio_buf_index = 0;
1710 len1 = is->audio_buf_size - is->audio_buf_index;
1711 if (len1 > len)
1712 len1 = len;
1713 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1714 len -= len1;
1715 stream += len1;
1716 is->audio_buf_index += len1;
1720 /* open a given stream. Return 0 if OK */
1721 static int stream_component_open(VideoState *is, int stream_index)
1723 AVFormatContext *ic = is->ic;
1724 AVCodecContext *enc;
1725 AVCodec *codec;
1726 SDL_AudioSpec wanted_spec, spec;
1728 if (stream_index < 0 || stream_index >= ic->nb_streams)
1729 return -1;
1730 enc = ic->streams[stream_index]->codec;
1732 /* prepare audio output */
1733 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1734 if (enc->channels > 0) {
1735 enc->request_channels = FFMIN(2, enc->channels);
1736 } else {
1737 enc->request_channels = 2;
1741 codec = avcodec_find_decoder(enc->codec_id);
1742 enc->debug_mv = debug_mv;
1743 enc->debug = debug;
1744 enc->workaround_bugs = workaround_bugs;
1745 enc->lowres = lowres;
1746 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1747 enc->idct_algo= idct;
1748 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1749 enc->skip_frame= skip_frame;
1750 enc->skip_idct= skip_idct;
1751 enc->skip_loop_filter= skip_loop_filter;
1752 enc->error_resilience= error_resilience;
1753 enc->error_concealment= error_concealment;
1754 if (!codec ||
1755 avcodec_open(enc, codec) < 0)
1756 return -1;
1758 /* prepare audio output */
1759 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1760 wanted_spec.freq = enc->sample_rate;
1761 wanted_spec.format = AUDIO_S16SYS;
1762 wanted_spec.channels = enc->channels;
1763 wanted_spec.silence = 0;
1764 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1765 wanted_spec.callback = sdl_audio_callback;
1766 wanted_spec.userdata = is;
1767 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1768 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1769 return -1;
1771 is->audio_hw_buf_size = spec.size;
1772 is->audio_src_fmt= SAMPLE_FMT_S16;
1775 if(thread_count>1)
1776 avcodec_thread_init(enc, thread_count);
1777 enc->thread_count= thread_count;
1778 switch(enc->codec_type) {
1779 case CODEC_TYPE_AUDIO:
1780 is->audio_stream = stream_index;
1781 is->audio_st = ic->streams[stream_index];
1782 is->audio_buf_size = 0;
1783 is->audio_buf_index = 0;
1785 /* init averaging filter */
1786 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1787 is->audio_diff_avg_count = 0;
1788 /* since we do not have precise enough audio fifo fullness information,
1789 we correct audio sync only if the error is larger than this threshold */
1790 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1792 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1793 packet_queue_init(&is->audioq);
1794 SDL_PauseAudio(0);
1795 break;
1796 case CODEC_TYPE_VIDEO:
1797 is->video_stream = stream_index;
1798 is->video_st = ic->streams[stream_index];
1800 is->frame_last_delay = 40e-3;
1801 is->frame_timer = (double)av_gettime() / 1000000.0;
1802 is->video_current_pts_time = av_gettime();
1804 packet_queue_init(&is->videoq);
1805 is->video_tid = SDL_CreateThread(video_thread, is);
1807 enc-> get_buffer= my_get_buffer;
1808 enc->release_buffer= my_release_buffer;
1809 break;
1810 case CODEC_TYPE_SUBTITLE:
1811 is->subtitle_stream = stream_index;
1812 is->subtitle_st = ic->streams[stream_index];
1813 packet_queue_init(&is->subtitleq);
1815 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1816 break;
1817 default:
1818 break;
1820 return 0;
1823 static void stream_component_close(VideoState *is, int stream_index)
1825 AVFormatContext *ic = is->ic;
1826 AVCodecContext *enc;
1828 if (stream_index < 0 || stream_index >= ic->nb_streams)
1829 return;
1830 enc = ic->streams[stream_index]->codec;
1832 switch(enc->codec_type) {
1833 case CODEC_TYPE_AUDIO:
1834 packet_queue_abort(&is->audioq);
1836 SDL_CloseAudio();
1838 packet_queue_end(&is->audioq);
1839 if (is->reformat_ctx)
1840 av_audio_convert_free(is->reformat_ctx);
1841 break;
1842 case CODEC_TYPE_VIDEO:
1843 packet_queue_abort(&is->videoq);
1845 /* note: we also signal this mutex to make sure we unblock the
1846 video thread in all cases */
1847 SDL_LockMutex(is->pictq_mutex);
1848 SDL_CondSignal(is->pictq_cond);
1849 SDL_UnlockMutex(is->pictq_mutex);
1851 SDL_WaitThread(is->video_tid, NULL);
1853 packet_queue_end(&is->videoq);
1854 break;
1855 case CODEC_TYPE_SUBTITLE:
1856 packet_queue_abort(&is->subtitleq);
1858 /* note: we also signal this mutex to make sure we unblock the
1859 subtitle thread in all cases */
1860 SDL_LockMutex(is->subpq_mutex);
1861 is->subtitle_stream_changed = 1;
1863 SDL_CondSignal(is->subpq_cond);
1864 SDL_UnlockMutex(is->subpq_mutex);
1866 SDL_WaitThread(is->subtitle_tid, NULL);
1868 packet_queue_end(&is->subtitleq);
1869 break;
1870 default:
1871 break;
1874 avcodec_close(enc);
1875 switch(enc->codec_type) {
1876 case CODEC_TYPE_AUDIO:
1877 is->audio_st = NULL;
1878 is->audio_stream = -1;
1879 break;
1880 case CODEC_TYPE_VIDEO:
1881 is->video_st = NULL;
1882 is->video_stream = -1;
1883 break;
1884 case CODEC_TYPE_SUBTITLE:
1885 is->subtitle_st = NULL;
1886 is->subtitle_stream = -1;
1887 break;
1888 default:
1889 break;
1893 static void dump_stream_info(const AVFormatContext *s)
1895 if (s->track != 0)
1896 fprintf(stderr, "Track: %d\n", s->track);
1897 if (s->title[0] != '\0')
1898 fprintf(stderr, "Title: %s\n", s->title);
1899 if (s->author[0] != '\0')
1900 fprintf(stderr, "Author: %s\n", s->author);
1901 if (s->copyright[0] != '\0')
1902 fprintf(stderr, "Copyright: %s\n", s->copyright);
1903 if (s->comment[0] != '\0')
1904 fprintf(stderr, "Comment: %s\n", s->comment);
1905 if (s->album[0] != '\0')
1906 fprintf(stderr, "Album: %s\n", s->album);
1907 if (s->year != 0)
1908 fprintf(stderr, "Year: %d\n", s->year);
1909 if (s->genre[0] != '\0')
1910 fprintf(stderr, "Genre: %s\n", s->genre);
1913 /* since we have only one decoding thread, we can use a global
1914 variable instead of a thread local variable */
1915 static VideoState *global_video_state;
1917 static int decode_interrupt_cb(void)
1919 return (global_video_state && global_video_state->abort_request);
1922 /* this thread gets the stream from the disk or the network */
1923 static int decode_thread(void *arg)
1925 VideoState *is = arg;
1926 AVFormatContext *ic;
1927 int err, i, ret, video_index, audio_index;
1928 AVPacket pkt1, *pkt = &pkt1;
1929 AVFormatParameters params, *ap = &params;
1931 video_index = -1;
1932 audio_index = -1;
1933 is->video_stream = -1;
1934 is->audio_stream = -1;
1935 is->subtitle_stream = -1;
1937 global_video_state = is;
1938 url_set_interrupt_cb(decode_interrupt_cb);
1940 memset(ap, 0, sizeof(*ap));
1942 ap->width = frame_width;
1943 ap->height= frame_height;
1944 ap->time_base= (AVRational){1, 25};
1945 ap->pix_fmt = frame_pix_fmt;
1947 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1948 if (err < 0) {
1949 print_error(is->filename, err);
1950 ret = -1;
1951 goto fail;
1953 is->ic = ic;
1955 if(genpts)
1956 ic->flags |= AVFMT_FLAG_GENPTS;
1958 err = av_find_stream_info(ic);
1959 if (err < 0) {
1960 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1961 ret = -1;
1962 goto fail;
1964 if(ic->pb)
1965 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1967 /* if seeking was requested, execute it now */
1968 if (start_time != AV_NOPTS_VALUE) {
1969 int64_t timestamp;
1971 timestamp = start_time;
1972 /* add the stream start time */
1973 if (ic->start_time != AV_NOPTS_VALUE)
1974 timestamp += ic->start_time;
1975 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1976 if (ret < 0) {
1977 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1978 is->filename, (double)timestamp / AV_TIME_BASE);
1982 for(i = 0; i < ic->nb_streams; i++) {
1983 AVCodecContext *enc = ic->streams[i]->codec;
1984 switch(enc->codec_type) {
1985 case CODEC_TYPE_AUDIO:
1986 if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1987 audio_index = i;
1988 break;
1989 case CODEC_TYPE_VIDEO:
1990 if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
1991 video_index = i;
1992 break;
1993 default:
1994 break;
1997 if (show_status) {
1998 dump_format(ic, 0, is->filename, 0);
1999 dump_stream_info(ic);
2002 /* open the streams */
2003 if (audio_index >= 0) {
2004 stream_component_open(is, audio_index);
2007 if (video_index >= 0) {
2008 stream_component_open(is, video_index);
2009 } else {
2010 if (!display_disable)
2011 is->show_audio = 1;
2014 if (is->video_stream < 0 && is->audio_stream < 0) {
2015 fprintf(stderr, "%s: could not open codecs\n", is->filename);
2016 ret = -1;
2017 goto fail;
2020 for(;;) {
2021 if (is->abort_request)
2022 break;
2023 if (is->paused != is->last_paused) {
2024 is->last_paused = is->paused;
2025 if (is->paused)
2026 av_read_pause(ic);
2027 else
2028 av_read_play(ic);
2030 #if defined(CONFIG_RTSP_DEMUXER) || defined(CONFIG_MMSH_PROTOCOL)
2031 if (is->paused &&
2032 (!strcmp(ic->iformat->name, "rtsp") ||
2033 (ic->pb && !strcmp(url_fileno(ic->pb)->prot->name, "mmsh")))) {
2034 /* wait 10 ms to avoid trying to get another packet */
2035 /* XXX: horrible */
2036 SDL_Delay(10);
2037 continue;
2039 #endif
2040 if (is->seek_req) {
2041 int stream_index= -1;
2042 int64_t seek_target= is->seek_pos;
2044 if (is-> video_stream >= 0) stream_index= is-> video_stream;
2045 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
2046 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
2048 if(stream_index>=0){
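/* the seek target is expressed in AV_TIME_BASE units; rescale it to the
   selected stream's own time base before seeking */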
2049 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2052 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2053 if (ret < 0) {
2054 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2055 }else{
2056 if (is->audio_stream >= 0) {
2057 packet_queue_flush(&is->audioq);
2058 packet_queue_put(&is->audioq, &flush_pkt);
2060 if (is->subtitle_stream >= 0) {
2061 packet_queue_flush(&is->subtitleq);
2062 packet_queue_put(&is->subtitleq, &flush_pkt);
2064 if (is->video_stream >= 0) {
2065 packet_queue_flush(&is->videoq);
2066 packet_queue_put(&is->videoq, &flush_pkt);
2069 is->seek_req = 0;
2072 /* if the queues are full, no need to read more */
2073 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2074 is->videoq.size > MAX_VIDEOQ_SIZE ||
2075 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2076 url_feof(ic->pb)) {
2077 /* wait 10 ms */
2078 SDL_Delay(10);
2079 continue;
2081 ret = av_read_frame(ic, pkt);
2082 if (ret < 0) {
2083 if (url_ferror(ic->pb) == 0) {
2084 SDL_Delay(100); /* wait for user event */
2085 continue;
2086 } else
2087 break;
2089 if (pkt->stream_index == is->audio_stream) {
2090 packet_queue_put(&is->audioq, pkt);
2091 } else if (pkt->stream_index == is->video_stream) {
2092 packet_queue_put(&is->videoq, pkt);
2093 } else if (pkt->stream_index == is->subtitle_stream) {
2094 packet_queue_put(&is->subtitleq, pkt);
2095 } else {
2096 av_free_packet(pkt);
2099 /* wait until the end */
2100 while (!is->abort_request) {
2101 SDL_Delay(100);
2104 ret = 0;
2105 fail:
2106 /* disable interrupting */
2107 global_video_state = NULL;
2109 /* close each stream */
2110 if (is->audio_stream >= 0)
2111 stream_component_close(is, is->audio_stream);
2112 if (is->video_stream >= 0)
2113 stream_component_close(is, is->video_stream);
2114 if (is->subtitle_stream >= 0)
2115 stream_component_close(is, is->subtitle_stream);
2116 if (is->ic) {
2117 av_close_input_file(is->ic);
2118 is->ic = NULL; /* safety */
2119 }
2120 url_set_interrupt_cb(NULL);
2122 if (ret != 0) {
2123 SDL_Event event;
2125 event.type = FF_QUIT_EVENT;
2126 event.user.data1 = is;
2127 SDL_PushEvent(&event);
2128 }
2129 return 0;
2130 }
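/* allocate a VideoState for the given file, create the picture/subtitle queue locks and start the decode thread */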
2132 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2133 {
2134 VideoState *is;
2136 is = av_mallocz(sizeof(VideoState));
2137 if (!is)
2138 return NULL;
2139 av_strlcpy(is->filename, filename, sizeof(is->filename));
2140 is->iformat = iformat;
2141 is->ytop = 0;
2142 is->xleft = 0;
2144 /* start video display */
2145 is->pictq_mutex = SDL_CreateMutex();
2146 is->pictq_cond = SDL_CreateCond();
2148 is->subpq_mutex = SDL_CreateMutex();
2149 is->subpq_cond = SDL_CreateCond();
2151 /* add the refresh timer to draw the picture */
2152 schedule_refresh(is, 40);
2154 is->av_sync_type = av_sync_type;
2155 is->parse_tid = SDL_CreateThread(decode_thread, is);
2156 if (!is->parse_tid) {
2157 av_free(is);
2158 return NULL;
2159 }
2160 return is;
2161 }
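/* ask the decode thread to stop, wait for it, then free the queued pictures and SDL primitives */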
2163 static void stream_close(VideoState *is)
2164 {
2165 VideoPicture *vp;
2166 int i;
2167 /* XXX: use a special url_shutdown call to abort parse cleanly */
2168 is->abort_request = 1;
2169 SDL_WaitThread(is->parse_tid, NULL);
2171 /* free all pictures */
2172 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2173 vp = &is->pictq[i];
2174 if (vp->bmp) {
2175 SDL_FreeYUVOverlay(vp->bmp);
2176 vp->bmp = NULL;
2177 }
2178 }
2179 SDL_DestroyMutex(is->pictq_mutex);
2180 SDL_DestroyCond(is->pictq_cond);
2181 SDL_DestroyMutex(is->subpq_mutex);
2182 SDL_DestroyCond(is->subpq_cond);
2183 }
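/* switch to the next usable stream of the given type (audio, video or subtitle) */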
2185 static void stream_cycle_channel(VideoState *is, int codec_type)
2186 {
2187 AVFormatContext *ic = is->ic;
2188 int start_index, stream_index;
2189 AVStream *st;
2191 if (codec_type == CODEC_TYPE_VIDEO)
2192 start_index = is->video_stream;
2193 else if (codec_type == CODEC_TYPE_AUDIO)
2194 start_index = is->audio_stream;
2195 else
2196 start_index = is->subtitle_stream;
2197 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2198 return;
2199 stream_index = start_index;
2200 for(;;) {
2201 if (++stream_index >= is->ic->nb_streams)
2202 {
2203 if (codec_type == CODEC_TYPE_SUBTITLE)
2204 {
2205 stream_index = -1;
2206 goto the_end;
2207 } else
2208 stream_index = 0;
2209 }
2210 if (stream_index == start_index)
2211 return;
2212 st = ic->streams[stream_index];
2213 if (st->codec->codec_type == codec_type) {
2214 /* check that parameters are OK */
2215 switch(codec_type) {
2216 case CODEC_TYPE_AUDIO:
2217 if (st->codec->sample_rate != 0 &&
2218 st->codec->channels != 0)
2219 goto the_end;
2220 break;
2221 case CODEC_TYPE_VIDEO:
2222 case CODEC_TYPE_SUBTITLE:
2223 goto the_end;
2224 default:
2225 break;
2226 }
2227 }
2228 }
2229 the_end:
2230 stream_component_close(is, start_index);
2231 stream_component_open(is, stream_index);
2232 }
2235 static void toggle_full_screen(void)
2236 {
2237 is_full_screen = !is_full_screen;
2238 if (!fs_screen_width) {
2239 /* use default SDL method */
2240 // SDL_WM_ToggleFullScreen(screen);
2241 }
2242 video_open(cur_stream);
2243 }
2245 static void toggle_pause(void)
2246 {
2247 if (cur_stream)
2248 stream_pause(cur_stream);
2249 step = 0;
2250 }
2252 static void step_to_next_frame(void)
2253 {
2254 if (cur_stream) {
2255 /* if the stream is paused, unpause it, then step */
2256 if (cur_stream->paused)
2257 stream_pause(cur_stream);
2258 }
2259 step = 1;
2260 }
2262 static void do_exit(void)
2263 {
2264 if (cur_stream) {
2265 stream_close(cur_stream);
2266 cur_stream = NULL;
2267 }
2268 if (show_status)
2269 printf("\n");
2270 SDL_Quit();
2271 exit(0);
2272 }
2274 static void toggle_audio_display(void)
2275 {
2276 if (cur_stream) {
2277 cur_stream->show_audio = !cur_stream->show_audio;
2278 }
2279 }
2281 /* handle an event sent by the GUI */
2282 static void event_loop(void)
2283 {
2284 SDL_Event event;
2285 double incr, pos, frac;
2287 for(;;) {
2288 SDL_WaitEvent(&event);
2289 switch(event.type) {
2290 case SDL_KEYDOWN:
2291 switch(event.key.keysym.sym) {
2292 case SDLK_ESCAPE:
2293 case SDLK_q:
2294 do_exit();
2295 break;
2296 case SDLK_f:
2297 toggle_full_screen();
2298 break;
2299 case SDLK_p:
2300 case SDLK_SPACE:
2301 toggle_pause();
2302 break;
2303 case SDLK_s: //S: Step to next frame
2304 step_to_next_frame();
2305 break;
2306 case SDLK_a:
2307 if (cur_stream)
2308 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2309 break;
2310 case SDLK_v:
2311 if (cur_stream)
2312 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2313 break;
2314 case SDLK_t:
2315 if (cur_stream)
2316 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2317 break;
2318 case SDLK_w:
2319 toggle_audio_display();
2320 break;
2321 case SDLK_LEFT:
2322 incr = -10.0;
2323 goto do_seek;
2324 case SDLK_RIGHT:
2325 incr = 10.0;
2326 goto do_seek;
2327 case SDLK_UP:
2328 incr = 60.0;
2329 goto do_seek;
2330 case SDLK_DOWN:
2331 incr = -60.0;
2332 do_seek:
2333 if (cur_stream) {
2334 if (seek_by_bytes) {
2335 pos = url_ftell(cur_stream->ic->pb);
2336 if (cur_stream->ic->bit_rate)
2337 incr *= cur_stream->ic->bit_rate / 60.0;
2338 else
2339 incr *= 180000.0;
2340 pos += incr;
2341 stream_seek(cur_stream, pos, incr);
2342 } else {
2343 pos = get_master_clock(cur_stream);
2344 pos += incr;
2345 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2346 }
2347 }
2348 break;
2349 default:
2350 break;
2351 }
2352 break;
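/* a mouse click seeks to the clicked fraction of the total duration */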
2353 case SDL_MOUSEBUTTONDOWN:
2354 if (cur_stream) {
2355 int ns, hh, mm, ss;
2356 int tns, thh, tmm, tss;
2357 tns = cur_stream->ic->duration/1000000LL;
2358 thh = tns/3600;
2359 tmm = (tns%3600)/60;
2360 tss = (tns%60);
2361 frac = (double)event.button.x/(double)cur_stream->width;
2362 ns = frac*tns;
2363 hh = ns/3600;
2364 mm = (ns%3600)/60;
2365 ss = (ns%60);
2366 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2367 hh, mm, ss, thh, tmm, tss);
2368 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2369 }
2370 break;
2371 case SDL_VIDEORESIZE:
2372 if (cur_stream) {
2373 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2374 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2375 screen_width = cur_stream->width = event.resize.w;
2376 screen_height= cur_stream->height= event.resize.h;
2377 }
2378 break;
2379 case SDL_QUIT:
2380 case FF_QUIT_EVENT:
2381 do_exit();
2382 break;
2383 case FF_ALLOC_EVENT:
2384 video_open(event.user.data1);
2385 alloc_picture(event.user.data1);
2386 break;
2387 case FF_REFRESH_EVENT:
2388 video_refresh_timer(event.user.data1);
2389 break;
2390 default:
2391 break;
2392 }
2393 }
2394 }
2396 static void opt_frame_size(const char *arg)
2397 {
2398 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2399 fprintf(stderr, "Incorrect frame size\n");
2400 exit(1);
2401 }
2402 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2403 fprintf(stderr, "Frame size must be a multiple of 2\n");
2404 exit(1);
2405 }
2406 }
2408 static int opt_width(const char *opt, const char *arg)
2409 {
2410 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2411 return 0;
2412 }
2414 static int opt_height(const char *opt, const char *arg)
2415 {
2416 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2417 return 0;
2418 }
2420 static void opt_format(const char *arg)
2421 {
2422 file_iformat = av_find_input_format(arg);
2423 if (!file_iformat) {
2424 fprintf(stderr, "Unknown input format: %s\n", arg);
2425 exit(1);
2426 }
2427 }
2429 static void opt_frame_pix_fmt(const char *arg)
2430 {
2431 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2432 }
2434 static int opt_sync(const char *opt, const char *arg)
2435 {
2436 if (!strcmp(arg, "audio"))
2437 av_sync_type = AV_SYNC_AUDIO_MASTER;
2438 else if (!strcmp(arg, "video"))
2439 av_sync_type = AV_SYNC_VIDEO_MASTER;
2440 else if (!strcmp(arg, "ext"))
2441 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2442 else {
2443 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2444 exit(1);
2445 }
2446 return 0;
2447 }
2449 static int opt_seek(const char *opt, const char *arg)
2450 {
2451 start_time = parse_time_or_die(opt, arg, 1);
2452 return 0;
2453 }
2455 static int opt_debug(const char *opt, const char *arg)
2456 {
2457 av_log_set_level(99);
2458 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2459 return 0;
2460 }
2462 static int opt_vismv(const char *opt, const char *arg)
2463 {
2464 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2465 return 0;
2466 }
2468 static int opt_thread_count(const char *opt, const char *arg)
2469 {
2470 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2471 #if !defined(HAVE_THREADS)
2472 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2473 #endif
2474 return 0;
2475 }
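/* command line options; the table is handed to parse_options() in main() */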
2477 static const OptionDef options[] = {
2478 { "h", OPT_EXIT, {(void*)show_help}, "show help" },
2479 { "version", OPT_EXIT, {(void*)show_version}, "show version" },
2480 { "L", OPT_EXIT, {(void*)show_license}, "show license" },
2481 { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
2482 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2483 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2484 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2485 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2486 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2487 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2488 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2489 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
2490 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2491 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2492 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2493 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2494 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2495 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2496 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2497 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2498 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2499 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2500 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2501 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2502 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2503 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2504 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2505 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2506 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2507 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2508 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2509 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2510 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2511 { NULL, },
2512 };
2514 static void show_help(void)
2515 {
2516 printf("usage: ffplay [options] input_file\n"
2517 "Simple media player\n");
2518 printf("\n");
2519 show_help_options(options, "Main options:\n",
2520 OPT_EXPERT, 0);
2521 show_help_options(options, "\nAdvanced options:\n",
2522 OPT_EXPERT, OPT_EXPERT);
2523 printf("\nWhile playing:\n"
2524 "q, ESC quit\n"
2525 "f toggle full screen\n"
2526 "p, SPC pause\n"
2527 "a cycle audio channel\n"
2528 "v cycle video channel\n"
2529 "t cycle subtitle channel\n"
2530 "w show audio waves\n"
2531 "left/right seek backward/forward 10 seconds\n"
2532 "down/up seek backward/forward 1 minute\n"
2533 "mouse click seek to percentage in file corresponding to fraction of width\n"
2537 static void opt_input_file(const char *filename)
2539 if (!strcmp(filename, "-"))
2540 filename = "pipe:";
2541 input_filename = filename;
2542 }
2544 /* program entry point */
2545 int main(int argc, char **argv)
2546 {
2547 int flags;
2549 /* register all codecs, demuxers and protocols */
2550 avcodec_register_all();
2551 avdevice_register_all();
2552 av_register_all();
2554 show_banner();
2556 parse_options(argc, argv, options, opt_input_file);
2558 if (!input_filename) {
2559 show_help();
2560 exit(1);
2561 }
2563 if (display_disable) {
2564 video_disable = 1;
2565 }
2566 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2567 #if !defined(__MINGW32__) && !defined(__APPLE__)
2568 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2569 #endif
2570 if (SDL_Init (flags)) {
2571 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2572 exit(1);
2573 }
2575 if (!display_disable) {
2576 #ifdef HAVE_SDL_VIDEO_SIZE
2577 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2578 fs_screen_width = vi->current_w;
2579 fs_screen_height = vi->current_h;
2580 #endif
2581 }
2583 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2584 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2585 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2586 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2588 av_init_packet(&flush_pkt);
2589 flush_pkt.data= "FLUSH";
2591 cur_stream = stream_open(input_filename, file_iformat);
2593 event_loop();
2595 /* never returns */
2597 return 0;
2598 }