Don't let finalize_packet() touch pkt->stream_index. Instead, let individual
[ffmpeg-lucabe.git] / ffplay.c
blob9551561d649be392af13321aa605418a3e959485
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
30 #include "libavcodec/opt.h"
32 #include "cmdutils.h"
34 #include <SDL.h>
35 #include <SDL_thread.h>
37 #ifdef __MINGW32__
38 #undef main /* We don't want SDL to override our main() */
39 #endif
41 #undef exit
43 const char program_name[] = "FFplay";
44 const int program_birth_year = 2003;
46 //#define DEBUG_SYNC
48 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
49 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
50 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
52 /* SDL audio buffer size, in samples. Should be small to have precise
53 A/V sync as SDL does not have hardware buffer fullness info. */
54 #define SDL_AUDIO_BUFFER_SIZE 1024
56 /* no AV sync correction is done if below the AV sync threshold */
57 #define AV_SYNC_THRESHOLD 0.01
58 /* no AV correction is done if too big error */
59 #define AV_NOSYNC_THRESHOLD 10.0
61 /* maximum audio speed change to get correct sync */
62 #define SAMPLE_CORRECTION_PERCENT_MAX 10
64 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
65 #define AUDIO_DIFF_AVG_NB 20
67 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
68 #define SAMPLE_ARRAY_SIZE (2*65536)
70 static int sws_flags = SWS_BICUBIC;
72 typedef struct PacketQueue {
73 AVPacketList *first_pkt, *last_pkt;
74 int nb_packets;
75 int size;
76 int abort_request;
77 SDL_mutex *mutex;
78 SDL_cond *cond;
79 } PacketQueue;
81 #define VIDEO_PICTURE_QUEUE_SIZE 1
82 #define SUBPICTURE_QUEUE_SIZE 4
84 typedef struct VideoPicture {
85 double pts; ///<presentation time stamp for this picture
86 SDL_Overlay *bmp;
87 int width, height; /* source height & width */
88 int allocated;
89 } VideoPicture;
91 typedef struct SubPicture {
92 double pts; /* presentation time stamp for this picture */
93 AVSubtitle sub;
94 } SubPicture;
/* Which clock drives A/V synchronisation. */
enum {
    AV_SYNC_AUDIO_MASTER,   /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
102 typedef struct VideoState {
103 SDL_Thread *parse_tid;
104 SDL_Thread *video_tid;
105 AVInputFormat *iformat;
106 int no_background;
107 int abort_request;
108 int paused;
109 int last_paused;
110 int seek_req;
111 int seek_flags;
112 int64_t seek_pos;
113 AVFormatContext *ic;
114 int dtg_active_format;
116 int audio_stream;
118 int av_sync_type;
119 double external_clock; /* external clock base */
120 int64_t external_clock_time;
122 double audio_clock;
123 double audio_diff_cum; /* used for AV difference average computation */
124 double audio_diff_avg_coef;
125 double audio_diff_threshold;
126 int audio_diff_avg_count;
127 AVStream *audio_st;
128 PacketQueue audioq;
129 int audio_hw_buf_size;
130 /* samples output by the codec. we reserve more space for avsync
131 compensation */
132 DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
133 DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
134 uint8_t *audio_buf;
135 unsigned int audio_buf_size; /* in bytes */
136 int audio_buf_index; /* in bytes */
137 AVPacket audio_pkt;
138 uint8_t *audio_pkt_data;
139 int audio_pkt_size;
140 enum SampleFormat audio_src_fmt;
141 AVAudioConvert *reformat_ctx;
143 int show_audio; /* if true, display audio samples */
144 int16_t sample_array[SAMPLE_ARRAY_SIZE];
145 int sample_array_index;
146 int last_i_start;
148 SDL_Thread *subtitle_tid;
149 int subtitle_stream;
150 int subtitle_stream_changed;
151 AVStream *subtitle_st;
152 PacketQueue subtitleq;
153 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
154 int subpq_size, subpq_rindex, subpq_windex;
155 SDL_mutex *subpq_mutex;
156 SDL_cond *subpq_cond;
158 double frame_timer;
159 double frame_last_pts;
160 double frame_last_delay;
161 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
162 int video_stream;
163 AVStream *video_st;
164 PacketQueue videoq;
165 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
166 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
167 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
168 int pictq_size, pictq_rindex, pictq_windex;
169 SDL_mutex *pictq_mutex;
170 SDL_cond *pictq_cond;
172 // QETimer *video_timer;
173 char filename[1024];
174 int width, height, xleft, ytop;
175 } VideoState;
177 static void show_help(void);
178 static int audio_write_get_buf_size(VideoState *is);
180 /* options specified by the user */
181 static AVInputFormat *file_iformat;
182 static const char *input_filename;
183 static int fs_screen_width;
184 static int fs_screen_height;
185 static int screen_width = 0;
186 static int screen_height = 0;
187 static int frame_width = 0;
188 static int frame_height = 0;
189 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
190 static int audio_disable;
191 static int video_disable;
192 static int wanted_audio_stream= 0;
193 static int wanted_video_stream= 0;
194 static int wanted_subtitle_stream= -1;
195 static int seek_by_bytes;
196 static int display_disable;
197 static int show_status;
198 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
199 static int64_t start_time = AV_NOPTS_VALUE;
200 static int debug = 0;
201 static int debug_mv = 0;
202 static int step = 0;
203 static int thread_count = 1;
204 static int workaround_bugs = 1;
205 static int fast = 0;
206 static int genpts = 0;
207 static int lowres = 0;
208 static int idct = FF_IDCT_AUTO;
209 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
210 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
211 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
212 static int error_recognition = FF_ER_CAREFUL;
213 static int error_concealment = 3;
214 static int decoder_reorder_pts= 0;
216 /* current context */
217 static int is_full_screen;
218 static VideoState *cur_stream;
219 static int64_t audio_callback_time;
221 static AVPacket flush_pkt;
223 #define FF_ALLOC_EVENT (SDL_USEREVENT)
224 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
225 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
227 static SDL_Surface *screen;
229 /* packet queue handling */
230 static void packet_queue_init(PacketQueue *q)
232 memset(q, 0, sizeof(PacketQueue));
233 q->mutex = SDL_CreateMutex();
234 q->cond = SDL_CreateCond();
237 static void packet_queue_flush(PacketQueue *q)
239 AVPacketList *pkt, *pkt1;
241 SDL_LockMutex(q->mutex);
242 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
243 pkt1 = pkt->next;
244 av_free_packet(&pkt->pkt);
245 av_freep(&pkt);
247 q->last_pkt = NULL;
248 q->first_pkt = NULL;
249 q->nb_packets = 0;
250 q->size = 0;
251 SDL_UnlockMutex(q->mutex);
254 static void packet_queue_end(PacketQueue *q)
256 packet_queue_flush(q);
257 SDL_DestroyMutex(q->mutex);
258 SDL_DestroyCond(q->cond);
261 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
263 AVPacketList *pkt1;
265 /* duplicate the packet */
266 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
267 return -1;
269 pkt1 = av_malloc(sizeof(AVPacketList));
270 if (!pkt1)
271 return -1;
272 pkt1->pkt = *pkt;
273 pkt1->next = NULL;
276 SDL_LockMutex(q->mutex);
278 if (!q->last_pkt)
280 q->first_pkt = pkt1;
281 else
282 q->last_pkt->next = pkt1;
283 q->last_pkt = pkt1;
284 q->nb_packets++;
285 q->size += pkt1->pkt.size + sizeof(*pkt1);
286 /* XXX: should duplicate packet data in DV case */
287 SDL_CondSignal(q->cond);
289 SDL_UnlockMutex(q->mutex);
290 return 0;
293 static void packet_queue_abort(PacketQueue *q)
295 SDL_LockMutex(q->mutex);
297 q->abort_request = 1;
299 SDL_CondSignal(q->cond);
301 SDL_UnlockMutex(q->mutex);
304 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
305 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
307 AVPacketList *pkt1;
308 int ret;
310 SDL_LockMutex(q->mutex);
312 for(;;) {
313 if (q->abort_request) {
314 ret = -1;
315 break;
318 pkt1 = q->first_pkt;
319 if (pkt1) {
320 q->first_pkt = pkt1->next;
321 if (!q->first_pkt)
322 q->last_pkt = NULL;
323 q->nb_packets--;
324 q->size -= pkt1->pkt.size + sizeof(*pkt1);
325 *pkt = pkt1->pkt;
326 av_free(pkt1);
327 ret = 1;
328 break;
329 } else if (!block) {
330 ret = 0;
331 break;
332 } else {
333 SDL_CondWait(q->cond, q->mutex);
336 SDL_UnlockMutex(q->mutex);
337 return ret;
340 static inline void fill_rectangle(SDL_Surface *screen,
341 int x, int y, int w, int h, int color)
343 SDL_Rect rect;
344 rect.x = x;
345 rect.y = y;
346 rect.w = w;
347 rect.h = h;
348 SDL_FillRect(screen, &rect, color);
351 #if 0
352 /* draw only the border of a rectangle */
353 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
355 int w1, w2, h1, h2;
357 /* fill the background */
358 w1 = x;
359 if (w1 < 0)
360 w1 = 0;
361 w2 = s->width - (x + w);
362 if (w2 < 0)
363 w2 = 0;
364 h1 = y;
365 if (h1 < 0)
366 h1 = 0;
367 h2 = s->height - (y + h);
368 if (h2 < 0)
369 h2 = 0;
370 fill_rectangle(screen,
371 s->xleft, s->ytop,
372 w1, s->height,
373 color);
374 fill_rectangle(screen,
375 s->xleft + s->width - w2, s->ytop,
376 w2, s->height,
377 color);
378 fill_rectangle(screen,
379 s->xleft + w1, s->ytop,
380 s->width - w1 - w2, h1,
381 color);
382 fill_rectangle(screen,
383 s->xleft + w1, s->ytop + s->height - h2,
384 s->width - w1 - w2, h2,
385 color);
387 #endif
391 #define SCALEBITS 10
392 #define ONE_HALF (1 << (SCALEBITS - 1))
393 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
395 #define RGB_TO_Y_CCIR(r, g, b) \
396 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
397 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
399 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
400 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
401 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
403 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
404 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
405 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
407 #define ALPHA_BLEND(a, oldp, newp, s)\
408 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
410 #define RGBA_IN(r, g, b, a, s)\
412 unsigned int v = ((const uint32_t *)(s))[0];\
413 a = (v >> 24) & 0xff;\
414 r = (v >> 16) & 0xff;\
415 g = (v >> 8) & 0xff;\
416 b = v & 0xff;\
419 #define YUVA_IN(y, u, v, a, s, pal)\
421 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
422 a = (val >> 24) & 0xff;\
423 y = (val >> 16) & 0xff;\
424 u = (val >> 8) & 0xff;\
425 v = val & 0xff;\
428 #define YUVA_OUT(d, y, u, v, a)\
430 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
434 #define BPP 1
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
438 int wrap, wrap3, width2, skip2;
439 int y, u, v, a, u1, v1, a1, w, h;
440 uint8_t *lum, *cb, *cr;
441 const uint8_t *p;
442 const uint32_t *pal;
443 int dstx, dsty, dstw, dsth;
445 dstw = av_clip(rect->w, 0, imgw);
446 dsth = av_clip(rect->h, 0, imgh);
447 dstx = av_clip(rect->x, 0, imgw - dstw);
448 dsty = av_clip(rect->y, 0, imgh - dsth);
449 lum = dst->data[0] + dsty * dst->linesize[0];
450 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
453 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454 skip2 = dstx >> 1;
455 wrap = dst->linesize[0];
456 wrap3 = rect->pict.linesize[0];
457 p = rect->pict.data[0];
458 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
460 if (dsty & 1) {
461 lum += dstx;
462 cb += skip2;
463 cr += skip2;
465 if (dstx & 1) {
466 YUVA_IN(y, u, v, a, p, pal);
467 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470 cb++;
471 cr++;
472 lum++;
473 p += BPP;
475 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
476 YUVA_IN(y, u, v, a, p, pal);
477 u1 = u;
478 v1 = v;
479 a1 = a;
480 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
482 YUVA_IN(y, u, v, a, p + BPP, pal);
483 u1 += u;
484 v1 += v;
485 a1 += a;
486 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489 cb++;
490 cr++;
491 p += 2 * BPP;
492 lum += 2;
494 if (w) {
495 YUVA_IN(y, u, v, a, p, pal);
496 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499 p++;
500 lum++;
502 p += wrap3 - dstw * BPP;
503 lum += wrap - dstw - dstx;
504 cb += dst->linesize[1] - width2 - skip2;
505 cr += dst->linesize[2] - width2 - skip2;
507 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
508 lum += dstx;
509 cb += skip2;
510 cr += skip2;
512 if (dstx & 1) {
513 YUVA_IN(y, u, v, a, p, pal);
514 u1 = u;
515 v1 = v;
516 a1 = a;
517 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518 p += wrap3;
519 lum += wrap;
520 YUVA_IN(y, u, v, a, p, pal);
521 u1 += u;
522 v1 += v;
523 a1 += a;
524 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527 cb++;
528 cr++;
529 p += -wrap3 + BPP;
530 lum += -wrap + 1;
532 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
533 YUVA_IN(y, u, v, a, p, pal);
534 u1 = u;
535 v1 = v;
536 a1 = a;
537 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539 YUVA_IN(y, u, v, a, p + BPP, pal);
540 u1 += u;
541 v1 += v;
542 a1 += a;
543 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544 p += wrap3;
545 lum += wrap;
547 YUVA_IN(y, u, v, a, p, pal);
548 u1 += u;
549 v1 += v;
550 a1 += a;
551 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
553 YUVA_IN(y, u, v, a, p + BPP, pal);
554 u1 += u;
555 v1 += v;
556 a1 += a;
557 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
559 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
562 cb++;
563 cr++;
564 p += -wrap3 + 2 * BPP;
565 lum += -wrap + 2;
567 if (w) {
568 YUVA_IN(y, u, v, a, p, pal);
569 u1 = u;
570 v1 = v;
571 a1 = a;
572 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573 p += wrap3;
574 lum += wrap;
575 YUVA_IN(y, u, v, a, p, pal);
576 u1 += u;
577 v1 += v;
578 a1 += a;
579 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582 cb++;
583 cr++;
584 p += -wrap3 + BPP;
585 lum += -wrap + 1;
587 p += wrap3 + (wrap3 - dstw * BPP);
588 lum += wrap + (wrap - dstw - dstx);
589 cb += dst->linesize[1] - width2 - skip2;
590 cr += dst->linesize[2] - width2 - skip2;
592 /* handle odd height */
593 if (h) {
594 lum += dstx;
595 cb += skip2;
596 cr += skip2;
598 if (dstx & 1) {
599 YUVA_IN(y, u, v, a, p, pal);
600 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603 cb++;
604 cr++;
605 lum++;
606 p += BPP;
608 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
609 YUVA_IN(y, u, v, a, p, pal);
610 u1 = u;
611 v1 = v;
612 a1 = a;
613 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615 YUVA_IN(y, u, v, a, p + BPP, pal);
616 u1 += u;
617 v1 += v;
618 a1 += a;
619 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
621 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
622 cb++;
623 cr++;
624 p += 2 * BPP;
625 lum += 2;
627 if (w) {
628 YUVA_IN(y, u, v, a, p, pal);
629 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
636 static void free_subpicture(SubPicture *sp)
638 int i;
640 for (i = 0; i < sp->sub.num_rects; i++)
642 av_freep(&sp->sub.rects[i]->pict.data[0]);
643 av_freep(&sp->sub.rects[i]->pict.data[1]);
644 av_freep(&sp->sub.rects[i]);
647 av_free(sp->sub.rects);
649 memset(&sp->sub, 0, sizeof(AVSubtitle));
652 static void video_image_display(VideoState *is)
654 VideoPicture *vp;
655 SubPicture *sp;
656 AVPicture pict;
657 float aspect_ratio;
658 int width, height, x, y;
659 SDL_Rect rect;
660 int i;
662 vp = &is->pictq[is->pictq_rindex];
663 if (vp->bmp) {
664 /* XXX: use variable in the frame */
665 if (is->video_st->sample_aspect_ratio.num)
666 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
667 else if (is->video_st->codec->sample_aspect_ratio.num)
668 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
669 else
670 aspect_ratio = 0;
671 if (aspect_ratio <= 0.0)
672 aspect_ratio = 1.0;
673 aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
674 /* if an active format is indicated, then it overrides the
675 mpeg format */
676 #if 0
677 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
678 is->dtg_active_format = is->video_st->codec->dtg_active_format;
679 printf("dtg_active_format=%d\n", is->dtg_active_format);
681 #endif
682 #if 0
683 switch(is->video_st->codec->dtg_active_format) {
684 case FF_DTG_AFD_SAME:
685 default:
686 /* nothing to do */
687 break;
688 case FF_DTG_AFD_4_3:
689 aspect_ratio = 4.0 / 3.0;
690 break;
691 case FF_DTG_AFD_16_9:
692 aspect_ratio = 16.0 / 9.0;
693 break;
694 case FF_DTG_AFD_14_9:
695 aspect_ratio = 14.0 / 9.0;
696 break;
697 case FF_DTG_AFD_4_3_SP_14_9:
698 aspect_ratio = 14.0 / 9.0;
699 break;
700 case FF_DTG_AFD_16_9_SP_14_9:
701 aspect_ratio = 14.0 / 9.0;
702 break;
703 case FF_DTG_AFD_SP_4_3:
704 aspect_ratio = 4.0 / 3.0;
705 break;
707 #endif
709 if (is->subtitle_st)
711 if (is->subpq_size > 0)
713 sp = &is->subpq[is->subpq_rindex];
715 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
717 SDL_LockYUVOverlay (vp->bmp);
719 pict.data[0] = vp->bmp->pixels[0];
720 pict.data[1] = vp->bmp->pixels[2];
721 pict.data[2] = vp->bmp->pixels[1];
723 pict.linesize[0] = vp->bmp->pitches[0];
724 pict.linesize[1] = vp->bmp->pitches[2];
725 pict.linesize[2] = vp->bmp->pitches[1];
727 for (i = 0; i < sp->sub.num_rects; i++)
728 blend_subrect(&pict, sp->sub.rects[i],
729 vp->bmp->w, vp->bmp->h);
731 SDL_UnlockYUVOverlay (vp->bmp);
737 /* XXX: we suppose the screen has a 1.0 pixel ratio */
738 height = is->height;
739 width = ((int)rint(height * aspect_ratio)) & ~1;
740 if (width > is->width) {
741 width = is->width;
742 height = ((int)rint(width / aspect_ratio)) & ~1;
744 x = (is->width - width) / 2;
745 y = (is->height - height) / 2;
746 if (!is->no_background) {
747 /* fill the background */
748 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
749 } else {
750 is->no_background = 0;
752 rect.x = is->xleft + x;
753 rect.y = is->ytop + y;
754 rect.w = width;
755 rect.h = height;
756 SDL_DisplayYUVOverlay(vp->bmp, &rect);
757 } else {
758 #if 0
759 fill_rectangle(screen,
760 is->xleft, is->ytop, is->width, is->height,
761 QERGB(0x00, 0x00, 0x00));
762 #endif
/* Mathematical (always non-negative) modulo: result is in [0, b) for b > 0,
   unlike C's % which can return a negative remainder. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
775 static void video_audio_display(VideoState *s)
777 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
778 int ch, channels, h, h2, bgcolor, fgcolor;
779 int16_t time_diff;
781 /* compute display index : center on currently output samples */
782 channels = s->audio_st->codec->channels;
783 nb_display_channels = channels;
784 if (!s->paused) {
785 n = 2 * channels;
786 delay = audio_write_get_buf_size(s);
787 delay /= n;
789 /* to be more precise, we take into account the time spent since
790 the last buffer computation */
791 if (audio_callback_time) {
792 time_diff = av_gettime() - audio_callback_time;
793 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
796 delay -= s->width / 2;
797 if (delay < s->width)
798 delay = s->width;
800 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
802 h= INT_MIN;
803 for(i=0; i<1000; i+=channels){
804 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
805 int a= s->sample_array[idx];
806 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
807 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
808 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
809 int score= a-d;
810 if(h<score && (b^c)<0){
811 h= score;
812 i_start= idx;
816 s->last_i_start = i_start;
817 } else {
818 i_start = s->last_i_start;
821 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
822 fill_rectangle(screen,
823 s->xleft, s->ytop, s->width, s->height,
824 bgcolor);
826 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
828 /* total height for one channel */
829 h = s->height / nb_display_channels;
830 /* graph height / 2 */
831 h2 = (h * 9) / 20;
832 for(ch = 0;ch < nb_display_channels; ch++) {
833 i = i_start + ch;
834 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
835 for(x = 0; x < s->width; x++) {
836 y = (s->sample_array[i] * h2) >> 15;
837 if (y < 0) {
838 y = -y;
839 ys = y1 - y;
840 } else {
841 ys = y1;
843 fill_rectangle(screen,
844 s->xleft + x, ys, 1, y,
845 fgcolor);
846 i += channels;
847 if (i >= SAMPLE_ARRAY_SIZE)
848 i -= SAMPLE_ARRAY_SIZE;
852 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
854 for(ch = 1;ch < nb_display_channels; ch++) {
855 y = s->ytop + ch * h;
856 fill_rectangle(screen,
857 s->xleft, y, s->width, 1,
858 fgcolor);
860 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
863 static int video_open(VideoState *is){
864 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
865 int w,h;
867 if(is_full_screen) flags |= SDL_FULLSCREEN;
868 else flags |= SDL_RESIZABLE;
870 if (is_full_screen && fs_screen_width) {
871 w = fs_screen_width;
872 h = fs_screen_height;
873 } else if(!is_full_screen && screen_width){
874 w = screen_width;
875 h = screen_height;
876 }else if (is->video_st && is->video_st->codec->width){
877 w = is->video_st->codec->width;
878 h = is->video_st->codec->height;
879 } else {
880 w = 640;
881 h = 480;
883 #ifndef __APPLE__
884 screen = SDL_SetVideoMode(w, h, 0, flags);
885 #else
886 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
887 screen = SDL_SetVideoMode(w, h, 24, flags);
888 #endif
889 if (!screen) {
890 fprintf(stderr, "SDL: could not set video mode - exiting\n");
891 return -1;
893 SDL_WM_SetCaption("FFplay", "FFplay");
895 is->width = screen->w;
896 is->height = screen->h;
898 return 0;
901 /* display the current picture, if any */
902 static void video_display(VideoState *is)
904 if(!screen)
905 video_open(cur_stream);
906 if (is->audio_st && is->show_audio)
907 video_audio_display(is);
908 else if (is->video_st)
909 video_image_display(is);
912 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
914 SDL_Event event;
915 event.type = FF_REFRESH_EVENT;
916 event.user.data1 = opaque;
917 SDL_PushEvent(&event);
918 return 0; /* 0 means stop timer */
921 /* schedule a video refresh in 'delay' ms */
922 static void schedule_refresh(VideoState *is, int delay)
924 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
925 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
928 /* get the current audio clock value */
929 static double get_audio_clock(VideoState *is)
931 double pts;
932 int hw_buf_size, bytes_per_sec;
933 pts = is->audio_clock;
934 hw_buf_size = audio_write_get_buf_size(is);
935 bytes_per_sec = 0;
936 if (is->audio_st) {
937 bytes_per_sec = is->audio_st->codec->sample_rate *
938 2 * is->audio_st->codec->channels;
940 if (bytes_per_sec)
941 pts -= (double)hw_buf_size / bytes_per_sec;
942 return pts;
945 /* get the current video clock value */
946 static double get_video_clock(VideoState *is)
948 double delta;
949 if (is->paused) {
950 delta = 0;
951 } else {
952 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
954 return is->video_current_pts + delta;
957 /* get the current external clock value */
958 static double get_external_clock(VideoState *is)
960 int64_t ti;
961 ti = av_gettime();
962 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
965 /* get the current master clock value */
966 static double get_master_clock(VideoState *is)
968 double val;
970 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
971 if (is->video_st)
972 val = get_video_clock(is);
973 else
974 val = get_audio_clock(is);
975 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
976 if (is->audio_st)
977 val = get_audio_clock(is);
978 else
979 val = get_video_clock(is);
980 } else {
981 val = get_external_clock(is);
983 return val;
986 /* seek in the stream */
987 static void stream_seek(VideoState *is, int64_t pos, int rel)
989 if (!is->seek_req) {
990 is->seek_pos = pos;
991 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
992 if (seek_by_bytes)
993 is->seek_flags |= AVSEEK_FLAG_BYTE;
994 is->seek_req = 1;
998 /* pause or resume the video */
999 static void stream_pause(VideoState *is)
1001 is->paused = !is->paused;
1002 if (!is->paused) {
1003 is->video_current_pts = get_video_clock(is);
1004 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
1008 static double compute_frame_delay(double frame_current_pts, VideoState *is)
1010 double actual_delay, delay, sync_threshold, ref_clock, diff;
1012 /* compute nominal delay */
1013 delay = frame_current_pts - is->frame_last_pts;
1014 if (delay <= 0 || delay >= 10.0) {
1015 /* if incorrect delay, use previous one */
1016 delay = is->frame_last_delay;
1017 } else {
1018 is->frame_last_delay = delay;
1020 is->frame_last_pts = frame_current_pts;
1022 /* update delay to follow master synchronisation source */
1023 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1024 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1025 /* if video is slave, we try to correct big delays by
1026 duplicating or deleting a frame */
1027 ref_clock = get_master_clock(is);
1028 diff = frame_current_pts - ref_clock;
1030 /* skip or repeat frame. We take into account the
1031 delay to compute the threshold. I still don't know
1032 if it is the best guess */
1033 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1034 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1035 if (diff <= -sync_threshold)
1036 delay = 0;
1037 else if (diff >= sync_threshold)
1038 delay = 2 * delay;
1042 is->frame_timer += delay;
1043 /* compute the REAL delay (we need to do that to avoid
1044 long term errors */
1045 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1046 if (actual_delay < 0.010) {
1047 /* XXX: should skip picture */
1048 actual_delay = 0.010;
1051 #if defined(DEBUG_SYNC)
1052 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1053 delay, actual_delay, frame_current_pts, -diff);
1054 #endif
1056 return actual_delay;
1059 /* called to display each frame */
1060 static void video_refresh_timer(void *opaque)
1062 VideoState *is = opaque;
1063 VideoPicture *vp;
1065 SubPicture *sp, *sp2;
1067 if (is->video_st) {
1068 if (is->pictq_size == 0) {
1069 /* if no picture, need to wait */
1070 schedule_refresh(is, 1);
1071 } else {
1072 /* dequeue the picture */
1073 vp = &is->pictq[is->pictq_rindex];
1075 /* update current video pts */
1076 is->video_current_pts = vp->pts;
1077 is->video_current_pts_time = av_gettime();
1079 /* launch timer for next picture */
1080 schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
1082 if(is->subtitle_st) {
1083 if (is->subtitle_stream_changed) {
1084 SDL_LockMutex(is->subpq_mutex);
1086 while (is->subpq_size) {
1087 free_subpicture(&is->subpq[is->subpq_rindex]);
1089 /* update queue size and signal for next picture */
1090 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1091 is->subpq_rindex = 0;
1093 is->subpq_size--;
1095 is->subtitle_stream_changed = 0;
1097 SDL_CondSignal(is->subpq_cond);
1098 SDL_UnlockMutex(is->subpq_mutex);
1099 } else {
1100 if (is->subpq_size > 0) {
1101 sp = &is->subpq[is->subpq_rindex];
1103 if (is->subpq_size > 1)
1104 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1105 else
1106 sp2 = NULL;
1108 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1109 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1111 free_subpicture(sp);
1113 /* update queue size and signal for next picture */
1114 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1115 is->subpq_rindex = 0;
1117 SDL_LockMutex(is->subpq_mutex);
1118 is->subpq_size--;
1119 SDL_CondSignal(is->subpq_cond);
1120 SDL_UnlockMutex(is->subpq_mutex);
1126 /* display picture */
1127 video_display(is);
1129 /* update queue size and signal for next picture */
1130 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1131 is->pictq_rindex = 0;
1133 SDL_LockMutex(is->pictq_mutex);
1134 is->pictq_size--;
1135 SDL_CondSignal(is->pictq_cond);
1136 SDL_UnlockMutex(is->pictq_mutex);
1138 } else if (is->audio_st) {
1139 /* draw the next audio frame */
1141 schedule_refresh(is, 40);
1143 /* if only audio stream, then display the audio bars (better
1144 than nothing, just to test the implementation */
1146 /* display picture */
1147 video_display(is);
1148 } else {
1149 schedule_refresh(is, 100);
1151 if (show_status) {
1152 static int64_t last_time;
1153 int64_t cur_time;
1154 int aqsize, vqsize, sqsize;
1155 double av_diff;
1157 cur_time = av_gettime();
1158 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1159 aqsize = 0;
1160 vqsize = 0;
1161 sqsize = 0;
1162 if (is->audio_st)
1163 aqsize = is->audioq.size;
1164 if (is->video_st)
1165 vqsize = is->videoq.size;
1166 if (is->subtitle_st)
1167 sqsize = is->subtitleq.size;
1168 av_diff = 0;
1169 if (is->audio_st && is->video_st)
1170 av_diff = get_audio_clock(is) - get_video_clock(is);
1171 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1172 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1173 fflush(stdout);
1174 last_time = cur_time;
1179 /* allocate a picture (needs to do that in main thread to avoid
1180 potential locking problems */
1181 static void alloc_picture(void *opaque)
1183 VideoState *is = opaque;
1184 VideoPicture *vp;
1186 vp = &is->pictq[is->pictq_windex];
1188 if (vp->bmp)
1189 SDL_FreeYUVOverlay(vp->bmp);
1191 #if 0
1192 /* XXX: use generic function */
1193 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1194 switch(is->video_st->codec->pix_fmt) {
1195 case PIX_FMT_YUV420P:
1196 case PIX_FMT_YUV422P:
1197 case PIX_FMT_YUV444P:
1198 case PIX_FMT_YUYV422:
1199 case PIX_FMT_YUV410P:
1200 case PIX_FMT_YUV411P:
1201 is_yuv = 1;
1202 break;
1203 default:
1204 is_yuv = 0;
1205 break;
1207 #endif
1208 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1209 is->video_st->codec->height,
1210 SDL_YV12_OVERLAY,
1211 screen);
1212 vp->width = is->video_st->codec->width;
1213 vp->height = is->video_st->codec->height;
1215 SDL_LockMutex(is->pictq_mutex);
1216 vp->allocated = 1;
1217 SDL_CondSignal(is->pictq_cond);
1218 SDL_UnlockMutex(is->pictq_mutex);
/* Convert a decoded frame to YV12 and append it to the picture queue.
 * Blocks until a queue slot is free; returns -1 on abort, 0 on success.
 * The overlay allocation itself is delegated to the main thread. */
1223 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1225 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1227 VideoPicture *vp;
1228 int dst_pix_fmt;
1229 AVPicture pict;
1230 static struct SwsContext *img_convert_ctx;
1232 /* wait until we have space to put a new picture */
1233 SDL_LockMutex(is->pictq_mutex);
1234 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1235 !is->videoq.abort_request) {
1236 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1238 SDL_UnlockMutex(is->pictq_mutex);
1240 if (is->videoq.abort_request)
1241 return -1;
1243 vp = &is->pictq[is->pictq_windex];
1245 /* alloc or resize hardware picture buffer */
1246 if (!vp->bmp ||
1247 vp->width != is->video_st->codec->width ||
1248 vp->height != is->video_st->codec->height) {
1249 SDL_Event event;
1251 vp->allocated = 0;
1253 /* the allocation must be done in the main thread to avoid
1254 locking problems */
1255 event.type = FF_ALLOC_EVENT;
1256 event.user.data1 = is;
1257 SDL_PushEvent(&event);
1259 /* wait until the picture is allocated */
1260 SDL_LockMutex(is->pictq_mutex);
1261 while (!vp->allocated && !is->videoq.abort_request) {
1262 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1264 SDL_UnlockMutex(is->pictq_mutex);
1266 if (is->videoq.abort_request)
1267 return -1;
1270 /* if the frame is not skipped, then display it */
1271 if (vp->bmp) {
1272 /* get a pointer on the bitmap */
1273 SDL_LockYUVOverlay (vp->bmp);
1275 dst_pix_fmt = PIX_FMT_YUV420P;
/* SDL's YV12 stores V before U, hence the swapped plane indices 1/2 */
1276 pict.data[0] = vp->bmp->pixels[0];
1277 pict.data[1] = vp->bmp->pixels[2];
1278 pict.data[2] = vp->bmp->pixels[1];
1280 pict.linesize[0] = vp->bmp->pitches[0];
1281 pict.linesize[1] = vp->bmp->pitches[2];
1282 pict.linesize[2] = vp->bmp->pitches[1];
1283 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
/* cached context is reused across calls; 'static' makes it shared state */
1284 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1285 is->video_st->codec->width, is->video_st->codec->height,
1286 is->video_st->codec->pix_fmt,
1287 is->video_st->codec->width, is->video_st->codec->height,
1288 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1289 if (img_convert_ctx == NULL) {
1290 fprintf(stderr, "Cannot initialize the conversion context\n");
1291 exit(1);
1293 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1294 0, is->video_st->codec->height, pict.data, pict.linesize);
1295 /* update the bitmap content */
1296 SDL_UnlockYUVOverlay(vp->bmp);
1298 vp->pts = pts;
1300 /* now we can update the picture count */
1301 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1302 is->pictq_windex = 0;
1303 SDL_LockMutex(is->pictq_mutex);
1304 is->pictq_size++;
1305 SDL_UnlockMutex(is->pictq_mutex);
1307 return 0;
/* Fill in a missing picture PTS by extrapolating the video clock, advance
 * the clock by one (possibly repeated) frame duration, and enqueue the frame.
 * Returns the result of queue_picture() (-1 on abort). */
1311 * compute the exact PTS for the picture if it is omitted in the stream
1312 * @param pts1 the dts of the pkt / pts of the frame
1314 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1316 double frame_delay, pts;
1318 pts = pts1;
1320 if (pts != 0) {
1321 /* update video clock with pts, if present */
1322 is->video_clock = pts;
1323 } else {
1324 pts = is->video_clock;
1326 /* update video clock for next frame */
1327 frame_delay = av_q2d(is->video_st->codec->time_base);
1328 /* for MPEG2, the frame can be repeated, so we update the
1329 clock accordingly */
/* each repeat_pict unit extends display time by half a frame duration */
1330 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1331 is->video_clock += frame_delay;
1333 #if defined(DEBUG_SYNC) && 0
1335 int ftype;
1336 if (src_frame->pict_type == FF_B_TYPE)
1337 ftype = 'B';
1338 else if (src_frame->pict_type == FF_I_TYPE)
1339 ftype = 'I';
1340 else
1341 ftype = 'P';
1342 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1343 ftype, pts, pts1);
1345 #endif
1346 return queue_picture(is, src_frame, pts);
/* Video decoding thread: pull packets from the video queue, decode them,
 * derive a PTS (preferring reordered_opaque when reordering is requested),
 * and hand finished frames to output_picture2(). Exits on queue abort. */
1349 static int video_thread(void *arg)
1351 VideoState *is = arg;
1352 AVPacket pkt1, *pkt = &pkt1;
1353 int len1, got_picture;
1354 AVFrame *frame= avcodec_alloc_frame();
1355 double pts;
1357 for(;;) {
1358 while (is->paused && !is->videoq.abort_request) {
1359 SDL_Delay(10);
1361 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1362 break;
/* a sentinel flush packet means a seek happened: reset decoder state */
1364 if(pkt->data == flush_pkt.data){
1365 avcodec_flush_buffers(is->video_st->codec);
1366 continue;
1369 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1370 this packet, if any */
1371 is->video_st->codec->reordered_opaque= pkt->pts;
1372 len1 = avcodec_decode_video(is->video_st->codec,
1373 frame, &got_picture,
1374 pkt->data, pkt->size);
1376 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1377 && frame->reordered_opaque != AV_NOPTS_VALUE)
1378 pts= frame->reordered_opaque;
1379 else if(pkt->dts != AV_NOPTS_VALUE)
1380 pts= pkt->dts;
1381 else
1382 pts= 0;
/* convert from stream time_base units to seconds */
1383 pts *= av_q2d(is->video_st->time_base);
1385 // if (len1 < 0)
1386 // break;
1387 if (got_picture) {
1388 if (output_picture2(is, frame, pts) < 0)
1389 goto the_end;
1391 av_free_packet(pkt);
/* single-frame stepping: pause again after emitting one frame */
1392 if (step)
1393 if (cur_stream)
1394 stream_pause(cur_stream);
1396 the_end:
1397 av_free(frame);
1398 return 0;
/* Subtitle decoding thread: decode subtitle packets, convert bitmap-subtitle
 * palettes from RGBA to CCIR YUVA, and append them to the subpicture queue.
 * Blocks when the queue is full; exits on queue abort. */
1401 static int subtitle_thread(void *arg)
1403 VideoState *is = arg;
1404 SubPicture *sp;
1405 AVPacket pkt1, *pkt = &pkt1;
1406 int len1, got_subtitle;
1407 double pts;
1408 int i, j;
1409 int r, g, b, y, u, v, a;
1411 for(;;) {
1412 while (is->paused && !is->subtitleq.abort_request) {
1413 SDL_Delay(10);
1415 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1416 break;
/* flush sentinel after a seek: reset decoder state */
1418 if(pkt->data == flush_pkt.data){
1419 avcodec_flush_buffers(is->subtitle_st->codec);
1420 continue;
1422 SDL_LockMutex(is->subpq_mutex);
1423 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1424 !is->subtitleq.abort_request) {
1425 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1427 SDL_UnlockMutex(is->subpq_mutex);
1429 if (is->subtitleq.abort_request)
1430 goto the_end;
1432 sp = &is->subpq[is->subpq_windex];
1434 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1435 this packet, if any */
1436 pts = 0;
1437 if (pkt->pts != AV_NOPTS_VALUE)
1438 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1440 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1441 &sp->sub, &got_subtitle,
1442 pkt->data, pkt->size);
1443 // if (len1 < 0)
1444 // break;
/* format == 0 means bitmap subtitles with a palette in pict.data[1] */
1445 if (got_subtitle && sp->sub.format == 0) {
1446 sp->pts = pts;
/* convert each palette entry from RGBA to YUVA in place */
1448 for (i = 0; i < sp->sub.num_rects; i++)
1450 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1452 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1453 y = RGB_TO_Y_CCIR(r, g, b);
1454 u = RGB_TO_U_CCIR(r, g, b, 0);
1455 v = RGB_TO_V_CCIR(r, g, b, 0);
1456 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1460 /* now we can update the picture count */
1461 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1462 is->subpq_windex = 0;
1463 SDL_LockMutex(is->subpq_mutex);
1464 is->subpq_size++;
1465 SDL_UnlockMutex(is->subpq_mutex);
1467 av_free_packet(pkt);
1468 // if (step)
1469 // if (cur_stream)
1470 // stream_pause(cur_stream);
1472 the_end:
1473 return 0;
1476 /* copy samples for viewing in editor window */
1477 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1479 int size, len, channels;
1481 channels = is->audio_st->codec->channels;
1483 size = samples_size / sizeof(short);
1484 while (size > 0) {
1485 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1486 if (len > size)
1487 len = size;
1488 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1489 samples += len;
1490 is->sample_array_index += len;
1491 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1492 is->sample_array_index = 0;
1493 size -= len;
1497 /* return the new audio buffer size (samples can be added or deleted
1498 to get better sync if video or external master clock) */
1499 static int synchronize_audio(VideoState *is, short *samples,
1500 int samples_size1, double pts)
1502 int n, samples_size;
1503 double ref_clock;
1505 n = 2 * is->audio_st->codec->channels;
1506 samples_size = samples_size1;
1508 /* if not master, then we try to remove or add samples to correct the clock */
1509 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1510 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1511 double diff, avg_diff;
1512 int wanted_size, min_size, max_size, nb_samples;
1514 ref_clock = get_master_clock(is);
1515 diff = get_audio_clock(is) - ref_clock;
1517 if (diff < AV_NOSYNC_THRESHOLD) {
1518 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1519 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1520 /* not enough measures to have a correct estimate */
1521 is->audio_diff_avg_count++;
1522 } else {
1523 /* estimate the A-V difference */
1524 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1526 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1527 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1528 nb_samples = samples_size / n;
1530 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1531 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1532 if (wanted_size < min_size)
1533 wanted_size = min_size;
1534 else if (wanted_size > max_size)
1535 wanted_size = max_size;
1537 /* add or remove samples to correction the synchro */
1538 if (wanted_size < samples_size) {
1539 /* remove samples */
1540 samples_size = wanted_size;
1541 } else if (wanted_size > samples_size) {
1542 uint8_t *samples_end, *q;
1543 int nb;
1545 /* add samples */
1546 nb = (samples_size - wanted_size);
1547 samples_end = (uint8_t *)samples + samples_size - n;
1548 q = samples_end + n;
1549 while (nb > 0) {
1550 memcpy(q, samples_end, n);
1551 q += n;
1552 nb -= n;
1554 samples_size = wanted_size;
1557 #if 0
1558 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1559 diff, avg_diff, samples_size - samples_size1,
1560 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1561 #endif
1563 } else {
1564 /* too big difference : may be initial PTS errors, so
1565 reset A-V filter */
1566 is->audio_diff_avg_count = 0;
1567 is->audio_diff_cum = 0;
1571 return samples_size;
/* Decode one audio frame and return its uncompressed size in bytes
 * (S16 interleaved, after optional sample-format conversion), or -1 on
 * pause/abort.  Also advances is->audio_clock and reports the frame PTS
 * through pts_ptr.  A single input packet may yield several frames. */
1574 /* decode one audio frame and returns its uncompressed size */
1575 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1577 AVPacket *pkt = &is->audio_pkt;
1578 AVCodecContext *dec= is->audio_st->codec;
1579 int n, len1, data_size;
1580 double pts;
1582 for(;;) {
1583 /* NOTE: the audio packet can contain several frames */
1584 while (is->audio_pkt_size > 0) {
1585 data_size = sizeof(is->audio_buf1);
1586 len1 = avcodec_decode_audio2(dec,
1587 (int16_t *)is->audio_buf1, &data_size,
1588 is->audio_pkt_data, is->audio_pkt_size);
1589 if (len1 < 0) {
1590 /* if error, we skip the frame */
1591 is->audio_pkt_size = 0;
1592 break;
1595 is->audio_pkt_data += len1;
1596 is->audio_pkt_size -= len1;
1597 if (data_size <= 0)
1598 continue;
/* (re)build the converter whenever the decoder's sample format changes */
1600 if (dec->sample_fmt != is->audio_src_fmt) {
1601 if (is->reformat_ctx)
1602 av_audio_convert_free(is->reformat_ctx);
1603 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1604 dec->sample_fmt, 1, NULL, 0);
1605 if (!is->reformat_ctx) {
1606 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1607 avcodec_get_sample_fmt_name(dec->sample_fmt),
1608 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1609 break;
1611 is->audio_src_fmt= dec->sample_fmt;
1614 if (is->reformat_ctx) {
1615 const void *ibuf[6]= {is->audio_buf1};
1616 void *obuf[6]= {is->audio_buf2};
1617 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1618 int ostride[6]= {2};
1619 int len= data_size/istride[0];
1620 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1621 printf("av_audio_convert() failed\n");
1622 break;
1624 is->audio_buf= is->audio_buf2;
1625 /* FIXME: existing code assume that data_size equals framesize*channels*2
1626 remove this legacy cruft */
1627 data_size= len*2;
1628 }else{
1629 is->audio_buf= is->audio_buf1;
1632 /* if no pts, then compute it */
1633 pts = is->audio_clock;
1634 *pts_ptr = pts;
1635 n = 2 * dec->channels;
/* advance the clock by the duration of the decoded data */
1636 is->audio_clock += (double)data_size /
1637 (double)(n * dec->sample_rate);
1638 #if defined(DEBUG_SYNC)
1640 static double last_clock;
1641 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1642 is->audio_clock - last_clock,
1643 is->audio_clock, pts);
1644 last_clock = is->audio_clock;
1646 #endif
1647 return data_size;
1650 /* free the current packet */
1651 if (pkt->data)
1652 av_free_packet(pkt);
1654 if (is->paused || is->audioq.abort_request) {
1655 return -1;
1658 /* read next packet */
1659 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1660 return -1;
/* flush sentinel after a seek: reset decoder state */
1661 if(pkt->data == flush_pkt.data){
1662 avcodec_flush_buffers(dec);
1663 continue;
1666 is->audio_pkt_data = pkt->data;
1667 is->audio_pkt_size = pkt->size;
1669 /* if update the audio clock with the pts */
1670 if (pkt->pts != AV_NOPTS_VALUE) {
1671 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
/* NOTE(review): despite the original comment saying "in samples", this
 * returns the number of BYTES of decoded audio not yet handed to SDL
 * (audio_buf_size and audio_buf_index are byte counts in the callback) —
 * confirm against callers before relying on the unit. */
1676 /* get the current audio output buffer size, in samples. With SDL, we
1677 cannot have a precise information */
1678 static int audio_write_get_buf_size(VideoState *is)
1680 return is->audio_buf_size - is->audio_buf_index;
/* SDL audio callback: fill 'stream' with 'len' bytes.  Decodes new frames
 * as needed, applies A/V sync correction, and outputs silence on decode
 * failure so playback cadence is preserved. Runs on SDL's audio thread. */
1684 /* prepare a new audio buffer */
1685 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1687 VideoState *is = opaque;
1688 int audio_size, len1;
1689 double pts;
/* timestamp of this callback, used by the audio clock computation */
1691 audio_callback_time = av_gettime();
1693 while (len > 0) {
1694 if (is->audio_buf_index >= is->audio_buf_size) {
1695 audio_size = audio_decode_frame(is, &pts);
1696 if (audio_size < 0) {
1697 /* if error, just output silence */
1698 is->audio_buf = is->audio_buf1;
1699 is->audio_buf_size = 1024;
1700 memset(is->audio_buf, 0, is->audio_buf_size);
1701 } else {
1702 if (is->show_audio)
1703 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
/* shrink/grow the buffer to track the master clock */
1704 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1705 pts);
1706 is->audio_buf_size = audio_size;
1708 is->audio_buf_index = 0;
1710 len1 = is->audio_buf_size - is->audio_buf_index;
1711 if (len1 > len)
1712 len1 = len;
1713 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1714 len -= len1;
1715 stream += len1;
1716 is->audio_buf_index += len1;
/* Open the decoder for one stream of is->ic, apply the user's codec options,
 * and start the matching consumer (SDL audio device, video thread or
 * subtitle thread).  Returns 0 on success, -1 on any failure. */
1720 /* open a given stream. Return 0 if OK */
1721 static int stream_component_open(VideoState *is, int stream_index)
1723 AVFormatContext *ic = is->ic;
1724 AVCodecContext *enc;
1725 AVCodec *codec;
1726 SDL_AudioSpec wanted_spec, spec;
1728 if (stream_index < 0 || stream_index >= ic->nb_streams)
1729 return -1;
1730 enc = ic->streams[stream_index]->codec;
1732 /* prepare audio output */
/* ask the decoder for at most stereo; SDL output is opened with that count */
1733 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1734 if (enc->channels > 0) {
1735 enc->request_channels = FFMIN(2, enc->channels);
1736 } else {
1737 enc->request_channels = 2;
1741 codec = avcodec_find_decoder(enc->codec_id);
/* propagate the command-line debugging/speed options to the codec context */
1742 enc->debug_mv = debug_mv;
1743 enc->debug = debug;
1744 enc->workaround_bugs = workaround_bugs;
1745 enc->lowres = lowres;
1746 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1747 enc->idct_algo= idct;
1748 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1749 enc->skip_frame= skip_frame;
1750 enc->skip_idct= skip_idct;
1751 enc->skip_loop_filter= skip_loop_filter;
1752 enc->error_recognition= error_recognition;
1753 enc->error_concealment= error_concealment;
1755 set_context_opts(enc, avctx_opts[enc->codec_type], 0);
1757 if (!codec ||
1758 avcodec_open(enc, codec) < 0)
1759 return -1;
1761 /* prepare audio output */
1762 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1763 wanted_spec.freq = enc->sample_rate;
1764 wanted_spec.format = AUDIO_S16SYS;
1765 wanted_spec.channels = enc->channels;
1766 wanted_spec.silence = 0;
1767 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1768 wanted_spec.callback = sdl_audio_callback;
1769 wanted_spec.userdata = is;
1770 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1771 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1772 return -1;
1774 is->audio_hw_buf_size = spec.size;
1775 is->audio_src_fmt= SAMPLE_FMT_S16;
1778 if(thread_count>1)
1779 avcodec_thread_init(enc, thread_count);
1780 enc->thread_count= thread_count;
1781 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1782 switch(enc->codec_type) {
1783 case CODEC_TYPE_AUDIO:
1784 is->audio_stream = stream_index;
1785 is->audio_st = ic->streams[stream_index];
1786 is->audio_buf_size = 0;
1787 is->audio_buf_index = 0;
1789 /* init averaging filter */
1790 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1791 is->audio_diff_avg_count = 0;
1792 /* since we do not have a precise anough audio fifo fullness,
1793 we correct audio sync only if larger than this threshold */
1794 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1796 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1797 packet_queue_init(&is->audioq);
/* unpause the SDL audio device: the callback starts pulling data now */
1798 SDL_PauseAudio(0);
1799 break;
1800 case CODEC_TYPE_VIDEO:
1801 is->video_stream = stream_index;
1802 is->video_st = ic->streams[stream_index];
1804 is->frame_last_delay = 40e-3;
1805 is->frame_timer = (double)av_gettime() / 1000000.0;
1806 is->video_current_pts_time = av_gettime();
1808 packet_queue_init(&is->videoq);
1809 is->video_tid = SDL_CreateThread(video_thread, is);
1810 break;
1811 case CODEC_TYPE_SUBTITLE:
1812 is->subtitle_stream = stream_index;
1813 is->subtitle_st = ic->streams[stream_index];
1814 packet_queue_init(&is->subtitleq);
1816 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1817 break;
1818 default:
1819 break;
1821 return 0;
/* Shut down one stream's decoding pipeline: abort its packet queue, wake and
 * join the consumer thread (or close the SDL audio device), free the queue,
 * close the codec, and clear the per-stream state on the VideoState. */
1824 static void stream_component_close(VideoState *is, int stream_index)
1826 AVFormatContext *ic = is->ic;
1827 AVCodecContext *enc;
1829 if (stream_index < 0 || stream_index >= ic->nb_streams)
1830 return;
1831 enc = ic->streams[stream_index]->codec;
1833 switch(enc->codec_type) {
1834 case CODEC_TYPE_AUDIO:
1835 packet_queue_abort(&is->audioq);
/* SDL_CloseAudio joins the audio callback thread */
1837 SDL_CloseAudio();
1839 packet_queue_end(&is->audioq);
1840 if (is->reformat_ctx)
1841 av_audio_convert_free(is->reformat_ctx);
1842 break;
1843 case CODEC_TYPE_VIDEO:
1844 packet_queue_abort(&is->videoq);
1846 /* note: we also signal this mutex to make sure we deblock the
1847 video thread in all cases */
1848 SDL_LockMutex(is->pictq_mutex);
1849 SDL_CondSignal(is->pictq_cond);
1850 SDL_UnlockMutex(is->pictq_mutex);
1852 SDL_WaitThread(is->video_tid, NULL);
1854 packet_queue_end(&is->videoq);
1855 break;
1856 case CODEC_TYPE_SUBTITLE:
1857 packet_queue_abort(&is->subtitleq);
1859 /* note: we also signal this mutex to make sure we deblock the
1860 video thread in all cases */
1861 SDL_LockMutex(is->subpq_mutex);
1862 is->subtitle_stream_changed = 1;
1864 SDL_CondSignal(is->subpq_cond);
1865 SDL_UnlockMutex(is->subpq_mutex);
1867 SDL_WaitThread(is->subtitle_tid, NULL);
1869 packet_queue_end(&is->subtitleq);
1870 break;
1871 default:
1872 break;
/* stop the demuxer from queuing more packets for this stream */
1875 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1876 avcodec_close(enc);
1877 switch(enc->codec_type) {
1878 case CODEC_TYPE_AUDIO:
1879 is->audio_st = NULL;
1880 is->audio_stream = -1;
1881 break;
1882 case CODEC_TYPE_VIDEO:
1883 is->video_st = NULL;
1884 is->video_stream = -1;
1885 break;
1886 case CODEC_TYPE_SUBTITLE:
1887 is->subtitle_st = NULL;
1888 is->subtitle_stream = -1;
1889 break;
1890 default:
1891 break;
1895 static void dump_stream_info(const AVFormatContext *s)
1897 AVMetadataTag *tag = NULL;
1898 while ((tag=av_metadata_get(s->metadata,"",tag,AV_METADATA_IGNORE_SUFFIX)))
1899 fprintf(stderr, "%s: %s\n", tag->key, tag->value);
1902 /* since we have only one decoding thread, we can use a global
1903 variable instead of a thread local variable */
1904 static VideoState *global_video_state;
1906 static int decode_interrupt_cb(void)
1908 return (global_video_state && global_video_state->abort_request);
/* Demuxer thread: open the input, pick the wanted streams, open their
 * decoders, then loop reading packets and dispatching them to the per-stream
 * queues.  Also services pause/unpause and seek requests.  On exit it closes
 * all components; on error it posts FF_QUIT_EVENT to the main loop. */
1911 /* this thread gets the stream from the disk or the network */
1912 static int decode_thread(void *arg)
1914 VideoState *is = arg;
1915 AVFormatContext *ic;
1916 int err, i, ret, video_index, audio_index, subtitle_index;
1917 AVPacket pkt1, *pkt = &pkt1;
1918 AVFormatParameters params, *ap = &params;
1920 video_index = -1;
1921 audio_index = -1;
1922 subtitle_index = -1;
1923 is->video_stream = -1;
1924 is->audio_stream = -1;
1925 is->subtitle_stream = -1;
1927 global_video_state = is;
1928 url_set_interrupt_cb(decode_interrupt_cb);
1930 memset(ap, 0, sizeof(*ap));
/* hints for raw/headerless demuxers that need geometry and rate up front */
1932 ap->width = frame_width;
1933 ap->height= frame_height;
1934 ap->time_base= (AVRational){1, 25};
1935 ap->pix_fmt = frame_pix_fmt;
1937 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1938 if (err < 0) {
1939 print_error(is->filename, err);
1940 ret = -1;
1941 goto fail;
1943 is->ic = ic;
1945 if(genpts)
1946 ic->flags |= AVFMT_FLAG_GENPTS;
1948 err = av_find_stream_info(ic);
1949 if (err < 0) {
1950 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1951 ret = -1;
1952 goto fail;
1954 if(ic->pb)
1955 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1957 /* if seeking requested, we execute it */
1958 if (start_time != AV_NOPTS_VALUE) {
1959 int64_t timestamp;
1961 timestamp = start_time;
1962 /* add the stream start time */
1963 if (ic->start_time != AV_NOPTS_VALUE)
1964 timestamp += ic->start_time;
1965 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1966 if (ret < 0) {
1967 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1968 is->filename, (double)timestamp / AV_TIME_BASE);
/* select one stream per media type, honouring -ast/-vst/-sst skip counts */
1972 for(i = 0; i < ic->nb_streams; i++) {
1973 AVCodecContext *enc = ic->streams[i]->codec;
1974 ic->streams[i]->discard = AVDISCARD_ALL;
1975 switch(enc->codec_type) {
1976 case CODEC_TYPE_AUDIO:
1977 if (wanted_audio_stream-- >= 0 && !audio_disable)
1978 audio_index = i;
1979 break;
1980 case CODEC_TYPE_VIDEO:
1981 if (wanted_video_stream-- >= 0 && !video_disable)
1982 video_index = i;
1983 break;
1984 case CODEC_TYPE_SUBTITLE:
1985 if (wanted_subtitle_stream-- >= 0 && !video_disable)
1986 subtitle_index = i;
1987 break;
1988 default:
1989 break;
1992 if (show_status) {
1993 dump_format(ic, 0, is->filename, 0);
1994 dump_stream_info(ic);
1997 /* open the streams */
1998 if (audio_index >= 0) {
1999 stream_component_open(is, audio_index);
2002 if (video_index >= 0) {
2003 stream_component_open(is, video_index);
2004 } else {
/* no video: show the audio waveform instead (unless display is disabled) */
2005 if (!display_disable)
2006 is->show_audio = 1;
2009 if (subtitle_index >= 0) {
2010 stream_component_open(is, subtitle_index);
2013 if (is->video_stream < 0 && is->audio_stream < 0) {
2014 fprintf(stderr, "%s: could not open codecs\n", is->filename);
2015 ret = -1;
2016 goto fail;
2019 for(;;) {
2020 if (is->abort_request)
2021 break;
2022 if (is->paused != is->last_paused) {
2023 is->last_paused = is->paused;
2024 if (is->paused)
2025 av_read_pause(ic);
2026 else
2027 av_read_play(ic);
2029 #if CONFIG_RTSP_DEMUXER
2030 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2031 /* wait 10 ms to avoid trying to get another packet */
2032 /* XXX: horrible */
2033 SDL_Delay(10);
2034 continue;
2036 #endif
2037 if (is->seek_req) {
2038 int stream_index= -1;
2039 int64_t seek_target= is->seek_pos;
2041 if (is-> video_stream >= 0) stream_index= is-> video_stream;
2042 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
2043 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
/* convert the seek target from AV_TIME_BASE to the stream's time base */
2045 if(stream_index>=0){
2046 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2049 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2050 if (ret < 0) {
2051 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2052 }else{
/* drop stale packets and push a flush sentinel into each open queue */
2053 if (is->audio_stream >= 0) {
2054 packet_queue_flush(&is->audioq);
2055 packet_queue_put(&is->audioq, &flush_pkt);
2057 if (is->subtitle_stream >= 0) {
2058 packet_queue_flush(&is->subtitleq);
2059 packet_queue_put(&is->subtitleq, &flush_pkt);
2061 if (is->video_stream >= 0) {
2062 packet_queue_flush(&is->videoq);
2063 packet_queue_put(&is->videoq, &flush_pkt);
2066 is->seek_req = 0;
2069 /* if the queue are full, no need to read more */
2070 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2071 is->videoq.size > MAX_VIDEOQ_SIZE ||
2072 is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
2073 /* wait 10 ms */
2074 SDL_Delay(10);
2075 continue;
/* at EOF, feed an empty packet so the video decoder drains delayed frames */
2077 if(url_feof(ic->pb)) {
2078 av_init_packet(pkt);
2079 pkt->data=NULL;
2080 pkt->size=0;
2081 pkt->stream_index= is->video_stream;
2082 packet_queue_put(&is->videoq, pkt);
2083 continue;
2085 ret = av_read_frame(ic, pkt);
2086 if (ret < 0) {
2087 if (ret != AVERROR_EOF && url_ferror(ic->pb) == 0) {
2088 SDL_Delay(100); /* wait for user event */
2089 continue;
2090 } else
2091 break;
2093 if (pkt->stream_index == is->audio_stream) {
2094 packet_queue_put(&is->audioq, pkt);
2095 } else if (pkt->stream_index == is->video_stream) {
2096 packet_queue_put(&is->videoq, pkt);
2097 } else if (pkt->stream_index == is->subtitle_stream) {
2098 packet_queue_put(&is->subtitleq, pkt);
2099 } else {
2100 av_free_packet(pkt);
2103 /* wait until the end */
2104 while (!is->abort_request) {
2105 SDL_Delay(100);
2108 ret = 0;
2109 fail:
2110 /* disable interrupting */
2111 global_video_state = NULL;
2113 /* close each stream */
2114 if (is->audio_stream >= 0)
2115 stream_component_close(is, is->audio_stream);
2116 if (is->video_stream >= 0)
2117 stream_component_close(is, is->video_stream);
2118 if (is->subtitle_stream >= 0)
2119 stream_component_close(is, is->subtitle_stream);
2120 if (is->ic) {
2121 av_close_input_file(is->ic);
2122 is->ic = NULL; /* safety */
2124 url_set_interrupt_cb(NULL);
2126 if (ret != 0) {
2127 SDL_Event event;
2129 event.type = FF_QUIT_EVENT;
2130 event.user.data1 = is;
2131 SDL_PushEvent(&event);
2133 return 0;
2136 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2138 VideoState *is;
2140 is = av_mallocz(sizeof(VideoState));
2141 if (!is)
2142 return NULL;
2143 av_strlcpy(is->filename, filename, sizeof(is->filename));
2144 is->iformat = iformat;
2145 is->ytop = 0;
2146 is->xleft = 0;
2148 /* start video display */
2149 is->pictq_mutex = SDL_CreateMutex();
2150 is->pictq_cond = SDL_CreateCond();
2152 is->subpq_mutex = SDL_CreateMutex();
2153 is->subpq_cond = SDL_CreateCond();
2155 /* add the refresh timer to draw the picture */
2156 schedule_refresh(is, 40);
2158 is->av_sync_type = av_sync_type;
2159 is->parse_tid = SDL_CreateThread(decode_thread, is);
2160 if (!is->parse_tid) {
2161 av_free(is);
2162 return NULL;
2164 return is;
/* Tear down a VideoState: request abort, join the demuxer thread (which
 * closes all stream components), free the queued overlays, and destroy the
 * synchronization primitives.  NOTE: 'is' itself is not freed here. */
2167 static void stream_close(VideoState *is)
2169 VideoPicture *vp;
2170 int i;
2171 /* XXX: use a special url_shutdown call to abort parse cleanly */
2172 is->abort_request = 1;
2173 SDL_WaitThread(is->parse_tid, NULL);
2175 /* free all pictures */
2176 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2177 vp = &is->pictq[i];
2178 if (vp->bmp) {
2179 SDL_FreeYUVOverlay(vp->bmp);
2180 vp->bmp = NULL;
2183 SDL_DestroyMutex(is->pictq_mutex);
2184 SDL_DestroyCond(is->pictq_cond);
2185 SDL_DestroyMutex(is->subpq_mutex);
2186 SDL_DestroyCond(is->subpq_cond);
/* Switch to the next stream of the given codec type, wrapping around the
 * stream list.  For subtitles, wrapping past the end selects "no subtitle"
 * (index -1).  Closes the old component and opens the new one. */
2189 static void stream_cycle_channel(VideoState *is, int codec_type)
2191 AVFormatContext *ic = is->ic;
2192 int start_index, stream_index;
2193 AVStream *st;
2195 if (codec_type == CODEC_TYPE_VIDEO)
2196 start_index = is->video_stream;
2197 else if (codec_type == CODEC_TYPE_AUDIO)
2198 start_index = is->audio_stream;
2199 else
2200 start_index = is->subtitle_stream;
/* subtitles may start from "none" (-1); audio/video need an open stream */
2201 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2202 return;
2203 stream_index = start_index;
2204 for(;;) {
2205 if (++stream_index >= is->ic->nb_streams)
2207 if (codec_type == CODEC_TYPE_SUBTITLE)
2209 stream_index = -1;
2210 goto the_end;
2211 } else
2212 stream_index = 0;
/* came back around without finding another usable stream: keep current */
2214 if (stream_index == start_index)
2215 return;
2216 st = ic->streams[stream_index];
2217 if (st->codec->codec_type == codec_type) {
2218 /* check that parameters are OK */
2219 switch(codec_type) {
2220 case CODEC_TYPE_AUDIO:
2221 if (st->codec->sample_rate != 0 &&
2222 st->codec->channels != 0)
2223 goto the_end;
2224 break;
2225 case CODEC_TYPE_VIDEO:
2226 case CODEC_TYPE_SUBTITLE:
2227 goto the_end;
2228 default:
2229 break;
2233 the_end:
2234 stream_component_close(is, start_index);
2235 stream_component_open(is, stream_index);
2239 static void toggle_full_screen(void)
2241 is_full_screen = !is_full_screen;
2242 if (!fs_screen_width) {
2243 /* use default SDL method */
2244 // SDL_WM_ToggleFullScreen(screen);
2246 video_open(cur_stream);
2249 static void toggle_pause(void)
2251 if (cur_stream)
2252 stream_pause(cur_stream);
2253 step = 0;
2256 static void step_to_next_frame(void)
2258 if (cur_stream) {
2259 /* if the stream is paused unpause it, then step */
2260 if (cur_stream->paused)
2261 stream_pause(cur_stream);
2263 step = 1;
2266 static void do_exit(void)
2268 if (cur_stream) {
2269 stream_close(cur_stream);
2270 cur_stream = NULL;
2272 if (show_status)
2273 printf("\n");
2274 SDL_Quit();
2275 exit(0);
2278 static void toggle_audio_display(void)
2280 if (cur_stream) {
2281 cur_stream->show_audio = !cur_stream->show_audio;
/* Main SDL event loop: dispatch keyboard shortcuts (quit, fullscreen, pause,
 * step, stream cycling, seeking), mouse seeks, window resizes, and the
 * custom FF_ALLOC/FF_REFRESH/FF_QUIT events posted by the worker threads. */
2285 /* handle an event sent by the GUI */
2286 static void event_loop(void)
2288 SDL_Event event;
2289 double incr, pos, frac;
2291 for(;;) {
2292 SDL_WaitEvent(&event);
2293 switch(event.type) {
2294 case SDL_KEYDOWN:
2295 switch(event.key.keysym.sym) {
2296 case SDLK_ESCAPE:
2297 case SDLK_q:
2298 do_exit();
2299 break;
2300 case SDLK_f:
2301 toggle_full_screen();
2302 break;
2303 case SDLK_p:
2304 case SDLK_SPACE:
2305 toggle_pause();
2306 break;
2307 case SDLK_s: //S: Step to next frame
2308 step_to_next_frame();
2309 break;
2310 case SDLK_a:
2311 if (cur_stream)
2312 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2313 break;
2314 case SDLK_v:
2315 if (cur_stream)
2316 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2317 break;
2318 case SDLK_t:
2319 if (cur_stream)
2320 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2321 break;
2322 case SDLK_w:
2323 toggle_audio_display();
2324 break;
/* arrow keys: relative seeks of +/-10s and +/-60s */
2325 case SDLK_LEFT:
2326 incr = -10.0;
2327 goto do_seek;
2328 case SDLK_RIGHT:
2329 incr = 10.0;
2330 goto do_seek;
2331 case SDLK_UP:
2332 incr = 60.0;
2333 goto do_seek;
2334 case SDLK_DOWN:
2335 incr = -60.0;
2336 do_seek:
2337 if (cur_stream) {
2338 if (seek_by_bytes) {
/* translate the time increment into a byte offset via the bitrate */
2339 pos = url_ftell(cur_stream->ic->pb);
2340 if (cur_stream->ic->bit_rate)
2341 incr *= cur_stream->ic->bit_rate / 60.0;
2342 else
2343 incr *= 180000.0;
2344 pos += incr;
2345 stream_seek(cur_stream, pos, incr);
2346 } else {
2347 pos = get_master_clock(cur_stream);
2348 pos += incr;
2349 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2352 break;
2353 default:
2354 break;
2356 break;
/* click on the window: seek proportionally to the x position */
2357 case SDL_MOUSEBUTTONDOWN:
2358 if (cur_stream) {
2359 int ns, hh, mm, ss;
2360 int tns, thh, tmm, tss;
2361 tns = cur_stream->ic->duration/1000000LL;
2362 thh = tns/3600;
2363 tmm = (tns%3600)/60;
2364 tss = (tns%60);
2365 frac = (double)event.button.x/(double)cur_stream->width;
2366 ns = frac*tns;
2367 hh = ns/3600;
2368 mm = (ns%3600)/60;
2369 ss = (ns%60);
2370 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2371 hh, mm, ss, thh, tmm, tss);
2372 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2374 break;
2375 case SDL_VIDEORESIZE:
2376 if (cur_stream) {
2377 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2378 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2379 screen_width = cur_stream->width = event.resize.w;
2380 screen_height= cur_stream->height= event.resize.h;
2382 break;
2383 case SDL_QUIT:
2384 case FF_QUIT_EVENT:
2385 do_exit();
2386 break;
/* posted by queue_picture(): overlay allocation must run on this thread */
2387 case FF_ALLOC_EVENT:
2388 video_open(event.user.data1);
2389 alloc_picture(event.user.data1);
2390 break;
2391 case FF_REFRESH_EVENT:
2392 video_refresh_timer(event.user.data1);
2393 break;
2394 default:
2395 break;
2400 static void opt_frame_size(const char *arg)
2402 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2403 fprintf(stderr, "Incorrect frame size\n");
2404 exit(1);
2406 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2407 fprintf(stderr, "Frame size must be a multiple of 2\n");
2408 exit(1);
/* Parse the -x option: force the displayed window width (>= 1). */
2412 static int opt_width(const char *opt, const char *arg)
2414 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2415 return 0;
/* "-y": force the displayed window height, in pixels (1..INT_MAX).
 * Stores into the file-scope screen_height; always returns 0
 * (parse_number_or_die aborts the process on bad input). */
static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2424 static void opt_format(const char *arg)
2426 file_iformat = av_find_input_format(arg);
2427 if (!file_iformat) {
2428 fprintf(stderr, "Unknown input format: %s\n", arg);
2429 exit(1);
/* "-pix_fmt": select the decoder output pixel format by name.
 * NOTE(review): the result is not validated here — presumably an unknown
 * name yields PIX_FMT_NONE; confirm how the decoder path handles that. */
static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}
2438 static int opt_sync(const char *opt, const char *arg)
2440 if (!strcmp(arg, "audio"))
2441 av_sync_type = AV_SYNC_AUDIO_MASTER;
2442 else if (!strcmp(arg, "video"))
2443 av_sync_type = AV_SYNC_VIDEO_MASTER;
2444 else if (!strcmp(arg, "ext"))
2445 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2446 else {
2447 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2448 exit(1);
2450 return 0;
/* "-ss": set the initial playback position. The argument is parsed as a
 * time value; parse_time_or_die aborts the process on bad input. */
static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}
/* "-debug": set the codec debug flag mask.
 * The libav* log level is raised first so debug output is visible
 * even if the subsequent parse aborts the process. */
static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}
/* "-vismv": set the motion-vector visualisation flag mask
 * (any int value is accepted; aborts on unparsable input). */
static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}
/* "-threads": set the decoder thread count.
 * Warns when the build lacks real thread support. */
static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
2481 static const OptionDef options[] = {
2482 { "h", OPT_EXIT, {(void*)show_help}, "show help" },
2483 { "version", OPT_EXIT, {(void*)show_version}, "show version" },
2484 { "L", OPT_EXIT, {(void*)show_license}, "show license" },
2485 { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
2486 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2487 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2488 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2489 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2490 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2491 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2492 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
2493 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
2494 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
2495 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2496 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2497 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2498 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2499 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2500 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2501 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2502 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2503 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2504 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2505 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2506 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2507 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2508 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2509 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2510 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2511 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2512 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
2513 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2514 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2515 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2516 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2517 { NULL, },
/* Print usage, the option tables and the interactive key bindings. */
static void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    /* entries without OPT_EXPERT */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    /* entries with OPT_EXPERT */
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC quit\n"
           "f toggle full screen\n"
           "p, SPC pause\n"
           "a cycle audio channel\n"
           "v cycle video channel\n"
           "t cycle subtitle channel\n"
           "w show audio waves\n"
           "left/right seek backward/forward 10 seconds\n"
           "down/up seek backward/forward 1 minute\n"
           "mouse click seek to percentage in file corresponding to fraction of width\n"
           );
}
2543 static void opt_input_file(const char *filename)
2545 if (!strcmp(filename, "-"))
2546 filename = "pipe:";
2547 input_filename = filename;
/* Program entry point. */
2551 int main(int argc, char **argv)
2553 int flags, i;
2555 /* register all codecs, demux and protocols */
2556 avcodec_register_all();
2557 avdevice_register_all();
2558 av_register_all();
2560 for(i=0; i<CODEC_TYPE_NB; i++){
2561 avctx_opts[i]= avcodec_alloc_context2(i);
2563 avformat_opts = avformat_alloc_context();
2564 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
2566 show_banner();
2568 parse_options(argc, argv, options, opt_input_file);
2570 if (!input_filename) {
2571 fprintf(stderr, "An input file must be specified\n");
2572 exit(1);
2575 if (display_disable) {
2576 video_disable = 1;
2578 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2579 #if !defined(__MINGW32__) && !defined(__APPLE__)
2580 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2581 #endif
2582 if (SDL_Init (flags)) {
2583 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2584 exit(1);
2587 if (!display_disable) {
2588 #if HAVE_SDL_VIDEO_SIZE
2589 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2590 fs_screen_width = vi->current_w;
2591 fs_screen_height = vi->current_h;
2592 #endif
2595 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2596 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2597 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2598 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2600 av_init_packet(&flush_pkt);
2601 flush_pkt.data= "FLUSH";
2603 cur_stream = stream_open(input_filename, file_iformat);
2605 event_loop();
2607 /* never returns */
2609 return 0;