[FFMpeg-mirror/ffmpeg-vdpau.git] / ffplay.c
1 /*
2 * FFplay: Simple Media Player based on the FFmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
30 #include "libavcodec/opt.h"
32 #include "cmdutils.h"
34 #include <SDL.h>
35 #include <SDL_thread.h>
37 #ifdef __MINGW32__
38 #undef main /* We don't want SDL to override our main() */
39 #endif
41 #undef exit
43 const char program_name[] = "FFplay";
44 const int program_birth_year = 2003;
46 //#define DEBUG_SYNC
48 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
49 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
50 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
52 /* SDL audio buffer size, in samples. Should be small to have precise
53 A/V sync as SDL does not have hardware buffer fullness info. */
54 #define SDL_AUDIO_BUFFER_SIZE 1024
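/* At a typical 44.1 kHz sample rate, 1024 samples correspond to roughly
   23 ms of audio per SDL callback, which bounds how often the audio clock
   and the sample display can be updated. */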
56 /* no AV sync correction is done if below the AV sync threshold */
57 #define AV_SYNC_THRESHOLD 0.01
58 /* no AV correction is done if too big error */
59 #define AV_NOSYNC_THRESHOLD 10.0
61 /* maximum audio speed change to get correct sync */
62 #define SAMPLE_CORRECTION_PERCENT_MAX 10
64 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
65 #define AUDIO_DIFF_AVG_NB 20
67 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
68 #define SAMPLE_ARRAY_SIZE (2*65536)
70 static int sws_flags = SWS_BICUBIC;
72 typedef struct PacketQueue {
73 AVPacketList *first_pkt, *last_pkt;
74 int nb_packets;
75 int size;
76 int abort_request;
77 SDL_mutex *mutex;
78 SDL_cond *cond;
79 } PacketQueue;
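/* The packet queue is a mutex-protected FIFO of AVPacketList nodes: the demux
   thread appends with packet_queue_put() and signals the condition variable,
   the decoder threads pop with packet_queue_get() (optionally blocking on the
   condition), and setting abort_request under the lock wakes any waiter so the
   thread can exit. 'size' tracks the total payload bytes and is what the
   MAX_*Q_SIZE limits above are checked against. */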
81 #define VIDEO_PICTURE_QUEUE_SIZE 1
82 #define SUBPICTURE_QUEUE_SIZE 4
84 typedef struct VideoPicture {
85 double pts; ///<presentation time stamp for this picture
86 SDL_Overlay *bmp;
87 int width, height; /* source height & width */
88 int allocated;
89 } VideoPicture;
91 typedef struct SubPicture {
92 double pts; /* presentation time stamp for this picture */
93 AVSubtitle sub;
94 } SubPicture;
96 enum {
97 AV_SYNC_AUDIO_MASTER, /* default choice */
98 AV_SYNC_VIDEO_MASTER,
99 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
102 typedef struct VideoState {
103 SDL_Thread *parse_tid;
104 SDL_Thread *video_tid;
105 AVInputFormat *iformat;
106 int no_background;
107 int abort_request;
108 int paused;
109 int last_paused;
110 int seek_req;
111 int seek_flags;
112 int64_t seek_pos;
113 AVFormatContext *ic;
114 int dtg_active_format;
116 int audio_stream;
118 int av_sync_type;
119 double external_clock; /* external clock base */
120 int64_t external_clock_time;
122 double audio_clock;
123 double audio_diff_cum; /* used for AV difference average computation */
124 double audio_diff_avg_coef;
125 double audio_diff_threshold;
126 int audio_diff_avg_count;
127 AVStream *audio_st;
128 PacketQueue audioq;
129 int audio_hw_buf_size;
130 /* samples output by the codec. we reserve more space for avsync
131 compensation */
132 DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
133 DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
134 uint8_t *audio_buf;
135 unsigned int audio_buf_size; /* in bytes */
136 int audio_buf_index; /* in bytes */
137 AVPacket audio_pkt;
138 uint8_t *audio_pkt_data;
139 int audio_pkt_size;
140 enum SampleFormat audio_src_fmt;
141 AVAudioConvert *reformat_ctx;
143 int show_audio; /* if true, display audio samples */
144 int16_t sample_array[SAMPLE_ARRAY_SIZE];
145 int sample_array_index;
146 int last_i_start;
148 SDL_Thread *subtitle_tid;
149 int subtitle_stream;
150 int subtitle_stream_changed;
151 AVStream *subtitle_st;
152 PacketQueue subtitleq;
153 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
154 int subpq_size, subpq_rindex, subpq_windex;
155 SDL_mutex *subpq_mutex;
156 SDL_cond *subpq_cond;
158 double frame_timer;
159 double frame_last_pts;
160 double frame_last_delay;
161 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
162 int video_stream;
163 AVStream *video_st;
164 PacketQueue videoq;
165 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
166 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
167 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
168 int pictq_size, pictq_rindex, pictq_windex;
169 SDL_mutex *pictq_mutex;
170 SDL_cond *pictq_cond;
172 // QETimer *video_timer;
173 char filename[1024];
174 int width, height, xleft, ytop;
175 } VideoState;
177 static void show_help(void);
178 static int audio_write_get_buf_size(VideoState *is);
180 /* options specified by the user */
181 static AVInputFormat *file_iformat;
182 static const char *input_filename;
183 static int fs_screen_width;
184 static int fs_screen_height;
185 static int screen_width = 0;
186 static int screen_height = 0;
187 static int frame_width = 0;
188 static int frame_height = 0;
189 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
190 static int audio_disable;
191 static int video_disable;
192 static int wanted_audio_stream= 0;
193 static int wanted_video_stream= 0;
194 static int seek_by_bytes;
195 static int display_disable;
196 static int show_status;
197 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
198 static int64_t start_time = AV_NOPTS_VALUE;
199 static int debug = 0;
200 static int debug_mv = 0;
201 static int step = 0;
202 static int thread_count = 1;
203 static int workaround_bugs = 1;
204 static int fast = 0;
205 static int genpts = 0;
206 static int lowres = 0;
207 static int idct = FF_IDCT_AUTO;
208 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
209 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
210 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
211 static int error_recognition = FF_ER_CAREFUL;
212 static int error_concealment = 3;
213 static int decoder_reorder_pts= 0;
215 /* current context */
216 static int is_full_screen;
217 static VideoState *cur_stream;
218 static int64_t audio_callback_time;
220 static AVPacket flush_pkt;
222 #define FF_ALLOC_EVENT (SDL_USEREVENT)
223 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
224 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
226 static SDL_Surface *screen;
228 /* packet queue handling */
229 static void packet_queue_init(PacketQueue *q)
231 memset(q, 0, sizeof(PacketQueue));
232 q->mutex = SDL_CreateMutex();
233 q->cond = SDL_CreateCond();
236 static void packet_queue_flush(PacketQueue *q)
238 AVPacketList *pkt, *pkt1;
240 SDL_LockMutex(q->mutex);
241 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
242 pkt1 = pkt->next;
243 av_free_packet(&pkt->pkt);
244 av_freep(&pkt);
246 q->last_pkt = NULL;
247 q->first_pkt = NULL;
248 q->nb_packets = 0;
249 q->size = 0;
250 SDL_UnlockMutex(q->mutex);
253 static void packet_queue_end(PacketQueue *q)
255 packet_queue_flush(q);
256 SDL_DestroyMutex(q->mutex);
257 SDL_DestroyCond(q->cond);
260 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
262 AVPacketList *pkt1;
264 /* duplicate the packet */
265 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
266 return -1;
268 pkt1 = av_malloc(sizeof(AVPacketList));
269 if (!pkt1)
270 return -1;
271 pkt1->pkt = *pkt;
272 pkt1->next = NULL;
275 SDL_LockMutex(q->mutex);
277 if (!q->last_pkt)
279 q->first_pkt = pkt1;
280 else
281 q->last_pkt->next = pkt1;
282 q->last_pkt = pkt1;
283 q->nb_packets++;
284 q->size += pkt1->pkt.size;
285 /* XXX: should duplicate packet data in DV case */
286 SDL_CondSignal(q->cond);
288 SDL_UnlockMutex(q->mutex);
289 return 0;
292 static void packet_queue_abort(PacketQueue *q)
294 SDL_LockMutex(q->mutex);
296 q->abort_request = 1;
298 SDL_CondSignal(q->cond);
300 SDL_UnlockMutex(q->mutex);
303 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
304 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
306 AVPacketList *pkt1;
307 int ret;
309 SDL_LockMutex(q->mutex);
311 for(;;) {
312 if (q->abort_request) {
313 ret = -1;
314 break;
317 pkt1 = q->first_pkt;
318 if (pkt1) {
319 q->first_pkt = pkt1->next;
320 if (!q->first_pkt)
321 q->last_pkt = NULL;
322 q->nb_packets--;
323 q->size -= pkt1->pkt.size;
324 *pkt = pkt1->pkt;
325 av_free(pkt1);
326 ret = 1;
327 break;
328 } else if (!block) {
329 ret = 0;
330 break;
331 } else {
332 SDL_CondWait(q->cond, q->mutex);
335 SDL_UnlockMutex(q->mutex);
336 return ret;
339 static inline void fill_rectangle(SDL_Surface *screen,
340 int x, int y, int w, int h, int color)
342 SDL_Rect rect;
343 rect.x = x;
344 rect.y = y;
345 rect.w = w;
346 rect.h = h;
347 SDL_FillRect(screen, &rect, color);
350 #if 0
351 /* draw only the border of a rectangle */
352 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
354 int w1, w2, h1, h2;
356 /* fill the background */
357 w1 = x;
358 if (w1 < 0)
359 w1 = 0;
360 w2 = s->width - (x + w);
361 if (w2 < 0)
362 w2 = 0;
363 h1 = y;
364 if (h1 < 0)
365 h1 = 0;
366 h2 = s->height - (y + h);
367 if (h2 < 0)
368 h2 = 0;
369 fill_rectangle(screen,
370 s->xleft, s->ytop,
371 w1, s->height,
372 color);
373 fill_rectangle(screen,
374 s->xleft + s->width - w2, s->ytop,
375 w2, s->height,
376 color);
377 fill_rectangle(screen,
378 s->xleft + w1, s->ytop,
379 s->width - w1 - w2, h1,
380 color);
381 fill_rectangle(screen,
382 s->xleft + w1, s->ytop + s->height - h2,
383 s->width - w1 - w2, h2,
384 color);
386 #endif
390 #define SCALEBITS 10
391 #define ONE_HALF (1 << (SCALEBITS - 1))
392 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
394 #define RGB_TO_Y_CCIR(r, g, b) \
395 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
396 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
398 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
399 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
400 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
402 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
403 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
404 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
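/* The macros above convert full-range 8-bit RGB to limited-range (CCIR 601)
   YCbCr: luma is scaled by 219/255 with a +16 offset, chroma by 224/255 with
   a +128 offset, all in SCALEBITS fixed-point arithmetic via FIX(). */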
406 #define ALPHA_BLEND(a, oldp, newp, s)\
407 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
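/* ALPHA_BLEND computes (oldp*(255-a) + newp*a) / 255; the shift 's' lets the
   caller pass 'newp' as a sum of 2 (s=1) or 4 (s=2) subsampled chroma values
   while keeping the result at single-sample scale. */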
409 #define RGBA_IN(r, g, b, a, s)\
411 unsigned int v = ((const uint32_t *)(s))[0];\
412 a = (v >> 24) & 0xff;\
413 r = (v >> 16) & 0xff;\
414 g = (v >> 8) & 0xff;\
415 b = v & 0xff;\
418 #define YUVA_IN(y, u, v, a, s, pal)\
420 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
421 a = (val >> 24) & 0xff;\
422 y = (val >> 16) & 0xff;\
423 u = (val >> 8) & 0xff;\
424 v = val & 0xff;\
427 #define YUVA_OUT(d, y, u, v, a)\
429 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
433 #define BPP 1
435 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
437 int wrap, wrap3, width2, skip2;
438 int y, u, v, a, u1, v1, a1, w, h;
439 uint8_t *lum, *cb, *cr;
440 const uint8_t *p;
441 const uint32_t *pal;
442 int dstx, dsty, dstw, dsth;
444 dstx = FFMIN(FFMAX(rect->x, 0), imgw);
445 dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
446 dsty = FFMIN(FFMAX(rect->y, 0), imgh);
447 dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
448 lum = dst->data[0] + dsty * dst->linesize[0];
449 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
450 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
452 width2 = (dstw + 1) >> 1;
453 skip2 = dstx >> 1;
454 wrap = dst->linesize[0];
455 wrap3 = rect->linesize;
456 p = rect->bitmap;
457 pal = rect->rgba_palette; /* Now in YCrCb! */
459 if (dsty & 1) {
460 lum += dstx;
461 cb += skip2;
462 cr += skip2;
464 if (dstx & 1) {
465 YUVA_IN(y, u, v, a, p, pal);
466 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
467 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
468 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
469 cb++;
470 cr++;
471 lum++;
472 p += BPP;
474 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
475 YUVA_IN(y, u, v, a, p, pal);
476 u1 = u;
477 v1 = v;
478 a1 = a;
479 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481 YUVA_IN(y, u, v, a, p + BPP, pal);
482 u1 += u;
483 v1 += v;
484 a1 += a;
485 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
486 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
487 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
488 cb++;
489 cr++;
490 p += 2 * BPP;
491 lum += 2;
493 if (w) {
494 YUVA_IN(y, u, v, a, p, pal);
495 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
496 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
497 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499 p += wrap3 + (wrap3 - dstw * BPP);
500 lum += wrap + (wrap - dstw - dstx);
501 cb += dst->linesize[1] - width2 - skip2;
502 cr += dst->linesize[2] - width2 - skip2;
504 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
505 lum += dstx;
506 cb += skip2;
507 cr += skip2;
509 if (dstx & 1) {
510 YUVA_IN(y, u, v, a, p, pal);
511 u1 = u;
512 v1 = v;
513 a1 = a;
514 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
515 p += wrap3;
516 lum += wrap;
517 YUVA_IN(y, u, v, a, p, pal);
518 u1 += u;
519 v1 += v;
520 a1 += a;
521 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
523 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
524 cb++;
525 cr++;
526 p += -wrap3 + BPP;
527 lum += -wrap + 1;
529 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
530 YUVA_IN(y, u, v, a, p, pal);
531 u1 = u;
532 v1 = v;
533 a1 = a;
534 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536 YUVA_IN(y, u, v, a, p, pal);
537 u1 += u;
538 v1 += v;
539 a1 += a;
540 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
541 p += wrap3;
542 lum += wrap;
544 YUVA_IN(y, u, v, a, p, pal);
545 u1 += u;
546 v1 += v;
547 a1 += a;
548 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
550 YUVA_IN(y, u, v, a, p, pal);
551 u1 += u;
552 v1 += v;
553 a1 += a;
554 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
556 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
557 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
559 cb++;
560 cr++;
561 p += -wrap3 + 2 * BPP;
562 lum += -wrap + 2;
564 if (w) {
565 YUVA_IN(y, u, v, a, p, pal);
566 u1 = u;
567 v1 = v;
568 a1 = a;
569 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
570 p += wrap3;
571 lum += wrap;
572 YUVA_IN(y, u, v, a, p, pal);
573 u1 += u;
574 v1 += v;
575 a1 += a;
576 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
578 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
579 cb++;
580 cr++;
581 p += -wrap3 + BPP;
582 lum += -wrap + 1;
584 p += wrap3 + (wrap3 - dstw * BPP);
585 lum += wrap + (wrap - dstw - dstx);
586 cb += dst->linesize[1] - width2 - skip2;
587 cr += dst->linesize[2] - width2 - skip2;
589 /* handle odd height */
590 if (h) {
591 lum += dstx;
592 cb += skip2;
593 cr += skip2;
595 if (dstx & 1) {
596 YUVA_IN(y, u, v, a, p, pal);
597 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
598 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
599 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
600 cb++;
601 cr++;
602 lum++;
603 p += BPP;
605 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
606 YUVA_IN(y, u, v, a, p, pal);
607 u1 = u;
608 v1 = v;
609 a1 = a;
610 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612 YUVA_IN(y, u, v, a, p + BPP, pal);
613 u1 += u;
614 v1 += v;
615 a1 += a;
616 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
617 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
618 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
619 cb++;
620 cr++;
621 p += 2 * BPP;
622 lum += 2;
624 if (w) {
625 YUVA_IN(y, u, v, a, p, pal);
626 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
627 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
628 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
633 static void free_subpicture(SubPicture *sp)
635 int i;
637 for (i = 0; i < sp->sub.num_rects; i++)
639 av_free(sp->sub.rects[i].bitmap);
640 av_free(sp->sub.rects[i].rgba_palette);
643 av_free(sp->sub.rects);
645 memset(&sp->sub, 0, sizeof(AVSubtitle));
648 static void video_image_display(VideoState *is)
650 VideoPicture *vp;
651 SubPicture *sp;
652 AVPicture pict;
653 float aspect_ratio;
654 int width, height, x, y;
655 SDL_Rect rect;
656 int i;
658 vp = &is->pictq[is->pictq_rindex];
659 if (vp->bmp) {
660 /* XXX: use variable in the frame */
661 if (is->video_st->sample_aspect_ratio.num)
662 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
663 else if (is->video_st->codec->sample_aspect_ratio.num)
664 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
665 else
666 aspect_ratio = 0;
667 if (aspect_ratio <= 0.0)
668 aspect_ratio = 1.0;
669 aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
670 /* if an active format is indicated, then it overrides the
671 mpeg format */
672 #if 0
673 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
674 is->dtg_active_format = is->video_st->codec->dtg_active_format;
675 printf("dtg_active_format=%d\n", is->dtg_active_format);
677 #endif
678 #if 0
679 switch(is->video_st->codec->dtg_active_format) {
680 case FF_DTG_AFD_SAME:
681 default:
682 /* nothing to do */
683 break;
684 case FF_DTG_AFD_4_3:
685 aspect_ratio = 4.0 / 3.0;
686 break;
687 case FF_DTG_AFD_16_9:
688 aspect_ratio = 16.0 / 9.0;
689 break;
690 case FF_DTG_AFD_14_9:
691 aspect_ratio = 14.0 / 9.0;
692 break;
693 case FF_DTG_AFD_4_3_SP_14_9:
694 aspect_ratio = 14.0 / 9.0;
695 break;
696 case FF_DTG_AFD_16_9_SP_14_9:
697 aspect_ratio = 14.0 / 9.0;
698 break;
699 case FF_DTG_AFD_SP_4_3:
700 aspect_ratio = 4.0 / 3.0;
701 break;
703 #endif
705 if (is->subtitle_st)
707 if (is->subpq_size > 0)
709 sp = &is->subpq[is->subpq_rindex];
711 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
713 SDL_LockYUVOverlay (vp->bmp);
715 pict.data[0] = vp->bmp->pixels[0];
716 pict.data[1] = vp->bmp->pixels[2];
717 pict.data[2] = vp->bmp->pixels[1];
719 pict.linesize[0] = vp->bmp->pitches[0];
720 pict.linesize[1] = vp->bmp->pitches[2];
721 pict.linesize[2] = vp->bmp->pitches[1];
723 for (i = 0; i < sp->sub.num_rects; i++)
724 blend_subrect(&pict, &sp->sub.rects[i],
725 vp->bmp->w, vp->bmp->h);
727 SDL_UnlockYUVOverlay (vp->bmp);
733 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
734 height = is->height;
735 width = ((int)rint(height * aspect_ratio)) & ~1;
736 if (width > is->width) {
737 width = is->width;
738 height = ((int)rint(width / aspect_ratio)) & ~1;
740 x = (is->width - width) / 2;
741 y = (is->height - height) / 2;
742 if (!is->no_background) {
743 /* fill the background */
744 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
745 } else {
746 is->no_background = 0;
748 rect.x = is->xleft + x;
749 rect.y = is->ytop + y;
750 rect.w = width;
751 rect.h = height;
752 SDL_DisplayYUVOverlay(vp->bmp, &rect);
753 } else {
754 #if 0
755 fill_rectangle(screen,
756 is->xleft, is->ytop, is->width, is->height,
757 QERGB(0x00, 0x00, 0x00));
758 #endif
762 static inline int compute_mod(int a, int b)
764 a = a % b;
765 if (a >= 0)
766 return a;
767 else
768 return a + b;
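/* compute_mod() returns a modulo b in the range [0, b) even for negative a
   (C's % operator may yield a negative result), e.g. compute_mod(-3, 10) == 7. */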
771 static void video_audio_display(VideoState *s)
773 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
774 int ch, channels, h, h2, bgcolor, fgcolor;
775 int64_t time_diff; /* microseconds since the last audio callback */
777 /* compute display index: center on currently output samples */
778 channels = s->audio_st->codec->channels;
779 nb_display_channels = channels;
780 if (!s->paused) {
781 n = 2 * channels;
782 delay = audio_write_get_buf_size(s);
783 delay /= n;
785 /* to be more precise, we take into account the time spent since
786 the last buffer computation */
787 if (audio_callback_time) {
788 time_diff = av_gettime() - audio_callback_time;
789 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
792 delay -= s->width / 2;
793 if (delay < s->width)
794 delay = s->width;
796 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
798 h= INT_MIN;
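/* Trigger search for the waveform display: scan backwards from the computed
   start position; (b^c) < 0 is true when the two probed samples have opposite
   signs (a zero crossing), and the largest a-d score favours the steepest
   falling edge, so successive refreshes start drawing at a similar phase. */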
799 for(i=0; i<1000; i+=channels){
800 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
801 int a= s->sample_array[idx];
802 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
803 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
804 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
805 int score= a-d;
806 if(h<score && (b^c)<0){
807 h= score;
808 i_start= idx;
812 s->last_i_start = i_start;
813 } else {
814 i_start = s->last_i_start;
817 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
818 fill_rectangle(screen,
819 s->xleft, s->ytop, s->width, s->height,
820 bgcolor);
822 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
824 /* total height for one channel */
825 h = s->height / nb_display_channels;
826 /* graph height / 2 */
827 h2 = (h * 9) / 20;
828 for(ch = 0;ch < nb_display_channels; ch++) {
829 i = i_start + ch;
830 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
831 for(x = 0; x < s->width; x++) {
832 y = (s->sample_array[i] * h2) >> 15;
833 if (y < 0) {
834 y = -y;
835 ys = y1 - y;
836 } else {
837 ys = y1;
839 fill_rectangle(screen,
840 s->xleft + x, ys, 1, y,
841 fgcolor);
842 i += channels;
843 if (i >= SAMPLE_ARRAY_SIZE)
844 i -= SAMPLE_ARRAY_SIZE;
848 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
850 for(ch = 1;ch < nb_display_channels; ch++) {
851 y = s->ytop + ch * h;
852 fill_rectangle(screen,
853 s->xleft, y, s->width, 1,
854 fgcolor);
856 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
859 static int video_open(VideoState *is){
860 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
861 int w,h;
863 if(is_full_screen) flags |= SDL_FULLSCREEN;
864 else flags |= SDL_RESIZABLE;
866 if (is_full_screen && fs_screen_width) {
867 w = fs_screen_width;
868 h = fs_screen_height;
869 } else if(!is_full_screen && screen_width){
870 w = screen_width;
871 h = screen_height;
872 }else if (is->video_st && is->video_st->codec->width){
873 w = is->video_st->codec->width;
874 h = is->video_st->codec->height;
875 } else {
876 w = 640;
877 h = 480;
879 #ifndef __APPLE__
880 screen = SDL_SetVideoMode(w, h, 0, flags);
881 #else
882 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
883 screen = SDL_SetVideoMode(w, h, 24, flags);
884 #endif
885 if (!screen) {
886 fprintf(stderr, "SDL: could not set video mode - exiting\n");
887 return -1;
889 SDL_WM_SetCaption("FFplay", "FFplay");
891 is->width = screen->w;
892 is->height = screen->h;
894 return 0;
897 /* display the current picture, if any */
898 static void video_display(VideoState *is)
900 if(!screen)
901 video_open(cur_stream);
902 if (is->audio_st && is->show_audio)
903 video_audio_display(is);
904 else if (is->video_st)
905 video_image_display(is);
908 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
910 SDL_Event event;
911 event.type = FF_REFRESH_EVENT;
912 event.user.data1 = opaque;
913 SDL_PushEvent(&event);
914 return 0; /* 0 means stop timer */
917 /* schedule a video refresh in 'delay' ms */
918 static void schedule_refresh(VideoState *is, int delay)
920 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
921 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
924 /* get the current audio clock value */
925 static double get_audio_clock(VideoState *is)
927 double pts;
928 int hw_buf_size, bytes_per_sec;
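/* is->audio_clock holds the pts at the *end* of the data last returned by
   audio_decode_frame(); subtracting the playback time of the bytes still
   sitting in the software buffer (bytes_per_sec assumes 16-bit samples, hence
   the factor 2) yields the pts of the sample currently being played. */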
929 pts = is->audio_clock;
930 hw_buf_size = audio_write_get_buf_size(is);
931 bytes_per_sec = 0;
932 if (is->audio_st) {
933 bytes_per_sec = is->audio_st->codec->sample_rate *
934 2 * is->audio_st->codec->channels;
936 if (bytes_per_sec)
937 pts -= (double)hw_buf_size / bytes_per_sec;
938 return pts;
941 /* get the current video clock value */
942 static double get_video_clock(VideoState *is)
944 double delta;
945 if (is->paused) {
946 delta = 0;
947 } else {
948 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
950 return is->video_current_pts + delta;
953 /* get the current external clock value */
954 static double get_external_clock(VideoState *is)
956 int64_t ti;
957 ti = av_gettime();
958 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
961 /* get the current master clock value */
962 static double get_master_clock(VideoState *is)
964 double val;
966 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
967 if (is->video_st)
968 val = get_video_clock(is);
969 else
970 val = get_audio_clock(is);
971 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
972 if (is->audio_st)
973 val = get_audio_clock(is);
974 else
975 val = get_video_clock(is);
976 } else {
977 val = get_external_clock(is);
979 return val;
982 /* seek in the stream */
983 static void stream_seek(VideoState *is, int64_t pos, int rel)
985 if (!is->seek_req) {
986 is->seek_pos = pos;
987 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
988 if (seek_by_bytes)
989 is->seek_flags |= AVSEEK_FLAG_BYTE;
990 is->seek_req = 1;
994 /* pause or resume the video */
995 static void stream_pause(VideoState *is)
997 is->paused = !is->paused;
998 if (!is->paused) {
999 is->video_current_pts = get_video_clock(is);
1000 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
1004 /* called to display each frame */
1005 static void video_refresh_timer(void *opaque)
1007 VideoState *is = opaque;
1008 VideoPicture *vp;
1009 double actual_delay, delay, sync_threshold, ref_clock, diff;
1011 SubPicture *sp, *sp2;
1013 if (is->video_st) {
1014 if (is->pictq_size == 0) {
1015 /* if no picture, need to wait */
1016 schedule_refresh(is, 1);
1017 } else {
1018 /* dequeue the picture */
1019 vp = &is->pictq[is->pictq_rindex];
1021 /* update current video pts */
1022 is->video_current_pts = vp->pts;
1023 is->video_current_pts_time = av_gettime();
1025 /* compute nominal delay */
1026 delay = vp->pts - is->frame_last_pts;
1027 if (delay <= 0 || delay >= 2.0) {
1028 /* if incorrect delay, use previous one */
1029 delay = is->frame_last_delay;
1031 is->frame_last_delay = delay;
1032 is->frame_last_pts = vp->pts;
1034 /* update delay to follow master synchronisation source */
1035 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1036 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1037 /* if video is slave, we try to correct big delays by
1038 duplicating or deleting a frame */
1039 ref_clock = get_master_clock(is);
1040 diff = vp->pts - ref_clock;
1042 /* skip or repeat frame. We take into account the
1043 delay to compute the threshold. I still don't know
1044 if it is the best guess */
1045 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1046 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1047 if (diff <= -sync_threshold)
1048 delay = 0;
1049 else if (diff >= sync_threshold)
1050 delay = 2 * delay;
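/* A diff below -sync_threshold means video is late: drop the wait so the next
   picture is shown immediately; a diff above +sync_threshold means video is
   early: wait one extra frame period. frame_timer below accumulates the
   scheduled display times, so the real wait is its distance to "now". */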
1054 is->frame_timer += delay;
1055 /* compute the REAL delay (we need to do that to avoid
1056 long-term errors) */
1057 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1058 if (actual_delay < 0.010) {
1059 /* XXX: should skip picture */
1060 actual_delay = 0.010;
1062 /* launch timer for next picture */
1063 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1065 #if defined(DEBUG_SYNC)
1066 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1067 delay, actual_delay, vp->pts, -diff);
1068 #endif
1070 if(is->subtitle_st) {
1071 if (is->subtitle_stream_changed) {
1072 SDL_LockMutex(is->subpq_mutex);
1074 while (is->subpq_size) {
1075 free_subpicture(&is->subpq[is->subpq_rindex]);
1077 /* update queue size and signal for next picture */
1078 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1079 is->subpq_rindex = 0;
1081 is->subpq_size--;
1083 is->subtitle_stream_changed = 0;
1085 SDL_CondSignal(is->subpq_cond);
1086 SDL_UnlockMutex(is->subpq_mutex);
1087 } else {
1088 if (is->subpq_size > 0) {
1089 sp = &is->subpq[is->subpq_rindex];
1091 if (is->subpq_size > 1)
1092 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1093 else
1094 sp2 = NULL;
1096 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1097 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1099 free_subpicture(sp);
1101 /* update queue size and signal for next picture */
1102 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1103 is->subpq_rindex = 0;
1105 SDL_LockMutex(is->subpq_mutex);
1106 is->subpq_size--;
1107 SDL_CondSignal(is->subpq_cond);
1108 SDL_UnlockMutex(is->subpq_mutex);
1114 /* display picture */
1115 video_display(is);
1117 /* update queue size and signal for next picture */
1118 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1119 is->pictq_rindex = 0;
1121 SDL_LockMutex(is->pictq_mutex);
1122 is->pictq_size--;
1123 SDL_CondSignal(is->pictq_cond);
1124 SDL_UnlockMutex(is->pictq_mutex);
1126 } else if (is->audio_st) {
1127 /* draw the next audio frame */
1129 schedule_refresh(is, 40);
1131 /* if there is only an audio stream, then display the audio bars (better
1132 than nothing, just to test the implementation) */
1134 /* display picture */
1135 video_display(is);
1136 } else {
1137 schedule_refresh(is, 100);
1139 if (show_status) {
1140 static int64_t last_time;
1141 int64_t cur_time;
1142 int aqsize, vqsize, sqsize;
1143 double av_diff;
1145 cur_time = av_gettime();
1146 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1147 aqsize = 0;
1148 vqsize = 0;
1149 sqsize = 0;
1150 if (is->audio_st)
1151 aqsize = is->audioq.size;
1152 if (is->video_st)
1153 vqsize = is->videoq.size;
1154 if (is->subtitle_st)
1155 sqsize = is->subtitleq.size;
1156 av_diff = 0;
1157 if (is->audio_st && is->video_st)
1158 av_diff = get_audio_clock(is) - get_video_clock(is);
1159 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1160 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1161 fflush(stdout);
1162 last_time = cur_time;
1167 /* allocate a picture (this needs to be done in the main thread to avoid
1168 potential locking problems) */
1169 static void alloc_picture(void *opaque)
1171 VideoState *is = opaque;
1172 VideoPicture *vp;
1174 vp = &is->pictq[is->pictq_windex];
1176 if (vp->bmp)
1177 SDL_FreeYUVOverlay(vp->bmp);
1179 #if 0
1180 /* XXX: use generic function */
1181 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1182 switch(is->video_st->codec->pix_fmt) {
1183 case PIX_FMT_YUV420P:
1184 case PIX_FMT_YUV422P:
1185 case PIX_FMT_YUV444P:
1186 case PIX_FMT_YUYV422:
1187 case PIX_FMT_YUV410P:
1188 case PIX_FMT_YUV411P:
1189 is_yuv = 1;
1190 break;
1191 default:
1192 is_yuv = 0;
1193 break;
1195 #endif
1196 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1197 is->video_st->codec->height,
1198 SDL_YV12_OVERLAY,
1199 screen);
1200 vp->width = is->video_st->codec->width;
1201 vp->height = is->video_st->codec->height;
1203 SDL_LockMutex(is->pictq_mutex);
1204 vp->allocated = 1;
1205 SDL_CondSignal(is->pictq_cond);
1206 SDL_UnlockMutex(is->pictq_mutex);
1211 * @param pts the dts of the pkt / pts of the frame, guessed if not known
1213 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1215 VideoPicture *vp;
1216 int dst_pix_fmt;
1217 AVPicture pict;
1218 static struct SwsContext *img_convert_ctx;
1220 /* wait until we have space to put a new picture */
1221 SDL_LockMutex(is->pictq_mutex);
1222 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1223 !is->videoq.abort_request) {
1224 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1226 SDL_UnlockMutex(is->pictq_mutex);
1228 if (is->videoq.abort_request)
1229 return -1;
1231 vp = &is->pictq[is->pictq_windex];
1233 /* alloc or resize hardware picture buffer */
1234 if (!vp->bmp ||
1235 vp->width != is->video_st->codec->width ||
1236 vp->height != is->video_st->codec->height) {
1237 SDL_Event event;
1239 vp->allocated = 0;
1241 /* the allocation must be done in the main thread to avoid
1242 locking problems */
1243 event.type = FF_ALLOC_EVENT;
1244 event.user.data1 = is;
1245 SDL_PushEvent(&event);
1247 /* wait until the picture is allocated */
1248 SDL_LockMutex(is->pictq_mutex);
1249 while (!vp->allocated && !is->videoq.abort_request) {
1250 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1252 SDL_UnlockMutex(is->pictq_mutex);
1254 if (is->videoq.abort_request)
1255 return -1;
1258 /* if the frame is not skipped, then display it */
1259 if (vp->bmp) {
1260 /* get a pointer on the bitmap */
1261 SDL_LockYUVOverlay (vp->bmp);
1263 dst_pix_fmt = PIX_FMT_YUV420P;
1264 pict.data[0] = vp->bmp->pixels[0];
1265 pict.data[1] = vp->bmp->pixels[2];
1266 pict.data[2] = vp->bmp->pixels[1];
1268 pict.linesize[0] = vp->bmp->pitches[0];
1269 pict.linesize[1] = vp->bmp->pitches[2];
1270 pict.linesize[2] = vp->bmp->pitches[1];
1271 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1272 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1273 is->video_st->codec->width, is->video_st->codec->height,
1274 is->video_st->codec->pix_fmt,
1275 is->video_st->codec->width, is->video_st->codec->height,
1276 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1277 if (img_convert_ctx == NULL) {
1278 fprintf(stderr, "Cannot initialize the conversion context\n");
1279 exit(1);
1281 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1282 0, is->video_st->codec->height, pict.data, pict.linesize);
1283 /* update the bitmap content */
1284 SDL_UnlockYUVOverlay(vp->bmp);
1286 vp->pts = pts;
1288 /* now we can update the picture count */
1289 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1290 is->pictq_windex = 0;
1291 SDL_LockMutex(is->pictq_mutex);
1292 is->pictq_size++;
1293 SDL_UnlockMutex(is->pictq_mutex);
1295 return 0;
1299 * compute the exact PTS for the picture if it is omitted in the stream
1300 * @param pts1 the dts of the pkt / pts of the frame
1302 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1304 double frame_delay, pts;
1306 pts = pts1;
1308 if (pts != 0) {
1309 /* update video clock with pts, if present */
1310 is->video_clock = pts;
1311 } else {
1312 pts = is->video_clock;
1314 /* update video clock for next frame */
1315 frame_delay = av_q2d(is->video_st->codec->time_base);
1316 /* for MPEG2, the frame can be repeated, so we update the
1317 clock accordingly */
1318 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
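/* repeat_pict counts extra half frame periods: e.g. an MPEG-2 frame with
   repeat_first_field set has repeat_pict == 1 and is displayed for 1.5
   nominal frame durations, so the predicted pts advances accordingly. */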
1319 is->video_clock += frame_delay;
1321 #if defined(DEBUG_SYNC) && 0
1323 int ftype;
1324 if (src_frame->pict_type == FF_B_TYPE)
1325 ftype = 'B';
1326 else if (src_frame->pict_type == FF_I_TYPE)
1327 ftype = 'I';
1328 else
1329 ftype = 'P';
1330 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1331 ftype, pts, pts1);
1333 #endif
1334 return queue_picture(is, src_frame, pts);
1337 static int video_thread(void *arg)
1339 VideoState *is = arg;
1340 AVPacket pkt1, *pkt = &pkt1;
1341 int len1, got_picture;
1342 AVFrame *frame= avcodec_alloc_frame();
1343 double pts;
1345 for(;;) {
1346 while (is->paused && !is->videoq.abort_request) {
1347 SDL_Delay(10);
1349 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1350 break;
1352 if(pkt->data == flush_pkt.data){
1353 avcodec_flush_buffers(is->video_st->codec);
1354 continue;
1357 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1358 this packet, if any */
1359 is->video_st->codec->reordered_opaque= pkt->pts;
1360 len1 = avcodec_decode_video(is->video_st->codec,
1361 frame, &got_picture,
1362 pkt->data, pkt->size);
1364 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1365 && frame->reordered_opaque != AV_NOPTS_VALUE)
1366 pts= frame->reordered_opaque;
1367 else if(pkt->dts != AV_NOPTS_VALUE)
1368 pts= pkt->dts;
1369 else
1370 pts= 0;
1371 pts *= av_q2d(is->video_st->time_base);
1373 // if (len1 < 0)
1374 // break;
1375 if (got_picture) {
1376 if (output_picture2(is, frame, pts) < 0)
1377 goto the_end;
1379 av_free_packet(pkt);
1380 if (step)
1381 if (cur_stream)
1382 stream_pause(cur_stream);
1384 the_end:
1385 av_free(frame);
1386 return 0;
1389 static int subtitle_thread(void *arg)
1391 VideoState *is = arg;
1392 SubPicture *sp;
1393 AVPacket pkt1, *pkt = &pkt1;
1394 int len1, got_subtitle;
1395 double pts;
1396 int i, j;
1397 int r, g, b, y, u, v, a;
1399 for(;;) {
1400 while (is->paused && !is->subtitleq.abort_request) {
1401 SDL_Delay(10);
1403 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1404 break;
1406 if(pkt->data == flush_pkt.data){
1407 avcodec_flush_buffers(is->subtitle_st->codec);
1408 continue;
1410 SDL_LockMutex(is->subpq_mutex);
1411 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1412 !is->subtitleq.abort_request) {
1413 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1415 SDL_UnlockMutex(is->subpq_mutex);
1417 if (is->subtitleq.abort_request)
1418 goto the_end;
1420 sp = &is->subpq[is->subpq_windex];
1422 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1423 this packet, if any */
1424 pts = 0;
1425 if (pkt->pts != AV_NOPTS_VALUE)
1426 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1428 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1429 &sp->sub, &got_subtitle,
1430 pkt->data, pkt->size);
1431 // if (len1 < 0)
1432 // break;
1433 if (got_subtitle && sp->sub.format == 0) {
1434 sp->pts = pts;
1436 for (i = 0; i < sp->sub.num_rects; i++)
1438 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1440 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1441 y = RGB_TO_Y_CCIR(r, g, b);
1442 u = RGB_TO_U_CCIR(r, g, b, 0);
1443 v = RGB_TO_V_CCIR(r, g, b, 0);
1444 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1448 /* now we can update the picture count */
1449 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1450 is->subpq_windex = 0;
1451 SDL_LockMutex(is->subpq_mutex);
1452 is->subpq_size++;
1453 SDL_UnlockMutex(is->subpq_mutex);
1455 av_free_packet(pkt);
1456 // if (step)
1457 // if (cur_stream)
1458 // stream_pause(cur_stream);
1460 the_end:
1461 return 0;
1464 /* copy samples into the ring buffer used by the audio waveform display */
1465 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1467 int size, len, channels;
1469 channels = is->audio_st->codec->channels;
1471 size = samples_size / sizeof(short);
1472 while (size > 0) {
1473 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1474 if (len > size)
1475 len = size;
1476 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1477 samples += len;
1478 is->sample_array_index += len;
1479 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1480 is->sample_array_index = 0;
1481 size -= len;
1485 /* return the new audio buffer size (samples can be added or deleted
1486 to get better sync if the video or external clock is the master) */
1487 static int synchronize_audio(VideoState *is, short *samples,
1488 int samples_size1, double pts)
1490 int n, samples_size;
1491 double ref_clock;
1493 n = 2 * is->audio_st->codec->channels;
1494 samples_size = samples_size1;
1496 /* if not master, then we try to remove or add samples to correct the clock */
1497 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1498 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1499 double diff, avg_diff;
1500 int wanted_size, min_size, max_size, nb_samples;
1502 ref_clock = get_master_clock(is);
1503 diff = get_audio_clock(is) - ref_clock;
1505 if (diff < AV_NOSYNC_THRESHOLD) {
1506 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1507 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1508 /* not enough measures to have a correct estimate */
1509 is->audio_diff_avg_count++;
1510 } else {
1511 /* estimate the A-V difference */
1512 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1514 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1515 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1516 nb_samples = samples_size / n;
1518 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1519 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1520 if (wanted_size < min_size)
1521 wanted_size = min_size;
1522 else if (wanted_size > max_size)
1523 wanted_size = max_size;
1525 /* add or remove samples to correct the sync */
1526 if (wanted_size < samples_size) {
1527 /* remove samples */
1528 samples_size = wanted_size;
1529 } else if (wanted_size > samples_size) {
1530 uint8_t *samples_end, *q;
1531 int nb;
1533 /* add samples */
1534 nb = wanted_size - samples_size; /* bytes still needed to reach wanted_size */
1535 samples_end = (uint8_t *)samples + samples_size - n;
1536 q = samples_end + n;
1537 while (nb > 0) {
1538 memcpy(q, samples_end, n);
1539 q += n;
1540 nb -= n;
1542 samples_size = wanted_size;
1545 #if 0
1546 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1547 diff, avg_diff, samples_size - samples_size1,
1548 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1549 #endif
1551 } else {
1552 /* difference is too large: may be initial PTS errors, so
1553 reset the A-V filter */
1554 is->audio_diff_avg_count = 0;
1555 is->audio_diff_cum = 0;
1559 return samples_size;
1562 /* decode one audio frame and return its uncompressed size */
1563 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1565 AVPacket *pkt = &is->audio_pkt;
1566 AVCodecContext *dec= is->audio_st->codec;
1567 int n, len1, data_size;
1568 double pts;
1570 for(;;) {
1571 /* NOTE: the audio packet can contain several frames */
1572 while (is->audio_pkt_size > 0) {
1573 data_size = sizeof(is->audio_buf1);
1574 len1 = avcodec_decode_audio2(dec,
1575 (int16_t *)is->audio_buf1, &data_size,
1576 is->audio_pkt_data, is->audio_pkt_size);
1577 if (len1 < 0) {
1578 /* if error, we skip the frame */
1579 is->audio_pkt_size = 0;
1580 break;
1583 is->audio_pkt_data += len1;
1584 is->audio_pkt_size -= len1;
1585 if (data_size <= 0)
1586 continue;
1588 if (dec->sample_fmt != is->audio_src_fmt) {
1589 if (is->reformat_ctx)
1590 av_audio_convert_free(is->reformat_ctx);
1591 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1592 dec->sample_fmt, 1, NULL, 0);
1593 if (!is->reformat_ctx) {
1594 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1595 avcodec_get_sample_fmt_name(dec->sample_fmt),
1596 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1597 break;
1599 is->audio_src_fmt= dec->sample_fmt;
1602 if (is->reformat_ctx) {
1603 const void *ibuf[6]= {is->audio_buf1};
1604 void *obuf[6]= {is->audio_buf2};
1605 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1606 int ostride[6]= {2};
1607 int len= data_size/istride[0];
1608 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1609 printf("av_audio_convert() failed\n");
1610 break;
1612 is->audio_buf= is->audio_buf2;
1613 /* FIXME: existing code assumes that data_size equals framesize*channels*2
1614 remove this legacy cruft */
1615 data_size= len*2;
1616 }else{
1617 is->audio_buf= is->audio_buf1;
1620 /* if no pts, then compute it */
1621 pts = is->audio_clock;
1622 *pts_ptr = pts;
1623 n = 2 * dec->channels;
1624 is->audio_clock += (double)data_size /
1625 (double)(n * dec->sample_rate);
1626 #if defined(DEBUG_SYNC)
1628 static double last_clock;
1629 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1630 is->audio_clock - last_clock,
1631 is->audio_clock, pts);
1632 last_clock = is->audio_clock;
1634 #endif
1635 return data_size;
1638 /* free the current packet */
1639 if (pkt->data)
1640 av_free_packet(pkt);
1642 if (is->paused || is->audioq.abort_request) {
1643 return -1;
1646 /* read next packet */
1647 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1648 return -1;
1649 if(pkt->data == flush_pkt.data){
1650 avcodec_flush_buffers(dec);
1651 continue;
1654 is->audio_pkt_data = pkt->data;
1655 is->audio_pkt_size = pkt->size;
1657 /* if the packet has a pts, update the audio clock with it */
1658 if (pkt->pts != AV_NOPTS_VALUE) {
1659 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1664 /* get the current audio output buffer size, in bytes. With SDL, we
1665 cannot have precise information */
1666 static int audio_write_get_buf_size(VideoState *is)
1668 return is->audio_buf_size - is->audio_buf_index;
1672 /* prepare a new audio buffer */
1673 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1675 VideoState *is = opaque;
1676 int audio_size, len1;
1677 double pts;
1679 audio_callback_time = av_gettime();
1681 while (len > 0) {
1682 if (is->audio_buf_index >= is->audio_buf_size) {
1683 audio_size = audio_decode_frame(is, &pts);
1684 if (audio_size < 0) {
1685 /* if error, just output silence */
1686 is->audio_buf_size = 1024;
1687 memset(is->audio_buf, 0, is->audio_buf_size);
1688 } else {
1689 if (is->show_audio)
1690 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1691 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1692 pts);
1693 is->audio_buf_size = audio_size;
1695 is->audio_buf_index = 0;
1697 len1 = is->audio_buf_size - is->audio_buf_index;
1698 if (len1 > len)
1699 len1 = len;
1700 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1701 len -= len1;
1702 stream += len1;
1703 is->audio_buf_index += len1;
1707 /* open a given stream. Return 0 if OK */
1708 static int stream_component_open(VideoState *is, int stream_index)
1710 AVFormatContext *ic = is->ic;
1711 AVCodecContext *enc;
1712 AVCodec *codec;
1713 SDL_AudioSpec wanted_spec, spec;
1715 if (stream_index < 0 || stream_index >= ic->nb_streams)
1716 return -1;
1717 enc = ic->streams[stream_index]->codec;
1719 /* prepare audio output */
1720 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1721 if (enc->channels > 0) {
1722 enc->request_channels = FFMIN(2, enc->channels);
1723 } else {
1724 enc->request_channels = 2;
1728 codec = avcodec_find_decoder(enc->codec_id);
1729 enc->debug_mv = debug_mv;
1730 enc->debug = debug;
1731 enc->workaround_bugs = workaround_bugs;
1732 enc->lowres = lowres;
1733 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1734 enc->idct_algo= idct;
1735 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1736 enc->skip_frame= skip_frame;
1737 enc->skip_idct= skip_idct;
1738 enc->skip_loop_filter= skip_loop_filter;
1739 enc->error_recognition= error_recognition;
1740 enc->error_concealment= error_concealment;
1742 set_context_opts(enc, avctx_opts[enc->codec_type], 0);
1744 if (!codec ||
1745 avcodec_open(enc, codec) < 0)
1746 return -1;
1748 /* prepare audio output */
1749 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1750 wanted_spec.freq = enc->sample_rate;
1751 wanted_spec.format = AUDIO_S16SYS;
1752 wanted_spec.channels = enc->channels;
1753 wanted_spec.silence = 0;
1754 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1755 wanted_spec.callback = sdl_audio_callback;
1756 wanted_spec.userdata = is;
1757 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1758 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1759 return -1;
1761 is->audio_hw_buf_size = spec.size;
1762 is->audio_src_fmt= SAMPLE_FMT_S16;
1765 if(thread_count>1)
1766 avcodec_thread_init(enc, thread_count);
1767 enc->thread_count= thread_count;
1768 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1769 switch(enc->codec_type) {
1770 case CODEC_TYPE_AUDIO:
1771 is->audio_stream = stream_index;
1772 is->audio_st = ic->streams[stream_index];
1773 is->audio_buf_size = 0;
1774 is->audio_buf_index = 0;
1776 /* init averaging filter */
1777 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
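/* The coefficient is 0.01^(1/AUDIO_DIFF_AVG_NB) (~0.79 for 20), so in the
   recurrence cum = diff + coef*cum the contribution of a measurement decays
   to about 1% after AUDIO_DIFF_AVG_NB callbacks; avg_diff = cum*(1-coef)
   is then an exponentially weighted mean of the recent A-V differences. */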
1778 is->audio_diff_avg_count = 0;
1779 /* since we do not have a precise enough measure of the audio FIFO fullness,
1780 we correct audio sync only if the error is larger than this threshold */
1781 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1783 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1784 packet_queue_init(&is->audioq);
1785 SDL_PauseAudio(0);
1786 break;
1787 case CODEC_TYPE_VIDEO:
1788 is->video_stream = stream_index;
1789 is->video_st = ic->streams[stream_index];
1791 is->frame_last_delay = 40e-3;
1792 is->frame_timer = (double)av_gettime() / 1000000.0;
1793 is->video_current_pts_time = av_gettime();
1795 packet_queue_init(&is->videoq);
1796 is->video_tid = SDL_CreateThread(video_thread, is);
1797 break;
1798 case CODEC_TYPE_SUBTITLE:
1799 is->subtitle_stream = stream_index;
1800 is->subtitle_st = ic->streams[stream_index];
1801 packet_queue_init(&is->subtitleq);
1803 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1804 break;
1805 default:
1806 break;
1808 return 0;
1811 static void stream_component_close(VideoState *is, int stream_index)
1813 AVFormatContext *ic = is->ic;
1814 AVCodecContext *enc;
1816 if (stream_index < 0 || stream_index >= ic->nb_streams)
1817 return;
1818 enc = ic->streams[stream_index]->codec;
1820 switch(enc->codec_type) {
1821 case CODEC_TYPE_AUDIO:
1822 packet_queue_abort(&is->audioq);
1824 SDL_CloseAudio();
1826 packet_queue_end(&is->audioq);
1827 if (is->reformat_ctx)
1828 av_audio_convert_free(is->reformat_ctx);
1829 break;
1830 case CODEC_TYPE_VIDEO:
1831 packet_queue_abort(&is->videoq);
1833 /* note: we also signal this condition to make sure we unblock the
1834 video thread in all cases */
1835 SDL_LockMutex(is->pictq_mutex);
1836 SDL_CondSignal(is->pictq_cond);
1837 SDL_UnlockMutex(is->pictq_mutex);
1839 SDL_WaitThread(is->video_tid, NULL);
1841 packet_queue_end(&is->videoq);
1842 break;
1843 case CODEC_TYPE_SUBTITLE:
1844 packet_queue_abort(&is->subtitleq);
1846 /* note: we also signal this condition to make sure we unblock the
1847 subtitle thread in all cases */
1848 SDL_LockMutex(is->subpq_mutex);
1849 is->subtitle_stream_changed = 1;
1851 SDL_CondSignal(is->subpq_cond);
1852 SDL_UnlockMutex(is->subpq_mutex);
1854 SDL_WaitThread(is->subtitle_tid, NULL);
1856 packet_queue_end(&is->subtitleq);
1857 break;
1858 default:
1859 break;
1862 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1863 avcodec_close(enc);
1864 switch(enc->codec_type) {
1865 case CODEC_TYPE_AUDIO:
1866 is->audio_st = NULL;
1867 is->audio_stream = -1;
1868 break;
1869 case CODEC_TYPE_VIDEO:
1870 is->video_st = NULL;
1871 is->video_stream = -1;
1872 break;
1873 case CODEC_TYPE_SUBTITLE:
1874 is->subtitle_st = NULL;
1875 is->subtitle_stream = -1;
1876 break;
1877 default:
1878 break;
1882 static void dump_stream_info(const AVFormatContext *s)
1884 if (s->track != 0)
1885 fprintf(stderr, "Track: %d\n", s->track);
1886 if (s->title[0] != '\0')
1887 fprintf(stderr, "Title: %s\n", s->title);
1888 if (s->author[0] != '\0')
1889 fprintf(stderr, "Author: %s\n", s->author);
1890 if (s->copyright[0] != '\0')
1891 fprintf(stderr, "Copyright: %s\n", s->copyright);
1892 if (s->comment[0] != '\0')
1893 fprintf(stderr, "Comment: %s\n", s->comment);
1894 if (s->album[0] != '\0')
1895 fprintf(stderr, "Album: %s\n", s->album);
1896 if (s->year != 0)
1897 fprintf(stderr, "Year: %d\n", s->year);
1898 if (s->genre[0] != '\0')
1899 fprintf(stderr, "Genre: %s\n", s->genre);
1902 /* since we have only one decoding thread, we can use a global
1903 variable instead of a thread local variable */
1904 static VideoState *global_video_state;
1906 static int decode_interrupt_cb(void)
1908 return (global_video_state && global_video_state->abort_request);
1911 /* this thread gets the stream from the disk or the network */
1912 static int decode_thread(void *arg)
1914 VideoState *is = arg;
1915 AVFormatContext *ic;
1916 int err, i, ret, video_index, audio_index;
1917 AVPacket pkt1, *pkt = &pkt1;
1918 AVFormatParameters params, *ap = &params;
1920 video_index = -1;
1921 audio_index = -1;
1922 is->video_stream = -1;
1923 is->audio_stream = -1;
1924 is->subtitle_stream = -1;
1926 global_video_state = is;
1927 url_set_interrupt_cb(decode_interrupt_cb);
1929 memset(ap, 0, sizeof(*ap));
1931 ap->width = frame_width;
1932 ap->height= frame_height;
1933 ap->time_base= (AVRational){1, 25};
1934 ap->pix_fmt = frame_pix_fmt;
1936 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1937 if (err < 0) {
1938 print_error(is->filename, err);
1939 ret = -1;
1940 goto fail;
1942 is->ic = ic;
1944 if(genpts)
1945 ic->flags |= AVFMT_FLAG_GENPTS;
1947 err = av_find_stream_info(ic);
1948 if (err < 0) {
1949 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1950 ret = -1;
1951 goto fail;
1953 if(ic->pb)
1954 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1956 /* if seeking requested, we execute it */
1957 if (start_time != AV_NOPTS_VALUE) {
1958 int64_t timestamp;
1960 timestamp = start_time;
1961 /* add the stream start time */
1962 if (ic->start_time != AV_NOPTS_VALUE)
1963 timestamp += ic->start_time;
1964 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1965 if (ret < 0) {
1966 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1967 is->filename, (double)timestamp / AV_TIME_BASE);
1971 for(i = 0; i < ic->nb_streams; i++) {
1972 AVCodecContext *enc = ic->streams[i]->codec;
1973 ic->streams[i]->discard = AVDISCARD_ALL;
1974 switch(enc->codec_type) {
1975 case CODEC_TYPE_AUDIO:
1976 if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1977 audio_index = i;
1978 break;
1979 case CODEC_TYPE_VIDEO:
1980 if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
1981 video_index = i;
1982 break;
1983 default:
1984 break;
1987 if (show_status) {
1988 dump_format(ic, 0, is->filename, 0);
1989 dump_stream_info(ic);
1992 /* open the streams */
1993 if (audio_index >= 0) {
1994 stream_component_open(is, audio_index);
1997 if (video_index >= 0) {
1998 stream_component_open(is, video_index);
1999 } else {
2000 if (!display_disable)
2001 is->show_audio = 1;
2004 if (is->video_stream < 0 && is->audio_stream < 0) {
2005 fprintf(stderr, "%s: could not open codecs\n", is->filename);
2006 ret = -1;
2007 goto fail;
2010 for(;;) {
2011 if (is->abort_request)
2012 break;
2013 if (is->paused != is->last_paused) {
2014 is->last_paused = is->paused;
2015 if (is->paused)
2016 av_read_pause(ic);
2017 else
2018 av_read_play(ic);
2020 #if defined(CONFIG_RTSP_DEMUXER) || defined(CONFIG_MMSH_PROTOCOL)
2021 if (is->paused &&
2022 (!strcmp(ic->iformat->name, "rtsp") ||
2023 (ic->pb && !strcmp(url_fileno(ic->pb)->prot->name, "mmsh")))) {
2024 /* wait 10 ms to avoid trying to get another packet */
2025 /* XXX: horrible */
2026 SDL_Delay(10);
2027 continue;
2029 #endif
2030 if (is->seek_req) {
2031 int stream_index= -1;
2032 int64_t seek_target= is->seek_pos;
2034 if (is-> video_stream >= 0) stream_index= is-> video_stream;
2035 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
2036 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
2038 if(stream_index>=0){
2039 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2042 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2043 if (ret < 0) {
2044 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2045 }else{
2046 if (is->audio_stream >= 0) {
2047 packet_queue_flush(&is->audioq);
2048 packet_queue_put(&is->audioq, &flush_pkt);
2050 if (is->subtitle_stream >= 0) {
2051 packet_queue_flush(&is->subtitleq);
2052 packet_queue_put(&is->subtitleq, &flush_pkt);
2054 if (is->video_stream >= 0) {
2055 packet_queue_flush(&is->videoq);
2056 packet_queue_put(&is->videoq, &flush_pkt);
2059 is->seek_req = 0;
2062 /* if the queues are full, no need to read more */
2063 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2064 is->videoq.size > MAX_VIDEOQ_SIZE ||
2065 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2066 url_feof(ic->pb)) {
2067 /* wait 10 ms */
2068 SDL_Delay(10);
2069 continue;
2071 ret = av_read_frame(ic, pkt);
2072 if (ret < 0) {
2073 if (url_ferror(ic->pb) == 0) {
2074 SDL_Delay(100); /* wait for user event */
2075 continue;
2076 } else
2077 break;
2079 if (pkt->stream_index == is->audio_stream) {
2080 packet_queue_put(&is->audioq, pkt);
2081 } else if (pkt->stream_index == is->video_stream) {
2082 packet_queue_put(&is->videoq, pkt);
2083 } else if (pkt->stream_index == is->subtitle_stream) {
2084 packet_queue_put(&is->subtitleq, pkt);
2085 } else {
2086 av_free_packet(pkt);
2089 /* wait until the end */
2090 while (!is->abort_request) {
2091 SDL_Delay(100);
2094 ret = 0;
2095 fail:
2096 /* disable interrupting */
2097 global_video_state = NULL;
2099 /* close each stream */
2100 if (is->audio_stream >= 0)
2101 stream_component_close(is, is->audio_stream);
2102 if (is->video_stream >= 0)
2103 stream_component_close(is, is->video_stream);
2104 if (is->subtitle_stream >= 0)
2105 stream_component_close(is, is->subtitle_stream);
2106 if (is->ic) {
2107 av_close_input_file(is->ic);
2108 is->ic = NULL; /* safety */
2110 url_set_interrupt_cb(NULL);
2112 if (ret != 0) {
2113 SDL_Event event;
2115 event.type = FF_QUIT_EVENT;
2116 event.user.data1 = is;
2117 SDL_PushEvent(&event);
2119 return 0;
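 /* allocate and initialize a VideoState for the given file, start the
    display refresh timer and the demuxing thread (decode_thread);
    returns NULL if allocation or thread creation fails */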
2122 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2124 VideoState *is;
2126 is = av_mallocz(sizeof(VideoState));
2127 if (!is)
2128 return NULL;
2129 av_strlcpy(is->filename, filename, sizeof(is->filename));
2130 is->iformat = iformat;
2131 is->ytop = 0;
2132 is->xleft = 0;
2134 /* start video display */
2135 is->pictq_mutex = SDL_CreateMutex();
2136 is->pictq_cond = SDL_CreateCond();
2138 is->subpq_mutex = SDL_CreateMutex();
2139 is->subpq_cond = SDL_CreateCond();
2141 /* add the refresh timer to draw the picture */
2142 schedule_refresh(is, 40);
2144 is->av_sync_type = av_sync_type;
2145 is->parse_tid = SDL_CreateThread(decode_thread, is);
2146 if (!is->parse_tid) {
2147 av_free(is);
2148 return NULL;
2150 return is;
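 /* tear down a VideoState: ask the parse thread to stop and wait for it,
    then free the queued YUV overlays and the SDL mutexes/conditions */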
2153 static void stream_close(VideoState *is)
2155 VideoPicture *vp;
2156 int i;
2157 /* XXX: use a special url_shutdown call to abort parse cleanly */
2158 is->abort_request = 1;
2159 SDL_WaitThread(is->parse_tid, NULL);
2161 /* free all pictures */
2162 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2163 vp = &is->pictq[i];
2164 if (vp->bmp) {
2165 SDL_FreeYUVOverlay(vp->bmp);
2166 vp->bmp = NULL;
2169 SDL_DestroyMutex(is->pictq_mutex);
2170 SDL_DestroyCond(is->pictq_cond);
2171 SDL_DestroyMutex(is->subpq_mutex);
2172 SDL_DestroyCond(is->subpq_cond);
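 /* switch to the next stream of the given codec type, wrapping around;
    for subtitles the cycle also includes "no subtitle stream" (index -1) */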
2175 static void stream_cycle_channel(VideoState *is, int codec_type)
2177 AVFormatContext *ic = is->ic;
2178 int start_index, stream_index;
2179 AVStream *st;
2181 if (codec_type == CODEC_TYPE_VIDEO)
2182 start_index = is->video_stream;
2183 else if (codec_type == CODEC_TYPE_AUDIO)
2184 start_index = is->audio_stream;
2185 else
2186 start_index = is->subtitle_stream;
2187 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2188 return;
2189 stream_index = start_index;
2190 for(;;) {
2191         if (++stream_index >= is->ic->nb_streams)
2192         {
2193             if (codec_type == CODEC_TYPE_SUBTITLE)
2194             {
2195                 stream_index = -1;
2196                 goto the_end;
2197             } else
2198                 stream_index = 0;
2199         }
2200 if (stream_index == start_index)
2201 return;
2202 st = ic->streams[stream_index];
2203 if (st->codec->codec_type == codec_type) {
2204 /* check that parameters are OK */
2205 switch(codec_type) {
2206 case CODEC_TYPE_AUDIO:
2207 if (st->codec->sample_rate != 0 &&
2208 st->codec->channels != 0)
2209 goto the_end;
2210 break;
2211 case CODEC_TYPE_VIDEO:
2212 case CODEC_TYPE_SUBTITLE:
2213 goto the_end;
2214 default:
2215 break;
2219 the_end:
2220 stream_component_close(is, start_index);
2221 stream_component_open(is, stream_index);
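 /* small UI helpers, all driven by the keyboard handling in event_loop() below */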
2225 static void toggle_full_screen(void)
2227 is_full_screen = !is_full_screen;
2228 if (!fs_screen_width) {
2229 /* use default SDL method */
2230 // SDL_WM_ToggleFullScreen(screen);
2232 video_open(cur_stream);
2235 static void toggle_pause(void)
2237 if (cur_stream)
2238 stream_pause(cur_stream);
2239 step = 0;
2242 static void step_to_next_frame(void)
2244 if (cur_stream) {
2246         /* if the stream is paused, unpause it, then step */
2246 if (cur_stream->paused)
2247 stream_pause(cur_stream);
2249 step = 1;
2252 static void do_exit(void)
2254 if (cur_stream) {
2255 stream_close(cur_stream);
2256 cur_stream = NULL;
2258 if (show_status)
2259 printf("\n");
2260 SDL_Quit();
2261 exit(0);
2264 static void toggle_audio_display(void)
2266 if (cur_stream) {
2267 cur_stream->show_audio = !cur_stream->show_audio;
2271 /* handle an event sent by the GUI */
2272 static void event_loop(void)
2274 SDL_Event event;
2275 double incr, pos, frac;
2277 for(;;) {
2278 SDL_WaitEvent(&event);
2279 switch(event.type) {
2280 case SDL_KEYDOWN:
2281 switch(event.key.keysym.sym) {
2282 case SDLK_ESCAPE:
2283 case SDLK_q:
2284 do_exit();
2285 break;
2286 case SDLK_f:
2287 toggle_full_screen();
2288 break;
2289 case SDLK_p:
2290 case SDLK_SPACE:
2291 toggle_pause();
2292 break;
2293 case SDLK_s: //S: Step to next frame
2294 step_to_next_frame();
2295 break;
2296 case SDLK_a:
2297 if (cur_stream)
2298 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2299 break;
2300 case SDLK_v:
2301 if (cur_stream)
2302 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2303 break;
2304 case SDLK_t:
2305 if (cur_stream)
2306 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2307 break;
2308 case SDLK_w:
2309 toggle_audio_display();
2310 break;
2311 case SDLK_LEFT:
2312 incr = -10.0;
2313 goto do_seek;
2314 case SDLK_RIGHT:
2315 incr = 10.0;
2316 goto do_seek;
2317 case SDLK_UP:
2318 incr = 60.0;
2319 goto do_seek;
2320 case SDLK_DOWN:
2321 incr = -60.0;
2322 do_seek:
2323 if (cur_stream) {
2324 if (seek_by_bytes) {
2325 pos = url_ftell(cur_stream->ic->pb);
2326 if (cur_stream->ic->bit_rate)
2327 incr *= cur_stream->ic->bit_rate / 60.0;
2328 else
2329 incr *= 180000.0;
2330 pos += incr;
2331 stream_seek(cur_stream, pos, incr);
2332 } else {
2333 pos = get_master_clock(cur_stream);
2334 pos += incr;
2335 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2338 break;
2339 default:
2340 break;
2342 break;
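         /* a mouse click seeks to the fraction of the total duration that
            corresponds to the clicked fraction of the window width */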
2343 case SDL_MOUSEBUTTONDOWN:
2344 if (cur_stream) {
2345 int ns, hh, mm, ss;
2346 int tns, thh, tmm, tss;
2347 tns = cur_stream->ic->duration/1000000LL;
2348 thh = tns/3600;
2349 tmm = (tns%3600)/60;
2350 tss = (tns%60);
2351 frac = (double)event.button.x/(double)cur_stream->width;
2352 ns = frac*tns;
2353 hh = ns/3600;
2354 mm = (ns%3600)/60;
2355 ss = (ns%60);
2356 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2357 hh, mm, ss, thh, tmm, tss);
2358 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2360 break;
2361 case SDL_VIDEORESIZE:
2362 if (cur_stream) {
2363 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2364 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2365 screen_width = cur_stream->width = event.resize.w;
2366 screen_height= cur_stream->height= event.resize.h;
2368 break;
2369 case SDL_QUIT:
2370 case FF_QUIT_EVENT:
2371 do_exit();
2372 break;
2373 case FF_ALLOC_EVENT:
2374 video_open(event.user.data1);
2375 alloc_picture(event.user.data1);
2376 break;
2377 case FF_REFRESH_EVENT:
2378 video_refresh_timer(event.user.data1);
2379 break;
2380 default:
2381 break;
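 /* command line option handlers; they fill the globals referenced by the
    options[] table below */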
2386 static void opt_frame_size(const char *arg)
2388 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2389 fprintf(stderr, "Incorrect frame size\n");
2390 exit(1);
2392 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2393 fprintf(stderr, "Frame size must be a multiple of 2\n");
2394 exit(1);
2398 static int opt_width(const char *opt, const char *arg)
2400 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2401 return 0;
2404 static int opt_height(const char *opt, const char *arg)
2406 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2407 return 0;
2410 static void opt_format(const char *arg)
2412 file_iformat = av_find_input_format(arg);
2413 if (!file_iformat) {
2414 fprintf(stderr, "Unknown input format: %s\n", arg);
2415 exit(1);
2419 static void opt_frame_pix_fmt(const char *arg)
2421 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2424 static int opt_sync(const char *opt, const char *arg)
2426 if (!strcmp(arg, "audio"))
2427 av_sync_type = AV_SYNC_AUDIO_MASTER;
2428 else if (!strcmp(arg, "video"))
2429 av_sync_type = AV_SYNC_VIDEO_MASTER;
2430 else if (!strcmp(arg, "ext"))
2431 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2432 else {
2433 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2434 exit(1);
2436 return 0;
2439 static int opt_seek(const char *opt, const char *arg)
2441 start_time = parse_time_or_die(opt, arg, 1);
2442 return 0;
2445 static int opt_debug(const char *opt, const char *arg)
2447 av_log_set_level(99);
2448 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2449 return 0;
2452 static int opt_vismv(const char *opt, const char *arg)
2454 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2455 return 0;
2458 static int opt_thread_count(const char *opt, const char *arg)
2460 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2461 #if !defined(HAVE_THREADS)
2462 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2463 #endif
2464 return 0;
2467 static const OptionDef options[] = {
2468 { "h", OPT_EXIT, {(void*)show_help}, "show help" },
2469 { "version", OPT_EXIT, {(void*)show_version}, "show version" },
2470 { "L", OPT_EXIT, {(void*)show_license}, "show license" },
2471 { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
2472 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2473 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2474 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2475 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2476 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2477 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2478 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2479 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
2480 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2481 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2482 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2483 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2484 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2485 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2486 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2487 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2488 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2489 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2490 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2491 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2492 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2493 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2494 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2495 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2496 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2497 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
2498 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2499 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2500 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2501 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2502 { NULL, },
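 /* example invocations (illustrative only, not part of the original source):
  *   ffplay movie.mpg
  *   ffplay -x 640 -y 480 -sync audio movie.mpg
  *   ffplay -an -f mpegts -ss 30 recording.ts
  */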
2505 static void show_help(void)
2507 printf("usage: ffplay [options] input_file\n"
2508 "Simple media player\n");
2509 printf("\n");
2510 show_help_options(options, "Main options:\n",
2511 OPT_EXPERT, 0);
2512 show_help_options(options, "\nAdvanced options:\n",
2513 OPT_EXPERT, OPT_EXPERT);
2514 printf("\nWhile playing:\n"
2515 "q, ESC quit\n"
2516 "f toggle full screen\n"
2517 "p, SPC pause\n"
2518 "a cycle audio channel\n"
2519 "v cycle video channel\n"
2520 "t cycle subtitle channel\n"
2521 "w show audio waves\n"
2522 "left/right seek backward/forward 10 seconds\n"
2523 "down/up seek backward/forward 1 minute\n"
2524 "mouse click seek to percentage in file corresponding to fraction of width\n"
2528 static void opt_input_file(const char *filename)
2530 if (!strcmp(filename, "-"))
2531 filename = "pipe:";
2532 input_filename = filename;
2535 /* program entry point */
2536 int main(int argc, char **argv)
2538 int flags, i;
2540     /* register all codecs, demuxers and protocols */
2541 avcodec_register_all();
2542 avdevice_register_all();
2543 av_register_all();
2545 for(i=0; i<CODEC_TYPE_NB; i++){
2546 avctx_opts[i]= avcodec_alloc_context2(i);
2548 avformat_opts = av_alloc_format_context();
2549 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
2551 show_banner();
2553 parse_options(argc, argv, options, opt_input_file);
2555 if (!input_filename) {
2556 fprintf(stderr, "An input file must be specified\n");
2557 exit(1);
2560 if (display_disable) {
2561 video_disable = 1;
2563 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2564 #if !defined(__MINGW32__) && !defined(__APPLE__)
2565 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2566 #endif
2567 if (SDL_Init (flags)) {
2568 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2569 exit(1);
2572 if (!display_disable) {
2573 #ifdef HAVE_SDL_VIDEO_SIZE
2574 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2575 fs_screen_width = vi->current_w;
2576 fs_screen_height = vi->current_h;
2577 #endif
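     /* ignore SDL events that are never handled so they do not pile up in
        the event queue */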
2580 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2581 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2582 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2583 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
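     /* flush_pkt is queued after a seek to tell the decoder threads to
        reset their state */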
2585 av_init_packet(&flush_pkt);
2586 flush_pkt.data= "FLUSH";
2588 cur_stream = stream_open(input_filename, file_iformat);
2590 event_loop();
2592 /* never returns */
2594 return 0;