1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
31 #include "cmdutils.h"
33 #include <SDL.h>
34 #include <SDL_thread.h>
36 #ifdef __MINGW32__
37 #undef main /* We don't want SDL to override our main() */
38 #endif
40 #undef exit
42 const char program_name[] = "FFplay";
43 const int program_birth_year = 2003;
45 //#define DEBUG_SYNC
47 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
48 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
49 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
51 /* SDL audio buffer size, in samples. Should be small to have precise
52 A/V sync as SDL does not have hardware buffer fullness info. */
53 #define SDL_AUDIO_BUFFER_SIZE 1024
55 /* no AV sync correction is done if below the AV sync threshold */
56 #define AV_SYNC_THRESHOLD 0.01
57 /* no AV correction is done if the error is too big */
58 #define AV_NOSYNC_THRESHOLD 10.0
60 /* maximum audio speed change to get correct sync */
61 #define SAMPLE_CORRECTION_PERCENT_MAX 10
63 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
64 #define AUDIO_DIFF_AVG_NB 20
66 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
67 #define SAMPLE_ARRAY_SIZE (2*65536)
69 static int sws_flags = SWS_BICUBIC;
71 typedef struct PacketQueue {
72 AVPacketList *first_pkt, *last_pkt;
73 int nb_packets;
74 int size;
75 int abort_request;
76 SDL_mutex *mutex;
77 SDL_cond *cond;
78 } PacketQueue;
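/* Thread-safe FIFO of demuxed packets: the demuxer thread appends with
   packet_queue_put() while a decoder thread drains it with packet_queue_get();
   the mutex/cond pair provides the blocking get, and abort_request lets either
   side wake up and bail out during shutdown. */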
80 #define VIDEO_PICTURE_QUEUE_SIZE 1
81 #define SUBPICTURE_QUEUE_SIZE 4
83 typedef struct VideoPicture {
84 double pts; ///<presentation time stamp for this picture
85 SDL_Overlay *bmp;
86 int width, height; /* source height & width */
87 int allocated;
88 } VideoPicture;
90 typedef struct SubPicture {
91 double pts; /* presentation time stamp for this picture */
92 AVSubtitle sub;
93 } SubPicture;
95 enum {
96 AV_SYNC_AUDIO_MASTER, /* default choice */
97 AV_SYNC_VIDEO_MASTER,
98 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
101 typedef struct VideoState {
102 SDL_Thread *parse_tid;
103 SDL_Thread *video_tid;
104 AVInputFormat *iformat;
105 int no_background;
106 int abort_request;
107 int paused;
108 int last_paused;
109 int seek_req;
110 int seek_flags;
111 int64_t seek_pos;
112 AVFormatContext *ic;
113 int dtg_active_format;
115 int audio_stream;
117 int av_sync_type;
118 double external_clock; /* external clock base */
119 int64_t external_clock_time;
121 double audio_clock;
122 double audio_diff_cum; /* used for AV difference average computation */
123 double audio_diff_avg_coef;
124 double audio_diff_threshold;
125 int audio_diff_avg_count;
126 AVStream *audio_st;
127 PacketQueue audioq;
128 int audio_hw_buf_size;
129 /* samples output by the codec. we reserve more space for avsync
130 compensation */
131 DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
132 DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
133 uint8_t *audio_buf;
134 unsigned int audio_buf_size; /* in bytes */
135 int audio_buf_index; /* in bytes */
136 AVPacket audio_pkt;
137 uint8_t *audio_pkt_data;
138 int audio_pkt_size;
139 enum SampleFormat audio_src_fmt;
140 AVAudioConvert *reformat_ctx;
142 int show_audio; /* if true, display audio samples */
143 int16_t sample_array[SAMPLE_ARRAY_SIZE];
144 int sample_array_index;
145 int last_i_start;
147 SDL_Thread *subtitle_tid;
148 int subtitle_stream;
149 int subtitle_stream_changed;
150 AVStream *subtitle_st;
151 PacketQueue subtitleq;
152 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
153 int subpq_size, subpq_rindex, subpq_windex;
154 SDL_mutex *subpq_mutex;
155 SDL_cond *subpq_cond;
157 double frame_timer;
158 double frame_last_pts;
159 double frame_last_delay;
160 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
161 int video_stream;
162 AVStream *video_st;
163 PacketQueue videoq;
164 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
165 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
166 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
167 int pictq_size, pictq_rindex, pictq_windex;
168 SDL_mutex *pictq_mutex;
169 SDL_cond *pictq_cond;
171 // QETimer *video_timer;
172 char filename[1024];
173 int width, height, xleft, ytop;
174 } VideoState;
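/* One VideoState per open file. Roughly: decode_thread() demuxes packets into
   the three queues, video_thread() and subtitle_thread() decode into the small
   picture/subpicture ring buffers, the SDL audio callback pulls and plays audio,
   and the main (event) thread does all SDL rendering and window management. */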
176 static void show_help(void);
177 static int audio_write_get_buf_size(VideoState *is);
179 /* options specified by the user */
180 static AVInputFormat *file_iformat;
181 static const char *input_filename;
182 static int fs_screen_width;
183 static int fs_screen_height;
184 static int screen_width = 0;
185 static int screen_height = 0;
186 static int frame_width = 0;
187 static int frame_height = 0;
188 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
189 static int audio_disable;
190 static int video_disable;
191 static int wanted_audio_stream= 0;
192 static int wanted_video_stream= 0;
193 static int seek_by_bytes;
194 static int display_disable;
195 static int show_status;
196 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
197 static int64_t start_time = AV_NOPTS_VALUE;
198 static int debug = 0;
199 static int debug_mv = 0;
200 static int step = 0;
201 static int thread_count = 1;
202 static int workaround_bugs = 1;
203 static int fast = 0;
204 static int genpts = 0;
205 static int lowres = 0;
206 static int idct = FF_IDCT_AUTO;
207 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
208 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
209 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
210 static int error_resilience = FF_ER_CAREFUL;
211 static int error_concealment = 3;
212 static int decoder_reorder_pts= 0;
214 /* current context */
215 static int is_full_screen;
216 static VideoState *cur_stream;
217 static int64_t audio_callback_time;
219 AVPacket flush_pkt;
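/* flush_pkt is a sentinel packet: after a seek it is pushed into each packet
   queue so the decoder threads know to call avcodec_flush_buffers() instead of
   decoding its payload. */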
221 #define FF_ALLOC_EVENT (SDL_USEREVENT)
222 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
223 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
225 SDL_Surface *screen;
227 /* packet queue handling */
228 static void packet_queue_init(PacketQueue *q)
230 memset(q, 0, sizeof(PacketQueue));
231 q->mutex = SDL_CreateMutex();
232 q->cond = SDL_CreateCond();
235 static void packet_queue_flush(PacketQueue *q)
237 AVPacketList *pkt, *pkt1;
239 SDL_LockMutex(q->mutex);
240 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
241 pkt1 = pkt->next;
242 av_free_packet(&pkt->pkt);
243 av_freep(&pkt);
245 q->last_pkt = NULL;
246 q->first_pkt = NULL;
247 q->nb_packets = 0;
248 q->size = 0;
249 SDL_UnlockMutex(q->mutex);
252 static void packet_queue_end(PacketQueue *q)
254 packet_queue_flush(q);
255 SDL_DestroyMutex(q->mutex);
256 SDL_DestroyCond(q->cond);
259 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
261 AVPacketList *pkt1;
263 /* duplicate the packet */
264 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
265 return -1;
267 pkt1 = av_malloc(sizeof(AVPacketList));
268 if (!pkt1)
269 return -1;
270 pkt1->pkt = *pkt;
271 pkt1->next = NULL;
274 SDL_LockMutex(q->mutex);
276 if (!q->last_pkt)
278 q->first_pkt = pkt1;
279 else
280 q->last_pkt->next = pkt1;
281 q->last_pkt = pkt1;
282 q->nb_packets++;
283 q->size += pkt1->pkt.size;
284 /* XXX: should duplicate packet data in DV case */
285 SDL_CondSignal(q->cond);
287 SDL_UnlockMutex(q->mutex);
288 return 0;
291 static void packet_queue_abort(PacketQueue *q)
293 SDL_LockMutex(q->mutex);
295 q->abort_request = 1;
297 SDL_CondSignal(q->cond);
299 SDL_UnlockMutex(q->mutex);
302 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
303 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
305 AVPacketList *pkt1;
306 int ret;
308 SDL_LockMutex(q->mutex);
310 for(;;) {
311 if (q->abort_request) {
312 ret = -1;
313 break;
316 pkt1 = q->first_pkt;
317 if (pkt1) {
318 q->first_pkt = pkt1->next;
319 if (!q->first_pkt)
320 q->last_pkt = NULL;
321 q->nb_packets--;
322 q->size -= pkt1->pkt.size;
323 *pkt = pkt1->pkt;
324 av_free(pkt1);
325 ret = 1;
326 break;
327 } else if (!block) {
328 ret = 0;
329 break;
330 } else {
331 SDL_CondWait(q->cond, q->mutex);
334 SDL_UnlockMutex(q->mutex);
335 return ret;
338 static inline void fill_rectangle(SDL_Surface *screen,
339 int x, int y, int w, int h, int color)
341 SDL_Rect rect;
342 rect.x = x;
343 rect.y = y;
344 rect.w = w;
345 rect.h = h;
346 SDL_FillRect(screen, &rect, color);
349 #if 0
350 /* draw only the border of a rectangle */
351 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
353 int w1, w2, h1, h2;
355 /* fill the background */
356 w1 = x;
357 if (w1 < 0)
358 w1 = 0;
359 w2 = s->width - (x + w);
360 if (w2 < 0)
361 w2 = 0;
362 h1 = y;
363 if (h1 < 0)
364 h1 = 0;
365 h2 = s->height - (y + h);
366 if (h2 < 0)
367 h2 = 0;
368 fill_rectangle(screen,
369 s->xleft, s->ytop,
370 w1, s->height,
371 color);
372 fill_rectangle(screen,
373 s->xleft + s->width - w2, s->ytop,
374 w2, s->height,
375 color);
376 fill_rectangle(screen,
377 s->xleft + w1, s->ytop,
378 s->width - w1 - w2, h1,
379 color);
380 fill_rectangle(screen,
381 s->xleft + w1, s->ytop + s->height - h2,
382 s->width - w1 - w2, h2,
383 color);
385 #endif
389 #define SCALEBITS 10
390 #define ONE_HALF (1 << (SCALEBITS - 1))
391 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
393 #define RGB_TO_Y_CCIR(r, g, b) \
394 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
395 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
397 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
398 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
399 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
401 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
402 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
403 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
405 #define ALPHA_BLEND(a, oldp, newp, s)\
406 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
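/* Fixed-point (SCALEBITS fractional bits) RGB -> CCIR 601 YCbCr conversion plus
   a simple alpha blend; used below to convert subtitle palettes and composite
   them onto the video frame. */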
408 #define RGBA_IN(r, g, b, a, s)\
410 unsigned int v = ((const uint32_t *)(s))[0];\
411 a = (v >> 24) & 0xff;\
412 r = (v >> 16) & 0xff;\
413 g = (v >> 8) & 0xff;\
414 b = v & 0xff;\
417 #define YUVA_IN(y, u, v, a, s, pal)\
419 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
420 a = (val >> 24) & 0xff;\
421 y = (val >> 16) & 0xff;\
422 u = (val >> 8) & 0xff;\
423 v = val & 0xff;\
426 #define YUVA_OUT(d, y, u, v, a)\
428 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
432 #define BPP 1
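/* blend_subrect() below composites one palettized subtitle rectangle onto a
   YUV420P destination: luma is blended per pixel, while chroma is blended from
   the average of each 2x2 block (or 2x1/1x2 at odd edges) to match the 4:2:0
   subsampling. */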
434 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
436 int wrap, wrap3, width2, skip2;
437 int y, u, v, a, u1, v1, a1, w, h;
438 uint8_t *lum, *cb, *cr;
439 const uint8_t *p;
440 const uint32_t *pal;
441 int dstx, dsty, dstw, dsth;
443 dstx = FFMIN(FFMAX(rect->x, 0), imgw);
444 dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
445 dsty = FFMIN(FFMAX(rect->y, 0), imgh);
446 dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
447 lum = dst->data[0] + dsty * dst->linesize[0];
448 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
449 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
451 width2 = (dstw + 1) >> 1;
452 skip2 = dstx >> 1;
453 wrap = dst->linesize[0];
454 wrap3 = rect->linesize;
455 p = rect->bitmap;
456 pal = rect->rgba_palette; /* Now in YCrCb! */
458 if (dsty & 1) {
459 lum += dstx;
460 cb += skip2;
461 cr += skip2;
463 if (dstx & 1) {
464 YUVA_IN(y, u, v, a, p, pal);
465 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
466 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
467 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
468 cb++;
469 cr++;
470 lum++;
471 p += BPP;
473 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
474 YUVA_IN(y, u, v, a, p, pal);
475 u1 = u;
476 v1 = v;
477 a1 = a;
478 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
480 YUVA_IN(y, u, v, a, p + BPP, pal);
481 u1 += u;
482 v1 += v;
483 a1 += a;
484 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
485 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
486 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
487 cb++;
488 cr++;
489 p += 2 * BPP;
490 lum += 2;
492 if (w) {
493 YUVA_IN(y, u, v, a, p, pal);
494 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
495 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
496 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
498 p += wrap3 + (wrap3 - dstw * BPP);
499 lum += wrap + (wrap - dstw - dstx);
500 cb += dst->linesize[1] - width2 - skip2;
501 cr += dst->linesize[2] - width2 - skip2;
503 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
504 lum += dstx;
505 cb += skip2;
506 cr += skip2;
508 if (dstx & 1) {
509 YUVA_IN(y, u, v, a, p, pal);
510 u1 = u;
511 v1 = v;
512 a1 = a;
513 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514 p += wrap3;
515 lum += wrap;
516 YUVA_IN(y, u, v, a, p, pal);
517 u1 += u;
518 v1 += v;
519 a1 += a;
520 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
522 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
523 cb++;
524 cr++;
525 p += -wrap3 + BPP;
526 lum += -wrap + 1;
528 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
529 YUVA_IN(y, u, v, a, p, pal);
530 u1 = u;
531 v1 = v;
532 a1 = a;
533 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535 YUVA_IN(y, u, v, a, p, pal);
536 u1 += u;
537 v1 += v;
538 a1 += a;
539 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540 p += wrap3;
541 lum += wrap;
543 YUVA_IN(y, u, v, a, p, pal);
544 u1 += u;
545 v1 += v;
546 a1 += a;
547 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
549 YUVA_IN(y, u, v, a, p, pal);
550 u1 += u;
551 v1 += v;
552 a1 += a;
553 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
555 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
556 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
558 cb++;
559 cr++;
560 p += -wrap3 + 2 * BPP;
561 lum += -wrap + 2;
563 if (w) {
564 YUVA_IN(y, u, v, a, p, pal);
565 u1 = u;
566 v1 = v;
567 a1 = a;
568 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569 p += wrap3;
570 lum += wrap;
571 YUVA_IN(y, u, v, a, p, pal);
572 u1 += u;
573 v1 += v;
574 a1 += a;
575 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
577 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
578 cb++;
579 cr++;
580 p += -wrap3 + BPP;
581 lum += -wrap + 1;
583 p += wrap3 + (wrap3 - dstw * BPP);
584 lum += wrap + (wrap - dstw - dstx);
585 cb += dst->linesize[1] - width2 - skip2;
586 cr += dst->linesize[2] - width2 - skip2;
588 /* handle odd height */
589 if (h) {
590 lum += dstx;
591 cb += skip2;
592 cr += skip2;
594 if (dstx & 1) {
595 YUVA_IN(y, u, v, a, p, pal);
596 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
598 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
599 cb++;
600 cr++;
601 lum++;
602 p += BPP;
604 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
605 YUVA_IN(y, u, v, a, p, pal);
606 u1 = u;
607 v1 = v;
608 a1 = a;
609 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611 YUVA_IN(y, u, v, a, p + BPP, pal);
612 u1 += u;
613 v1 += v;
614 a1 += a;
615 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
616             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1); /* use the summed chroma, as in the loops above */
617             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
618 cb++;
619 cr++;
620 p += 2 * BPP;
621 lum += 2;
623 if (w) {
624 YUVA_IN(y, u, v, a, p, pal);
625 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
627 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632 static void free_subpicture(SubPicture *sp)
634 int i;
636 for (i = 0; i < sp->sub.num_rects; i++)
638 av_free(sp->sub.rects[i].bitmap);
639 av_free(sp->sub.rects[i].rgba_palette);
642 av_free(sp->sub.rects);
644 memset(&sp->sub, 0, sizeof(AVSubtitle));
647 static void video_image_display(VideoState *is)
649 VideoPicture *vp;
650 SubPicture *sp;
651 AVPicture pict;
652 float aspect_ratio;
653 int width, height, x, y;
654 SDL_Rect rect;
655 int i;
657 vp = &is->pictq[is->pictq_rindex];
658 if (vp->bmp) {
659 /* XXX: use variable in the frame */
660 if (is->video_st->codec->sample_aspect_ratio.num == 0)
661 aspect_ratio = 0;
662 else
663 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
664 * is->video_st->codec->width / is->video_st->codec->height;
665 if (aspect_ratio <= 0.0)
666 aspect_ratio = (float)is->video_st->codec->width /
667 (float)is->video_st->codec->height;
668 /* if an active format is indicated, then it overrides the
669 mpeg format */
670 #if 0
671 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
672 is->dtg_active_format = is->video_st->codec->dtg_active_format;
673 printf("dtg_active_format=%d\n", is->dtg_active_format);
675 #endif
676 #if 0
677 switch(is->video_st->codec->dtg_active_format) {
678 case FF_DTG_AFD_SAME:
679 default:
680 /* nothing to do */
681 break;
682 case FF_DTG_AFD_4_3:
683 aspect_ratio = 4.0 / 3.0;
684 break;
685 case FF_DTG_AFD_16_9:
686 aspect_ratio = 16.0 / 9.0;
687 break;
688 case FF_DTG_AFD_14_9:
689 aspect_ratio = 14.0 / 9.0;
690 break;
691 case FF_DTG_AFD_4_3_SP_14_9:
692 aspect_ratio = 14.0 / 9.0;
693 break;
694 case FF_DTG_AFD_16_9_SP_14_9:
695 aspect_ratio = 14.0 / 9.0;
696 break;
697 case FF_DTG_AFD_SP_4_3:
698 aspect_ratio = 4.0 / 3.0;
699 break;
701 #endif
703 if (is->subtitle_st)
705 if (is->subpq_size > 0)
707 sp = &is->subpq[is->subpq_rindex];
709 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
711 SDL_LockYUVOverlay (vp->bmp);
713 pict.data[0] = vp->bmp->pixels[0];
714 pict.data[1] = vp->bmp->pixels[2];
715 pict.data[2] = vp->bmp->pixels[1];
717 pict.linesize[0] = vp->bmp->pitches[0];
718 pict.linesize[1] = vp->bmp->pitches[2];
719 pict.linesize[2] = vp->bmp->pitches[1];
721 for (i = 0; i < sp->sub.num_rects; i++)
722 blend_subrect(&pict, &sp->sub.rects[i],
723 vp->bmp->w, vp->bmp->h);
725 SDL_UnlockYUVOverlay (vp->bmp);
731 /* XXX: we suppose the screen has a 1.0 pixel ratio */
732 height = is->height;
733 width = ((int)rint(height * aspect_ratio)) & -3;
734 if (width > is->width) {
735 width = is->width;
736 height = ((int)rint(width / aspect_ratio)) & -3;
738 x = (is->width - width) / 2;
739 y = (is->height - height) / 2;
740 if (!is->no_background) {
741 /* fill the background */
742 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
743 } else {
744 is->no_background = 0;
746 rect.x = is->xleft + x;
747 rect.y = is->ytop + y;
748 rect.w = width;
749 rect.h = height;
750 SDL_DisplayYUVOverlay(vp->bmp, &rect);
751 } else {
752 #if 0
753 fill_rectangle(screen,
754 is->xleft, is->ytop, is->width, is->height,
755 QERGB(0x00, 0x00, 0x00));
756 #endif
760 static inline int compute_mod(int a, int b)
762 a = a % b;
763 if (a >= 0)
764 return a;
765 else
766 return a + b;
769 static void video_audio_display(VideoState *s)
771 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
772 int ch, channels, h, h2, bgcolor, fgcolor;
773     int64_t time_diff; /* microseconds, from av_gettime() */
775 /* compute display index : center on currently output samples */
776 channels = s->audio_st->codec->channels;
777 nb_display_channels = channels;
778 if (!s->paused) {
779 n = 2 * channels;
780 delay = audio_write_get_buf_size(s);
781 delay /= n;
783 /* to be more precise, we take into account the time spent since
784 the last buffer computation */
785 if (audio_callback_time) {
786 time_diff = av_gettime() - audio_callback_time;
787 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
790 delay -= s->width / 2;
791 if (delay < s->width)
792 delay = s->width;
794 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
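/* The loop below scans up to 1000 recent samples for a strong falling zero
   crossing ((b^c) < 0 means a sign change) and starts drawing there, which,
   like an oscilloscope trigger, keeps the displayed waveform roughly stable
   between refreshes. */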
796 h= INT_MIN;
797 for(i=0; i<1000; i+=channels){
798 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
799 int a= s->sample_array[idx];
800 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
801 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
802 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
803 int score= a-d;
804 if(h<score && (b^c)<0){
805 h= score;
806 i_start= idx;
810 s->last_i_start = i_start;
811 } else {
812 i_start = s->last_i_start;
815 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
816 fill_rectangle(screen,
817 s->xleft, s->ytop, s->width, s->height,
818 bgcolor);
820 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
822 /* total height for one channel */
823 h = s->height / nb_display_channels;
824 /* graph height / 2 */
825 h2 = (h * 9) / 20;
826 for(ch = 0;ch < nb_display_channels; ch++) {
827 i = i_start + ch;
828 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
829 for(x = 0; x < s->width; x++) {
830 y = (s->sample_array[i] * h2) >> 15;
831 if (y < 0) {
832 y = -y;
833 ys = y1 - y;
834 } else {
835 ys = y1;
837 fill_rectangle(screen,
838 s->xleft + x, ys, 1, y,
839 fgcolor);
840 i += channels;
841 if (i >= SAMPLE_ARRAY_SIZE)
842 i -= SAMPLE_ARRAY_SIZE;
846 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
848 for(ch = 1;ch < nb_display_channels; ch++) {
849 y = s->ytop + ch * h;
850 fill_rectangle(screen,
851 s->xleft, y, s->width, 1,
852 fgcolor);
854 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
857 static int video_open(VideoState *is){
858 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
859 int w,h;
861 if(is_full_screen) flags |= SDL_FULLSCREEN;
862 else flags |= SDL_RESIZABLE;
864 if (is_full_screen && fs_screen_width) {
865 w = fs_screen_width;
866 h = fs_screen_height;
867 } else if(!is_full_screen && screen_width){
868 w = screen_width;
869 h = screen_height;
870 }else if (is->video_st && is->video_st->codec->width){
871 w = is->video_st->codec->width;
872 h = is->video_st->codec->height;
873 } else {
874 w = 640;
875 h = 480;
877 #ifndef __APPLE__
878 screen = SDL_SetVideoMode(w, h, 0, flags);
879 #else
880 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
881 screen = SDL_SetVideoMode(w, h, 24, flags);
882 #endif
883 if (!screen) {
884 fprintf(stderr, "SDL: could not set video mode - exiting\n");
885 return -1;
887 SDL_WM_SetCaption("FFplay", "FFplay");
889 is->width = screen->w;
890 is->height = screen->h;
892 return 0;
895 /* display the current picture, if any */
896 static void video_display(VideoState *is)
898 if(!screen)
899 video_open(cur_stream);
900 if (is->audio_st && is->show_audio)
901 video_audio_display(is);
902 else if (is->video_st)
903 video_image_display(is);
906 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
908 SDL_Event event;
909 event.type = FF_REFRESH_EVENT;
910 event.user.data1 = opaque;
911 SDL_PushEvent(&event);
912 return 0; /* 0 means stop timer */
915 /* schedule a video refresh in 'delay' ms */
916 static void schedule_refresh(VideoState *is, int delay)
918 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
921 /* get the current audio clock value */
922 static double get_audio_clock(VideoState *is)
924 double pts;
925 int hw_buf_size, bytes_per_sec;
926 pts = is->audio_clock;
927 hw_buf_size = audio_write_get_buf_size(is);
928 bytes_per_sec = 0;
929 if (is->audio_st) {
930 bytes_per_sec = is->audio_st->codec->sample_rate *
931 2 * is->audio_st->codec->channels;
933 if (bytes_per_sec)
934 pts -= (double)hw_buf_size / bytes_per_sec;
935 return pts;
938 /* get the current video clock value */
939 static double get_video_clock(VideoState *is)
941 double delta;
942 if (is->paused) {
943 delta = 0;
944 } else {
945 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
947 return is->video_current_pts + delta;
950 /* get the current external clock value */
951 static double get_external_clock(VideoState *is)
953 int64_t ti;
954 ti = av_gettime();
955 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
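/* Which clock is "master" depends on av_sync_type: audio by default, video, or
   the external (wall-time based) clock; the non-master streams are corrected
   towards it in video_refresh_timer() and synchronize_audio(). */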
958 /* get the current master clock value */
959 static double get_master_clock(VideoState *is)
961 double val;
963 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
964 if (is->video_st)
965 val = get_video_clock(is);
966 else
967 val = get_audio_clock(is);
968 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
969 if (is->audio_st)
970 val = get_audio_clock(is);
971 else
972 val = get_video_clock(is);
973 } else {
974 val = get_external_clock(is);
976 return val;
979 /* seek in the stream */
980 static void stream_seek(VideoState *is, int64_t pos, int rel)
982 if (!is->seek_req) {
983 is->seek_pos = pos;
984 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
985 if (seek_by_bytes)
986 is->seek_flags |= AVSEEK_FLAG_BYTE;
987 is->seek_req = 1;
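/* The actual seek is performed asynchronously by decode_thread(): it calls
   av_seek_frame(), flushes the packet queues and pushes flush_pkt so the
   decoders reset their state. */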
991 /* pause or resume the video */
992 static void stream_pause(VideoState *is)
994 is->paused = !is->paused;
995 if (!is->paused) {
996 is->video_current_pts = get_video_clock(is);
997 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
1001 /* called to display each frame */
1002 static void video_refresh_timer(void *opaque)
1004 VideoState *is = opaque;
1005 VideoPicture *vp;
1006 double actual_delay, delay, sync_threshold, ref_clock, diff;
1008 SubPicture *sp, *sp2;
1010 if (is->video_st) {
1011 if (is->pictq_size == 0) {
1012 /* if no picture, need to wait */
1013 schedule_refresh(is, 1);
1014 } else {
1015 /* dequeue the picture */
1016 vp = &is->pictq[is->pictq_rindex];
1018 /* update current video pts */
1019 is->video_current_pts = vp->pts;
1020 is->video_current_pts_time = av_gettime();
1022 /* compute nominal delay */
1023 delay = vp->pts - is->frame_last_pts;
1024 if (delay <= 0 || delay >= 2.0) {
1025 /* if incorrect delay, use previous one */
1026 delay = is->frame_last_delay;
1028 is->frame_last_delay = delay;
1029 is->frame_last_pts = vp->pts;
1031 /* update delay to follow master synchronisation source */
1032 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1033 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1034 /* if video is slave, we try to correct big delays by
1035 duplicating or deleting a frame */
1036 ref_clock = get_master_clock(is);
1037 diff = vp->pts - ref_clock;
1039 /* skip or repeat frame. We take into account the
1040 delay to compute the threshold. I still don't know
1041 if it is the best guess */
1042 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1043 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1044 if (diff <= -sync_threshold)
1045 delay = 0;
1046 else if (diff >= sync_threshold)
1047 delay = 2 * delay;
1051 is->frame_timer += delay;
1052             /* compute the REAL delay (we need to do that to avoid
1053                long-term errors) */
1054 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1055 if (actual_delay < 0.010) {
1056 /* XXX: should skip picture */
1057 actual_delay = 0.010;
1059 /* launch timer for next picture */
1060 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1062 #if defined(DEBUG_SYNC)
1063 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1064 delay, actual_delay, vp->pts, -diff);
1065 #endif
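/* Net effect of the correction above: if the frame is late by more than
   sync_threshold its delay is forced to 0 (display as soon as possible), and if
   it is well ahead of the master clock the delay is doubled, holding the
   previous picture on screen longer. */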
1067 if(is->subtitle_st) {
1068 if (is->subtitle_stream_changed) {
1069 SDL_LockMutex(is->subpq_mutex);
1071 while (is->subpq_size) {
1072 free_subpicture(&is->subpq[is->subpq_rindex]);
1074 /* update queue size and signal for next picture */
1075 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1076 is->subpq_rindex = 0;
1078 is->subpq_size--;
1080 is->subtitle_stream_changed = 0;
1082 SDL_CondSignal(is->subpq_cond);
1083 SDL_UnlockMutex(is->subpq_mutex);
1084 } else {
1085 if (is->subpq_size > 0) {
1086 sp = &is->subpq[is->subpq_rindex];
1088 if (is->subpq_size > 1)
1089 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1090 else
1091 sp2 = NULL;
1093 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1094 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1096 free_subpicture(sp);
1098 /* update queue size and signal for next picture */
1099 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1100 is->subpq_rindex = 0;
1102 SDL_LockMutex(is->subpq_mutex);
1103 is->subpq_size--;
1104 SDL_CondSignal(is->subpq_cond);
1105 SDL_UnlockMutex(is->subpq_mutex);
1111 /* display picture */
1112 video_display(is);
1114 /* update queue size and signal for next picture */
1115 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1116 is->pictq_rindex = 0;
1118 SDL_LockMutex(is->pictq_mutex);
1119 is->pictq_size--;
1120 SDL_CondSignal(is->pictq_cond);
1121 SDL_UnlockMutex(is->pictq_mutex);
1123 } else if (is->audio_st) {
1124 /* draw the next audio frame */
1126 schedule_refresh(is, 40);
1128         /* if only audio stream, then display the audio bars (better
1129            than nothing, just to test the implementation) */
1131 /* display picture */
1132 video_display(is);
1133 } else {
1134 schedule_refresh(is, 100);
1136 if (show_status) {
1137 static int64_t last_time;
1138 int64_t cur_time;
1139 int aqsize, vqsize, sqsize;
1140 double av_diff;
1142 cur_time = av_gettime();
1143 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1144 aqsize = 0;
1145 vqsize = 0;
1146 sqsize = 0;
1147 if (is->audio_st)
1148 aqsize = is->audioq.size;
1149 if (is->video_st)
1150 vqsize = is->videoq.size;
1151 if (is->subtitle_st)
1152 sqsize = is->subtitleq.size;
1153 av_diff = 0;
1154 if (is->audio_st && is->video_st)
1155 av_diff = get_audio_clock(is) - get_video_clock(is);
1156 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1157 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1158 fflush(stdout);
1159 last_time = cur_time;
1164 /* allocate a picture (this must be done in the main thread to avoid
1165    potential locking problems) */
1166 static void alloc_picture(void *opaque)
1168 VideoState *is = opaque;
1169 VideoPicture *vp;
1171 vp = &is->pictq[is->pictq_windex];
1173 if (vp->bmp)
1174 SDL_FreeYUVOverlay(vp->bmp);
1176 #if 0
1177 /* XXX: use generic function */
1178 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1179 switch(is->video_st->codec->pix_fmt) {
1180 case PIX_FMT_YUV420P:
1181 case PIX_FMT_YUV422P:
1182 case PIX_FMT_YUV444P:
1183 case PIX_FMT_YUYV422:
1184 case PIX_FMT_YUV410P:
1185 case PIX_FMT_YUV411P:
1186 is_yuv = 1;
1187 break;
1188 default:
1189 is_yuv = 0;
1190 break;
1192 #endif
1193 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1194 is->video_st->codec->height,
1195 SDL_YV12_OVERLAY,
1196 screen);
1197 vp->width = is->video_st->codec->width;
1198 vp->height = is->video_st->codec->height;
1200 SDL_LockMutex(is->pictq_mutex);
1201 vp->allocated = 1;
1202 SDL_CondSignal(is->pictq_cond);
1203 SDL_UnlockMutex(is->pictq_mutex);
1208 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1210 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1212 VideoPicture *vp;
1213 int dst_pix_fmt;
1214 AVPicture pict;
1215 static struct SwsContext *img_convert_ctx;
1217 /* wait until we have space to put a new picture */
1218 SDL_LockMutex(is->pictq_mutex);
1219 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1220 !is->videoq.abort_request) {
1221 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1223 SDL_UnlockMutex(is->pictq_mutex);
1225 if (is->videoq.abort_request)
1226 return -1;
1228 vp = &is->pictq[is->pictq_windex];
1230 /* alloc or resize hardware picture buffer */
1231 if (!vp->bmp ||
1232 vp->width != is->video_st->codec->width ||
1233 vp->height != is->video_st->codec->height) {
1234 SDL_Event event;
1236 vp->allocated = 0;
1238 /* the allocation must be done in the main thread to avoid
1239 locking problems */
1240 event.type = FF_ALLOC_EVENT;
1241 event.user.data1 = is;
1242 SDL_PushEvent(&event);
1244 /* wait until the picture is allocated */
1245 SDL_LockMutex(is->pictq_mutex);
1246 while (!vp->allocated && !is->videoq.abort_request) {
1247 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1249 SDL_UnlockMutex(is->pictq_mutex);
1251 if (is->videoq.abort_request)
1252 return -1;
1255 /* if the frame is not skipped, then display it */
1256 if (vp->bmp) {
1257 /* get a pointer on the bitmap */
1258 SDL_LockYUVOverlay (vp->bmp);
1260 dst_pix_fmt = PIX_FMT_YUV420P;
1261 pict.data[0] = vp->bmp->pixels[0];
1262 pict.data[1] = vp->bmp->pixels[2];
1263 pict.data[2] = vp->bmp->pixels[1];
1265 pict.linesize[0] = vp->bmp->pitches[0];
1266 pict.linesize[1] = vp->bmp->pitches[2];
1267 pict.linesize[2] = vp->bmp->pitches[1];
1268 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1269 is->video_st->codec->width, is->video_st->codec->height,
1270 is->video_st->codec->pix_fmt,
1271 is->video_st->codec->width, is->video_st->codec->height,
1272 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1273 if (img_convert_ctx == NULL) {
1274 fprintf(stderr, "Cannot initialize the conversion context\n");
1275 exit(1);
1277 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1278 0, is->video_st->codec->height, pict.data, pict.linesize);
1279 /* update the bitmap content */
1280 SDL_UnlockYUVOverlay(vp->bmp);
1282 vp->pts = pts;
1284 /* now we can update the picture count */
1285 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1286 is->pictq_windex = 0;
1287 SDL_LockMutex(is->pictq_mutex);
1288 is->pictq_size++;
1289 SDL_UnlockMutex(is->pictq_mutex);
1291 return 0;
1295 * compute the exact PTS for the picture if it is omitted in the stream
1296 * @param pts1 the dts of the pkt / pts of the frame
1298 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1300 double frame_delay, pts;
1302 pts = pts1;
1304 if (pts != 0) {
1305 /* update video clock with pts, if present */
1306 is->video_clock = pts;
1307 } else {
1308 pts = is->video_clock;
1310 /* update video clock for next frame */
1311 frame_delay = av_q2d(is->video_st->codec->time_base);
1312 /* for MPEG2, the frame can be repeated, so we update the
1313 clock accordingly */
1314 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1315 is->video_clock += frame_delay;
1317 #if defined(DEBUG_SYNC) && 0
1319 int ftype;
1320 if (src_frame->pict_type == FF_B_TYPE)
1321 ftype = 'B';
1322 else if (src_frame->pict_type == FF_I_TYPE)
1323 ftype = 'I';
1324 else
1325 ftype = 'P';
1326 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1327 ftype, pts, pts1);
1329 #endif
1330 return queue_picture(is, src_frame, pts);
1333 static int video_thread(void *arg)
1335 VideoState *is = arg;
1336 AVPacket pkt1, *pkt = &pkt1;
1337 int len1, got_picture;
1338 AVFrame *frame= avcodec_alloc_frame();
1339 double pts;
1341 for(;;) {
1342 while (is->paused && !is->videoq.abort_request) {
1343 SDL_Delay(10);
1345 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1346 break;
1348 if(pkt->data == flush_pkt.data){
1349 avcodec_flush_buffers(is->video_st->codec);
1350 continue;
1353 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1354 this packet, if any */
1355 is->video_st->codec->reordered_opaque= pkt->pts;
1356 len1 = avcodec_decode_video(is->video_st->codec,
1357 frame, &got_picture,
1358 pkt->data, pkt->size);
1360 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1361 && frame->reordered_opaque != AV_NOPTS_VALUE)
1362 pts= frame->reordered_opaque;
1363 else if(pkt->dts != AV_NOPTS_VALUE)
1364 pts= pkt->dts;
1365 else
1366 pts= 0;
1367 pts *= av_q2d(is->video_st->time_base);
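/* PTS recovery: reordered_opaque carries the packet pts through the decoder's
   frame reordering, so when the decoder reorders (or the container has no dts)
   the pts comes back out attached to the right picture; otherwise the packet
   dts is used as an approximation. */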
1369 // if (len1 < 0)
1370 // break;
1371 if (got_picture) {
1372 if (output_picture2(is, frame, pts) < 0)
1373 goto the_end;
1375 av_free_packet(pkt);
1376 if (step)
1377 if (cur_stream)
1378 stream_pause(cur_stream);
1380 the_end:
1381 av_free(frame);
1382 return 0;
1385 static int subtitle_thread(void *arg)
1387 VideoState *is = arg;
1388 SubPicture *sp;
1389 AVPacket pkt1, *pkt = &pkt1;
1390 int len1, got_subtitle;
1391 double pts;
1392 int i, j;
1393 int r, g, b, y, u, v, a;
1395 for(;;) {
1396 while (is->paused && !is->subtitleq.abort_request) {
1397 SDL_Delay(10);
1399 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1400 break;
1402 if(pkt->data == flush_pkt.data){
1403 avcodec_flush_buffers(is->subtitle_st->codec);
1404 continue;
1406 SDL_LockMutex(is->subpq_mutex);
1407 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1408 !is->subtitleq.abort_request) {
1409 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1411 SDL_UnlockMutex(is->subpq_mutex);
1413 if (is->subtitleq.abort_request)
1414 goto the_end;
1416 sp = &is->subpq[is->subpq_windex];
1418 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1419 this packet, if any */
1420 pts = 0;
1421 if (pkt->pts != AV_NOPTS_VALUE)
1422 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1424 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1425 &sp->sub, &got_subtitle,
1426 pkt->data, pkt->size);
1427 // if (len1 < 0)
1428 // break;
1429 if (got_subtitle && sp->sub.format == 0) {
1430 sp->pts = pts;
1432 for (i = 0; i < sp->sub.num_rects; i++)
1434 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1436 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1437 y = RGB_TO_Y_CCIR(r, g, b);
1438 u = RGB_TO_U_CCIR(r, g, b, 0);
1439 v = RGB_TO_V_CCIR(r, g, b, 0);
1440 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1444 /* now we can update the picture count */
1445 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1446 is->subpq_windex = 0;
1447 SDL_LockMutex(is->subpq_mutex);
1448 is->subpq_size++;
1449 SDL_UnlockMutex(is->subpq_mutex);
1451 av_free_packet(pkt);
1452 // if (step)
1453 // if (cur_stream)
1454 // stream_pause(cur_stream);
1456 the_end:
1457 return 0;
1460 /* copy samples for viewing in editor window */
1461 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1463 int size, len, channels;
1465 channels = is->audio_st->codec->channels;
1467 size = samples_size / sizeof(short);
1468 while (size > 0) {
1469 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1470 if (len > size)
1471 len = size;
1472 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1473 samples += len;
1474 is->sample_array_index += len;
1475 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1476 is->sample_array_index = 0;
1477 size -= len;
1481 /* return the new audio buffer size (samples can be added or deleted
1482    to get better sync if video or the external clock is the master) */
1483 static int synchronize_audio(VideoState *is, short *samples,
1484 int samples_size1, double pts)
1486 int n, samples_size;
1487 double ref_clock;
1489 n = 2 * is->audio_st->codec->channels;
1490 samples_size = samples_size1;
1492 /* if not master, then we try to remove or add samples to correct the clock */
1493 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1494 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1495 double diff, avg_diff;
1496 int wanted_size, min_size, max_size, nb_samples;
1498 ref_clock = get_master_clock(is);
1499 diff = get_audio_clock(is) - ref_clock;
1501 if (diff < AV_NOSYNC_THRESHOLD) {
1502 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1503 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1504 /* not enough measures to have a correct estimate */
1505 is->audio_diff_avg_count++;
1506 } else {
1507 /* estimate the A-V difference */
1508 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1510 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1511 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1512 nb_samples = samples_size / n;
1514 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1515 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1516 if (wanted_size < min_size)
1517 wanted_size = min_size;
1518 else if (wanted_size > max_size)
1519 wanted_size = max_size;
1521                 /* add or remove samples to correct the sync */
1522 if (wanted_size < samples_size) {
1523 /* remove samples */
1524 samples_size = wanted_size;
1525 } else if (wanted_size > samples_size) {
1526 uint8_t *samples_end, *q;
1527 int nb;
1529 /* add samples */
1530                     nb = (wanted_size - samples_size); /* number of bytes to add */
1531 samples_end = (uint8_t *)samples + samples_size - n;
1532 q = samples_end + n;
1533 while (nb > 0) {
1534 memcpy(q, samples_end, n);
1535 q += n;
1536 nb -= n;
1538 samples_size = wanted_size;
1541 #if 0
1542 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1543 diff, avg_diff, samples_size - samples_size1,
1544 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1545 #endif
1547 } else {
1548             /* difference is too large: probably initial PTS errors, so
1549                reset the A-V filter */
1550 is->audio_diff_avg_count = 0;
1551 is->audio_diff_cum = 0;
1555 return samples_size;
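/* The A-V difference above is smoothed with an exponential average:
   audio_diff_cum accumulates coef^k-weighted differences and multiplying by
   (1 - coef) normalizes it; with coef = 0.01^(1/AUDIO_DIFF_AVG_NB) (set in
   stream_component_open) the oldest of the last ~20 differences only
   contributes about 1%. */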
1558 /* decode one audio frame and returns its uncompressed size */
1559 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1561 AVPacket *pkt = &is->audio_pkt;
1562 AVCodecContext *dec= is->audio_st->codec;
1563 int n, len1, data_size;
1564 double pts;
1566 for(;;) {
1567 /* NOTE: the audio packet can contain several frames */
1568 while (is->audio_pkt_size > 0) {
1569 data_size = sizeof(is->audio_buf1);
1570 len1 = avcodec_decode_audio2(dec,
1571 (int16_t *)is->audio_buf1, &data_size,
1572 is->audio_pkt_data, is->audio_pkt_size);
1573 if (len1 < 0) {
1574 /* if error, we skip the frame */
1575 is->audio_pkt_size = 0;
1576 break;
1579 is->audio_pkt_data += len1;
1580 is->audio_pkt_size -= len1;
1581 if (data_size <= 0)
1582 continue;
1584 if (dec->sample_fmt != is->audio_src_fmt) {
1585 if (is->reformat_ctx)
1586 av_audio_convert_free(is->reformat_ctx);
1587 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1588 dec->sample_fmt, 1, NULL, 0);
1589 if (!is->reformat_ctx) {
1590 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1591 avcodec_get_sample_fmt_name(dec->sample_fmt),
1592 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1593 break;
1595 is->audio_src_fmt= dec->sample_fmt;
1598 if (is->reformat_ctx) {
1599 const void *ibuf[6]= {is->audio_buf1};
1600 void *obuf[6]= {is->audio_buf2};
1601 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1602 int ostride[6]= {2};
1603 int len= data_size/istride[0];
1604 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1605 printf("av_audio_convert() failed\n");
1606 break;
1608 is->audio_buf= is->audio_buf2;
1609                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
1610                    remove this legacy cruft */
1611 data_size= len*2;
1612 }else{
1613 is->audio_buf= is->audio_buf1;
1616 /* if no pts, then compute it */
1617 pts = is->audio_clock;
1618 *pts_ptr = pts;
1619 n = 2 * dec->channels;
1620 is->audio_clock += (double)data_size /
1621 (double)(n * dec->sample_rate);
1622 #if defined(DEBUG_SYNC)
1624 static double last_clock;
1625 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1626 is->audio_clock - last_clock,
1627 is->audio_clock, pts);
1628 last_clock = is->audio_clock;
1630 #endif
1631 return data_size;
1634 /* free the current packet */
1635 if (pkt->data)
1636 av_free_packet(pkt);
1638 if (is->paused || is->audioq.abort_request) {
1639 return -1;
1642 /* read next packet */
1643 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1644 return -1;
1645 if(pkt->data == flush_pkt.data){
1646 avcodec_flush_buffers(dec);
1647 continue;
1650 is->audio_pkt_data = pkt->data;
1651 is->audio_pkt_size = pkt->size;
1653         /* update the audio clock with the pts, if present */
1654 if (pkt->pts != AV_NOPTS_VALUE) {
1655 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1660 /* get the current audio output buffer size, in bytes. With SDL, we
1661    cannot get precise information */
1662 static int audio_write_get_buf_size(VideoState *is)
1664 return is->audio_buf_size - is->audio_buf_index;
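/* SDL pulls audio: sdl_audio_callback() below runs on SDL's audio thread and
   must fill exactly 'len' bytes of 'stream', decoding further frames as needed
   and writing silence if decoding fails; audio_callback_time records when the
   callback started, which the waveform display uses to refine its delay
   estimate. */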
1668 /* prepare a new audio buffer */
1669 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1671 VideoState *is = opaque;
1672 int audio_size, len1;
1673 double pts;
1675 audio_callback_time = av_gettime();
1677 while (len > 0) {
1678 if (is->audio_buf_index >= is->audio_buf_size) {
1679 audio_size = audio_decode_frame(is, &pts);
1680 if (audio_size < 0) {
1681 /* if error, just output silence */
1682 is->audio_buf_size = 1024;
1683 memset(is->audio_buf, 0, is->audio_buf_size);
1684 } else {
1685 if (is->show_audio)
1686 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1687 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1688 pts);
1689 is->audio_buf_size = audio_size;
1691 is->audio_buf_index = 0;
1693 len1 = is->audio_buf_size - is->audio_buf_index;
1694 if (len1 > len)
1695 len1 = len;
1696 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1697 len -= len1;
1698 stream += len1;
1699 is->audio_buf_index += len1;
1703 /* open a given stream. Return 0 if OK */
1704 static int stream_component_open(VideoState *is, int stream_index)
1706 AVFormatContext *ic = is->ic;
1707 AVCodecContext *enc;
1708 AVCodec *codec;
1709 SDL_AudioSpec wanted_spec, spec;
1711 if (stream_index < 0 || stream_index >= ic->nb_streams)
1712 return -1;
1713 enc = ic->streams[stream_index]->codec;
1715 /* prepare audio output */
1716 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1717 if (enc->channels > 0) {
1718 enc->request_channels = FFMIN(2, enc->channels);
1719 } else {
1720 enc->request_channels = 2;
1724 codec = avcodec_find_decoder(enc->codec_id);
1725 enc->debug_mv = debug_mv;
1726 enc->debug = debug;
1727 enc->workaround_bugs = workaround_bugs;
1728 enc->lowres = lowres;
1729 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1730 enc->idct_algo= idct;
1731 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1732 enc->skip_frame= skip_frame;
1733 enc->skip_idct= skip_idct;
1734 enc->skip_loop_filter= skip_loop_filter;
1735 enc->error_resilience= error_resilience;
1736 enc->error_concealment= error_concealment;
1737 if (!codec ||
1738 avcodec_open(enc, codec) < 0)
1739 return -1;
1741 /* prepare audio output */
1742 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1743 wanted_spec.freq = enc->sample_rate;
1744 wanted_spec.format = AUDIO_S16SYS;
1745 wanted_spec.channels = enc->channels;
1746 wanted_spec.silence = 0;
1747 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1748 wanted_spec.callback = sdl_audio_callback;
1749 wanted_spec.userdata = is;
1750 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1751 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1752 return -1;
1754 is->audio_hw_buf_size = spec.size;
1755 is->audio_src_fmt= SAMPLE_FMT_S16;
1758 if(thread_count>1)
1759 avcodec_thread_init(enc, thread_count);
1760 enc->thread_count= thread_count;
1761 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1762 switch(enc->codec_type) {
1763 case CODEC_TYPE_AUDIO:
1764 is->audio_stream = stream_index;
1765 is->audio_st = ic->streams[stream_index];
1766 is->audio_buf_size = 0;
1767 is->audio_buf_index = 0;
1769 /* init averaging filter */
1770 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1771 is->audio_diff_avg_count = 0;
1772         /* since we do not have precise enough audio FIFO fullness information,
1773            we correct audio sync only if the error is larger than this threshold */
1774 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1776 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1777 packet_queue_init(&is->audioq);
1778 SDL_PauseAudio(0);
1779 break;
1780 case CODEC_TYPE_VIDEO:
1781 is->video_stream = stream_index;
1782 is->video_st = ic->streams[stream_index];
1784 is->frame_last_delay = 40e-3;
1785 is->frame_timer = (double)av_gettime() / 1000000.0;
1786 is->video_current_pts_time = av_gettime();
1788 packet_queue_init(&is->videoq);
1789 is->video_tid = SDL_CreateThread(video_thread, is);
1790 break;
1791 case CODEC_TYPE_SUBTITLE:
1792 is->subtitle_stream = stream_index;
1793 is->subtitle_st = ic->streams[stream_index];
1794 packet_queue_init(&is->subtitleq);
1796 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1797 break;
1798 default:
1799 break;
1801 return 0;
1804 static void stream_component_close(VideoState *is, int stream_index)
1806 AVFormatContext *ic = is->ic;
1807 AVCodecContext *enc;
1809 if (stream_index < 0 || stream_index >= ic->nb_streams)
1810 return;
1811 enc = ic->streams[stream_index]->codec;
1813 switch(enc->codec_type) {
1814 case CODEC_TYPE_AUDIO:
1815 packet_queue_abort(&is->audioq);
1817 SDL_CloseAudio();
1819 packet_queue_end(&is->audioq);
1820 if (is->reformat_ctx)
1821 av_audio_convert_free(is->reformat_ctx);
1822 break;
1823 case CODEC_TYPE_VIDEO:
1824 packet_queue_abort(&is->videoq);
1826 /* note: we also signal this mutex to make sure we deblock the
1827 video thread in all cases */
1828 SDL_LockMutex(is->pictq_mutex);
1829 SDL_CondSignal(is->pictq_cond);
1830 SDL_UnlockMutex(is->pictq_mutex);
1832 SDL_WaitThread(is->video_tid, NULL);
1834 packet_queue_end(&is->videoq);
1835 break;
1836 case CODEC_TYPE_SUBTITLE:
1837 packet_queue_abort(&is->subtitleq);
1839         /* note: we also signal this mutex to make sure we deblock the
1840            subtitle thread in all cases */
1841 SDL_LockMutex(is->subpq_mutex);
1842 is->subtitle_stream_changed = 1;
1844 SDL_CondSignal(is->subpq_cond);
1845 SDL_UnlockMutex(is->subpq_mutex);
1847 SDL_WaitThread(is->subtitle_tid, NULL);
1849 packet_queue_end(&is->subtitleq);
1850 break;
1851 default:
1852 break;
1855 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1856 avcodec_close(enc);
1857 switch(enc->codec_type) {
1858 case CODEC_TYPE_AUDIO:
1859 is->audio_st = NULL;
1860 is->audio_stream = -1;
1861 break;
1862 case CODEC_TYPE_VIDEO:
1863 is->video_st = NULL;
1864 is->video_stream = -1;
1865 break;
1866 case CODEC_TYPE_SUBTITLE:
1867 is->subtitle_st = NULL;
1868 is->subtitle_stream = -1;
1869 break;
1870 default:
1871 break;
1875 static void dump_stream_info(const AVFormatContext *s)
1877 if (s->track != 0)
1878 fprintf(stderr, "Track: %d\n", s->track);
1879 if (s->title[0] != '\0')
1880 fprintf(stderr, "Title: %s\n", s->title);
1881 if (s->author[0] != '\0')
1882 fprintf(stderr, "Author: %s\n", s->author);
1883 if (s->copyright[0] != '\0')
1884 fprintf(stderr, "Copyright: %s\n", s->copyright);
1885 if (s->comment[0] != '\0')
1886 fprintf(stderr, "Comment: %s\n", s->comment);
1887 if (s->album[0] != '\0')
1888 fprintf(stderr, "Album: %s\n", s->album);
1889 if (s->year != 0)
1890 fprintf(stderr, "Year: %d\n", s->year);
1891 if (s->genre[0] != '\0')
1892 fprintf(stderr, "Genre: %s\n", s->genre);
1895 /* since we have only one decoding thread, we can use a global
1896 variable instead of a thread local variable */
1897 static VideoState *global_video_state;
1899 static int decode_interrupt_cb(void)
1901 return (global_video_state && global_video_state->abort_request);
1904 /* this thread gets the stream from the disk or the network */
1905 static int decode_thread(void *arg)
1907 VideoState *is = arg;
1908 AVFormatContext *ic;
1909 int err, i, ret, video_index, audio_index;
1910 AVPacket pkt1, *pkt = &pkt1;
1911 AVFormatParameters params, *ap = &params;
1913 video_index = -1;
1914 audio_index = -1;
1915 is->video_stream = -1;
1916 is->audio_stream = -1;
1917 is->subtitle_stream = -1;
1919 global_video_state = is;
1920 url_set_interrupt_cb(decode_interrupt_cb);
1922 memset(ap, 0, sizeof(*ap));
1924 ap->width = frame_width;
1925 ap->height= frame_height;
1926 ap->time_base= (AVRational){1, 25};
1927 ap->pix_fmt = frame_pix_fmt;
1929 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1930 if (err < 0) {
1931 print_error(is->filename, err);
1932 ret = -1;
1933 goto fail;
1935 is->ic = ic;
1937 if(genpts)
1938 ic->flags |= AVFMT_FLAG_GENPTS;
1940 err = av_find_stream_info(ic);
1941 if (err < 0) {
1942 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1943 ret = -1;
1944 goto fail;
1946 if(ic->pb)
1947 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1949 /* if seeking requested, we execute it */
1950 if (start_time != AV_NOPTS_VALUE) {
1951 int64_t timestamp;
1953 timestamp = start_time;
1954 /* add the stream start time */
1955 if (ic->start_time != AV_NOPTS_VALUE)
1956 timestamp += ic->start_time;
1957 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1958 if (ret < 0) {
1959 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1960 is->filename, (double)timestamp / AV_TIME_BASE);
1964 for(i = 0; i < ic->nb_streams; i++) {
1965 AVCodecContext *enc = ic->streams[i]->codec;
1966 ic->streams[i]->discard = AVDISCARD_ALL;
1967 switch(enc->codec_type) {
1968 case CODEC_TYPE_AUDIO:
1969 if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1970 audio_index = i;
1971 break;
1972 case CODEC_TYPE_VIDEO:
1973 if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
1974 video_index = i;
1975 break;
1976 default:
1977 break;
1980 if (show_status) {
1981 dump_format(ic, 0, is->filename, 0);
1982 dump_stream_info(ic);
1985 /* open the streams */
1986 if (audio_index >= 0) {
1987 stream_component_open(is, audio_index);
1990 if (video_index >= 0) {
1991 stream_component_open(is, video_index);
1992 } else {
1993 if (!display_disable)
1994 is->show_audio = 1;
1997 if (is->video_stream < 0 && is->audio_stream < 0) {
1998 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1999 ret = -1;
2000 goto fail;
2003 for(;;) {
2004 if (is->abort_request)
2005 break;
2006 if (is->paused != is->last_paused) {
2007 is->last_paused = is->paused;
2008 if (is->paused)
2009 av_read_pause(ic);
2010 else
2011 av_read_play(ic);
2013 #if defined(CONFIG_RTSP_DEMUXER) || defined(CONFIG_MMSH_PROTOCOL)
2014 if (is->paused &&
2015 (!strcmp(ic->iformat->name, "rtsp") ||
2016 (ic->pb && !strcmp(url_fileno(ic->pb)->prot->name, "mmsh")))) {
2017 /* wait 10 ms to avoid trying to get another packet */
2018 /* XXX: horrible */
2019 SDL_Delay(10);
2020 continue;
2022 #endif
2023 if (is->seek_req) {
2024 int stream_index= -1;
2025 int64_t seek_target= is->seek_pos;
2027 if (is-> video_stream >= 0) stream_index= is-> video_stream;
2028 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
2029 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
2031 if(stream_index>=0){
2032 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2035 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2036 if (ret < 0) {
2037 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2038 }else{
2039 if (is->audio_stream >= 0) {
2040 packet_queue_flush(&is->audioq);
2041 packet_queue_put(&is->audioq, &flush_pkt);
2043 if (is->subtitle_stream >= 0) {
2044 packet_queue_flush(&is->subtitleq);
2045 packet_queue_put(&is->subtitleq, &flush_pkt);
2047 if (is->video_stream >= 0) {
2048 packet_queue_flush(&is->videoq);
2049 packet_queue_put(&is->videoq, &flush_pkt);
2052 is->seek_req = 0;
2055         /* if the queues are full, no need to read more */
2056 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2057 is->videoq.size > MAX_VIDEOQ_SIZE ||
2058 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2059 url_feof(ic->pb)) {
2060 /* wait 10 ms */
2061 SDL_Delay(10);
2062 continue;
2064 ret = av_read_frame(ic, pkt);
2065 if (ret < 0) {
2066 if (url_ferror(ic->pb) == 0) {
2067 SDL_Delay(100); /* wait for user event */
2068 continue;
2069 } else
2070 break;
2072 if (pkt->stream_index == is->audio_stream) {
2073 packet_queue_put(&is->audioq, pkt);
2074 } else if (pkt->stream_index == is->video_stream) {
2075 packet_queue_put(&is->videoq, pkt);
2076 } else if (pkt->stream_index == is->subtitle_stream) {
2077 packet_queue_put(&is->subtitleq, pkt);
2078 } else {
2079 av_free_packet(pkt);
2082 /* wait until the end */
2083 while (!is->abort_request) {
2084 SDL_Delay(100);
2087 ret = 0;
2088 fail:
2089 /* disable interrupting */
2090 global_video_state = NULL;
2092 /* close each stream */
2093 if (is->audio_stream >= 0)
2094 stream_component_close(is, is->audio_stream);
2095 if (is->video_stream >= 0)
2096 stream_component_close(is, is->video_stream);
2097 if (is->subtitle_stream >= 0)
2098 stream_component_close(is, is->subtitle_stream);
2099 if (is->ic) {
2100 av_close_input_file(is->ic);
2101 is->ic = NULL; /* safety */
2103 url_set_interrupt_cb(NULL);
2105 if (ret != 0) {
2106 SDL_Event event;
2108 event.type = FF_QUIT_EVENT;
2109 event.user.data1 = is;
2110 SDL_PushEvent(&event);
2112 return 0;
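/* allocate a VideoState, start the display refresh timer and spawn the demuxing thread */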
2115 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2117 VideoState *is;
2119 is = av_mallocz(sizeof(VideoState));
2120 if (!is)
2121 return NULL;
2122 av_strlcpy(is->filename, filename, sizeof(is->filename));
2123 is->iformat = iformat;
2124 is->ytop = 0;
2125 is->xleft = 0;
2127 /* start video display */
2128 is->pictq_mutex = SDL_CreateMutex();
2129 is->pictq_cond = SDL_CreateCond();
2131 is->subpq_mutex = SDL_CreateMutex();
2132 is->subpq_cond = SDL_CreateCond();
2134 /* add the refresh timer to draw the picture */
2135 schedule_refresh(is, 40);
2137 is->av_sync_type = av_sync_type;
2138 is->parse_tid = SDL_CreateThread(decode_thread, is);
2139 if (!is->parse_tid) {
2140 av_free(is);
2141 return NULL;
2143 return is;
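/* stop the demuxing thread and release the pictures and synchronization objects */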
2146 static void stream_close(VideoState *is)
2148 VideoPicture *vp;
2149 int i;
2150 /* XXX: use a special url_shutdown call to abort parse cleanly */
2151 is->abort_request = 1;
2152 SDL_WaitThread(is->parse_tid, NULL);
2154 /* free all pictures */
2155 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2156 vp = &is->pictq[i];
2157 if (vp->bmp) {
2158 SDL_FreeYUVOverlay(vp->bmp);
2159 vp->bmp = NULL;
2162 SDL_DestroyMutex(is->pictq_mutex);
2163 SDL_DestroyCond(is->pictq_cond);
2164 SDL_DestroyMutex(is->subpq_mutex);
2165 SDL_DestroyCond(is->subpq_cond);
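/* switch to the next usable stream of the given type, wrapping around at the end */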
2168 static void stream_cycle_channel(VideoState *is, int codec_type)
2170 AVFormatContext *ic = is->ic;
2171 int start_index, stream_index;
2172 AVStream *st;
2174 if (codec_type == CODEC_TYPE_VIDEO)
2175 start_index = is->video_stream;
2176 else if (codec_type == CODEC_TYPE_AUDIO)
2177 start_index = is->audio_stream;
2178 else
2179 start_index = is->subtitle_stream;
2180 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2181 return;
2182 stream_index = start_index;
2183 for(;;) {
2184 if (++stream_index >= is->ic->nb_streams)
2186 if (codec_type == CODEC_TYPE_SUBTITLE)
2188 stream_index = -1;
2189 goto the_end;
2190 } else
2191 stream_index = 0;
2193 if (stream_index == start_index)
2194 return;
2195 st = ic->streams[stream_index];
2196 if (st->codec->codec_type == codec_type) {
2197 /* check that parameters are OK */
2198 switch(codec_type) {
2199 case CODEC_TYPE_AUDIO:
2200 if (st->codec->sample_rate != 0 &&
2201 st->codec->channels != 0)
2202 goto the_end;
2203 break;
2204 case CODEC_TYPE_VIDEO:
2205 case CODEC_TYPE_SUBTITLE:
2206 goto the_end;
2207 default:
2208 break;
2212 the_end:
2213 stream_component_close(is, start_index);
2214 stream_component_open(is, stream_index);
2218 static void toggle_full_screen(void)
2220 is_full_screen = !is_full_screen;
2221 if (!fs_screen_width) {
2222 /* use default SDL method */
2223 // SDL_WM_ToggleFullScreen(screen);
2225 video_open(cur_stream);
2228 static void toggle_pause(void)
2230 if (cur_stream)
2231 stream_pause(cur_stream);
2232 step = 0;
2235 static void step_to_next_frame(void)
2237 if (cur_stream) {
2238 /* if the stream is paused, unpause it, then step */
2239 if (cur_stream->paused)
2240 stream_pause(cur_stream);
2242 step = 1;
2245 static void do_exit(void)
2247 if (cur_stream) {
2248 stream_close(cur_stream);
2249 cur_stream = NULL;
2251 if (show_status)
2252 printf("\n");
2253 SDL_Quit();
2254 exit(0);
2257 static void toggle_audio_display(void)
2259 if (cur_stream) {
2260 cur_stream->show_audio = !cur_stream->show_audio;
2264 /* main event loop: handle events coming from SDL and from the decoding threads */
2265 static void event_loop(void)
2267 SDL_Event event;
2268 double incr, pos, frac;
2270 for(;;) {
2271 SDL_WaitEvent(&event);
2272 switch(event.type) {
2273 case SDL_KEYDOWN:
2274 switch(event.key.keysym.sym) {
2275 case SDLK_ESCAPE:
2276 case SDLK_q:
2277 do_exit();
2278 break;
2279 case SDLK_f:
2280 toggle_full_screen();
2281 break;
2282 case SDLK_p:
2283 case SDLK_SPACE:
2284 toggle_pause();
2285 break;
2286 case SDLK_s: //S: Step to next frame
2287 step_to_next_frame();
2288 break;
2289 case SDLK_a:
2290 if (cur_stream)
2291 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2292 break;
2293 case SDLK_v:
2294 if (cur_stream)
2295 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2296 break;
2297 case SDLK_t:
2298 if (cur_stream)
2299 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2300 break;
2301 case SDLK_w:
2302 toggle_audio_display();
2303 break;
2304 case SDLK_LEFT:
2305 incr = -10.0;
2306 goto do_seek;
2307 case SDLK_RIGHT:
2308 incr = 10.0;
2309 goto do_seek;
2310 case SDLK_UP:
2311 incr = 60.0;
2312 goto do_seek;
2313 case SDLK_DOWN:
2314 incr = -60.0;
2315 do_seek:
2316 if (cur_stream) {
2317 if (seek_by_bytes) {
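/* convert the time increment into a byte offset: bit_rate is in bits per second, so bytes = seconds * bit_rate / 8 (about 180 kB/s is assumed when the bit rate is unknown) */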
2318 pos = url_ftell(cur_stream->ic->pb);
2319 if (cur_stream->ic->bit_rate)
2320 incr *= cur_stream->ic->bit_rate / 8.0;
2321 else
2322 incr *= 180000.0;
2323 pos += incr;
2324 stream_seek(cur_stream, pos, incr);
2325 } else {
2326 pos = get_master_clock(cur_stream);
2327 pos += incr;
2328 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2331 break;
2332 default:
2333 break;
2335 break;
2336 case SDL_MOUSEBUTTONDOWN:
2337 if (cur_stream) {
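/* seek to the fraction of the total duration given by the click position across the window width */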
2338 int ns, hh, mm, ss;
2339 int tns, thh, tmm, tss;
2340 tns = cur_stream->ic->duration/1000000LL;
2341 thh = tns/3600;
2342 tmm = (tns%3600)/60;
2343 tss = (tns%60);
2344 frac = (double)event.button.x/(double)cur_stream->width;
2345 ns = frac*tns;
2346 hh = ns/3600;
2347 mm = (ns%3600)/60;
2348 ss = (ns%60);
2349 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2350 hh, mm, ss, thh, tmm, tss);
2351 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2353 break;
2354 case SDL_VIDEORESIZE:
2355 if (cur_stream) {
2356 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2357 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2358 screen_width = cur_stream->width = event.resize.w;
2359 screen_height= cur_stream->height= event.resize.h;
2361 break;
2362 case SDL_QUIT:
2363 case FF_QUIT_EVENT:
2364 do_exit();
2365 break;
2366 case FF_ALLOC_EVENT:
2367 video_open(event.user.data1);
2368 alloc_picture(event.user.data1);
2369 break;
2370 case FF_REFRESH_EVENT:
2371 video_refresh_timer(event.user.data1);
2372 break;
2373 default:
2374 break;
2379 static void opt_frame_size(const char *arg)
2381 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2382 fprintf(stderr, "Incorrect frame size\n");
2383 exit(1);
2385 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2386 fprintf(stderr, "Frame size must be a multiple of 2\n");
2387 exit(1);
2391 static int opt_width(const char *opt, const char *arg)
2393 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2394 return 0;
2397 static int opt_height(const char *opt, const char *arg)
2399 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2400 return 0;
2403 static void opt_format(const char *arg)
2405 file_iformat = av_find_input_format(arg);
2406 if (!file_iformat) {
2407 fprintf(stderr, "Unknown input format: %s\n", arg);
2408 exit(1);
2412 static void opt_frame_pix_fmt(const char *arg)
2414 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2417 static int opt_sync(const char *opt, const char *arg)
2419 if (!strcmp(arg, "audio"))
2420 av_sync_type = AV_SYNC_AUDIO_MASTER;
2421 else if (!strcmp(arg, "video"))
2422 av_sync_type = AV_SYNC_VIDEO_MASTER;
2423 else if (!strcmp(arg, "ext"))
2424 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2425 else {
2426 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2427 exit(1);
2429 return 0;
2432 static int opt_seek(const char *opt, const char *arg)
2434 start_time = parse_time_or_die(opt, arg, 1);
2435 return 0;
2438 static int opt_debug(const char *opt, const char *arg)
2440 av_log_set_level(99);
2441 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2442 return 0;
2445 static int opt_vismv(const char *opt, const char *arg)
2447 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2448 return 0;
2451 static int opt_thread_count(const char *opt, const char *arg)
2453 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2454 #if !defined(HAVE_THREADS)
2455 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2456 #endif
2457 return 0;
2460 static const OptionDef options[] = {
2461 { "h", OPT_EXIT, {(void*)show_help}, "show help" },
2462 { "version", OPT_EXIT, {(void*)show_version}, "show version" },
2463 { "L", OPT_EXIT, {(void*)show_license}, "show license" },
2464 { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
2465 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2466 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2467 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2468 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2469 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2470 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2471 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
2472 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
2473 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2474 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2475 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2476 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2477 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2478 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2479 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2480 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2481 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2482 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2483 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2484 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2485 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "decode at a lower resolution", "" },
2486 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "set loop filter skipping mode", "" },
2487 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "set frame skipping mode", "" },
2488 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "set IDCT skipping mode", "" },
2489 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2490 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2491 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2492 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2493 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2494 { NULL, },
2497 static void show_help(void)
2499 printf("usage: ffplay [options] input_file\n"
2500 "Simple media player\n");
2501 printf("\n");
2502 show_help_options(options, "Main options:\n",
2503 OPT_EXPERT, 0);
2504 show_help_options(options, "\nAdvanced options:\n",
2505 OPT_EXPERT, OPT_EXPERT);
2506 printf("\nWhile playing:\n"
2507 "q, ESC quit\n"
2508 "f toggle full screen\n"
2509 "p, SPC pause\n"
2510 "a cycle audio channel\n"
2511 "v cycle video channel\n"
2512 "t cycle subtitle channel\n"
2513 "w show audio waves\n"
2514 "left/right seek backward/forward 10 seconds\n"
2515 "down/up seek backward/forward 1 minute\n"
2516 "mouse click seek to percentage in file corresponding to fraction of width\n"
2520 static void opt_input_file(const char *filename)
2522 if (!strcmp(filename, "-"))
2523 filename = "pipe:";
2524 input_filename = filename;
2527 /* program entry point */
2528 int main(int argc, char **argv)
2530 int flags;
2532 /* register all codecs, demuxers and protocols */
2533 avcodec_register_all();
2534 avdevice_register_all();
2535 av_register_all();
2537 show_banner();
2539 parse_options(argc, argv, options, opt_input_file);
2541 if (!input_filename) {
2542 show_help();
2543 exit(1);
2546 if (display_disable) {
2547 video_disable = 1;
2549 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2550 #if !defined(__MINGW32__) && !defined(__APPLE__)
2551 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2552 #endif
2553 if (SDL_Init (flags)) {
2554 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2555 exit(1);
2558 if (!display_disable) {
2559 #ifdef HAVE_SDL_VIDEO_SIZE
2560 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2561 fs_screen_width = vi->current_w;
2562 fs_screen_height = vi->current_h;
2563 #endif
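/* drop SDL event types the player does not use, to keep the event queue small */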
2566 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2567 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2568 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2569 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
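/* flush_pkt is a sentinel packet: it is queued right after a seek-time flush so the decoder threads know to flush their codec buffers */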
2571 av_init_packet(&flush_pkt);
2572 flush_pkt.data= "FLUSH";
2574 cur_stream = stream_open(input_filename, file_iformat);
2576 event_loop();
2578 /* never returns */
2580 return 0;