Simplify if() in copy_and_dup()
[ffmpeg-lucabe.git] / ffplay.c
blob 7fe54924c6e8e655fa0f348bef625bffd2274155
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
31 #include "cmdutils.h"
33 #include <SDL.h>
34 #include <SDL_thread.h>
36 #ifdef __MINGW32__
37 #undef main /* We don't want SDL to override our main() */
38 #endif
40 #undef exit
42 const char program_name[] = "FFplay";
43 const int program_birth_year = 2003;
45 //#define DEBUG_SYNC
47 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
48 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
49 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
51 /* SDL audio buffer size, in samples. Should be small to have precise
52 A/V sync as SDL does not have hardware buffer fullness info. */
53 #define SDL_AUDIO_BUFFER_SIZE 1024
55 /* no AV sync correction is done if below the AV sync threshold */
56 #define AV_SYNC_THRESHOLD 0.01
57 /* no AV correction is done if too big error */
58 #define AV_NOSYNC_THRESHOLD 10.0
60 /* maximum audio speed change to get correct sync */
61 #define SAMPLE_CORRECTION_PERCENT_MAX 10
63 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
64 #define AUDIO_DIFF_AVG_NB 20
66 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
67 #define SAMPLE_ARRAY_SIZE (2*65536)
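/*
 * How these tunables are used (see video_refresh_timer() and
 * synchronize_audio() below): video frames are shown early or late only when
 * the clock error exceeds FFMAX(AV_SYNC_THRESHOLD, frame delay); errors
 * larger than AV_NOSYNC_THRESHOLD are assumed to be bogus and trigger no
 * correction. Audio correction adds or drops at most
 * SAMPLE_CORRECTION_PERCENT_MAX percent of the samples per buffer, and the
 * A-V error is smoothed with an exponential average whose coefficient is
 * derived from AUDIO_DIFF_AVG_NB.
 */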
69 static int sws_flags = SWS_BICUBIC;
71 typedef struct PacketQueue {
72 AVPacketList *first_pkt, *last_pkt;
73 int nb_packets;
74 int size;
75 int abort_request;
76 SDL_mutex *mutex;
77 SDL_cond *cond;
78 } PacketQueue;
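/*
 * PacketQueue is a simple thread-safe FIFO of demuxed AVPackets: the demuxer
 * thread appends with packet_queue_put(), while the audio, video and subtitle
 * threads drain it with packet_queue_get(). "size" counts the queued payload
 * bytes and is used by the demuxer for back-pressure; "abort_request" wakes
 * up and stops any thread blocked on the condition variable.
 */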
80 #define VIDEO_PICTURE_QUEUE_SIZE 1
81 #define SUBPICTURE_QUEUE_SIZE 4
83 typedef struct VideoPicture {
84 double pts; ///<presentation time stamp for this picture
85 SDL_Overlay *bmp;
86 int width, height; /* source height & width */
87 int allocated;
88 } VideoPicture;
90 typedef struct SubPicture {
91 double pts; /* presentation time stamp for this picture */
92 AVSubtitle sub;
93 } SubPicture;
95 enum {
96 AV_SYNC_AUDIO_MASTER, /* default choice */
97 AV_SYNC_VIDEO_MASTER,
98 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
101 typedef struct VideoState {
102 SDL_Thread *parse_tid;
103 SDL_Thread *video_tid;
104 AVInputFormat *iformat;
105 int no_background;
106 int abort_request;
107 int paused;
108 int last_paused;
109 int seek_req;
110 int seek_flags;
111 int64_t seek_pos;
112 AVFormatContext *ic;
113 int dtg_active_format;
115 int audio_stream;
117 int av_sync_type;
118 double external_clock; /* external clock base */
119 int64_t external_clock_time;
121 double audio_clock;
122 double audio_diff_cum; /* used for AV difference average computation */
123 double audio_diff_avg_coef;
124 double audio_diff_threshold;
125 int audio_diff_avg_count;
126 AVStream *audio_st;
127 PacketQueue audioq;
128 int audio_hw_buf_size;
129 /* samples output by the codec. we reserve more space for avsync
130 compensation */
131 DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
132 DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
133 uint8_t *audio_buf;
134 unsigned int audio_buf_size; /* in bytes */
135 int audio_buf_index; /* in bytes */
136 AVPacket audio_pkt;
137 uint8_t *audio_pkt_data;
138 int audio_pkt_size;
139 enum SampleFormat audio_src_fmt;
140 AVAudioConvert *reformat_ctx;
142 int show_audio; /* if true, display audio samples */
143 int16_t sample_array[SAMPLE_ARRAY_SIZE];
144 int sample_array_index;
145 int last_i_start;
147 SDL_Thread *subtitle_tid;
148 int subtitle_stream;
149 int subtitle_stream_changed;
150 AVStream *subtitle_st;
151 PacketQueue subtitleq;
152 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
153 int subpq_size, subpq_rindex, subpq_windex;
154 SDL_mutex *subpq_mutex;
155 SDL_cond *subpq_cond;
157 double frame_timer;
158 double frame_last_pts;
159 double frame_last_delay;
160 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
161 int video_stream;
162 AVStream *video_st;
163 PacketQueue videoq;
164 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
165 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
166 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
167 int pictq_size, pictq_rindex, pictq_windex;
168 SDL_mutex *pictq_mutex;
169 SDL_cond *pictq_cond;
171 // QETimer *video_timer;
172 char filename[1024];
173 int width, height, xleft, ytop;
174 } VideoState;
176 static void show_help(void);
177 static int audio_write_get_buf_size(VideoState *is);
179 /* options specified by the user */
180 static AVInputFormat *file_iformat;
181 static const char *input_filename;
182 static int fs_screen_width;
183 static int fs_screen_height;
184 static int screen_width = 0;
185 static int screen_height = 0;
186 static int frame_width = 0;
187 static int frame_height = 0;
188 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
189 static int audio_disable;
190 static int video_disable;
191 static int wanted_audio_stream= 0;
192 static int wanted_video_stream= 0;
193 static int seek_by_bytes;
194 static int display_disable;
195 static int show_status;
196 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
197 static int64_t start_time = AV_NOPTS_VALUE;
198 static int debug = 0;
199 static int debug_mv = 0;
200 static int step = 0;
201 static int thread_count = 1;
202 static int workaround_bugs = 1;
203 static int fast = 0;
204 static int genpts = 0;
205 static int lowres = 0;
206 static int idct = FF_IDCT_AUTO;
207 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
208 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
209 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
210 static int error_resilience = FF_ER_CAREFUL;
211 static int error_concealment = 3;
212 static int decoder_reorder_pts= 0;
214 /* current context */
215 static int is_full_screen;
216 static VideoState *cur_stream;
217 static int64_t audio_callback_time;
219 AVPacket flush_pkt;
221 #define FF_ALLOC_EVENT (SDL_USEREVENT)
222 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
223 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
225 SDL_Surface *screen;
227 /* packet queue handling */
228 static void packet_queue_init(PacketQueue *q)
230 memset(q, 0, sizeof(PacketQueue));
231 q->mutex = SDL_CreateMutex();
232 q->cond = SDL_CreateCond();
235 static void packet_queue_flush(PacketQueue *q)
237 AVPacketList *pkt, *pkt1;
239 SDL_LockMutex(q->mutex);
240 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
241 pkt1 = pkt->next;
242 av_free_packet(&pkt->pkt);
243 av_freep(&pkt);
245 q->last_pkt = NULL;
246 q->first_pkt = NULL;
247 q->nb_packets = 0;
248 q->size = 0;
249 SDL_UnlockMutex(q->mutex);
252 static void packet_queue_end(PacketQueue *q)
254 packet_queue_flush(q);
255 SDL_DestroyMutex(q->mutex);
256 SDL_DestroyCond(q->cond);
259 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
261 AVPacketList *pkt1;
263 /* duplicate the packet */
264 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
265 return -1;
267 pkt1 = av_malloc(sizeof(AVPacketList));
268 if (!pkt1)
269 return -1;
270 pkt1->pkt = *pkt;
271 pkt1->next = NULL;
274 SDL_LockMutex(q->mutex);
276 if (!q->last_pkt)
278 q->first_pkt = pkt1;
279 else
280 q->last_pkt->next = pkt1;
281 q->last_pkt = pkt1;
282 q->nb_packets++;
283 q->size += pkt1->pkt.size;
284 /* XXX: should duplicate packet data in DV case */
285 SDL_CondSignal(q->cond);
287 SDL_UnlockMutex(q->mutex);
288 return 0;
291 static void packet_queue_abort(PacketQueue *q)
293 SDL_LockMutex(q->mutex);
295 q->abort_request = 1;
297 SDL_CondSignal(q->cond);
299 SDL_UnlockMutex(q->mutex);
302 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
303 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
305 AVPacketList *pkt1;
306 int ret;
308 SDL_LockMutex(q->mutex);
310 for(;;) {
311 if (q->abort_request) {
312 ret = -1;
313 break;
316 pkt1 = q->first_pkt;
317 if (pkt1) {
318 q->first_pkt = pkt1->next;
319 if (!q->first_pkt)
320 q->last_pkt = NULL;
321 q->nb_packets--;
322 q->size -= pkt1->pkt.size;
323 *pkt = pkt1->pkt;
324 av_free(pkt1);
325 ret = 1;
326 break;
327 } else if (!block) {
328 ret = 0;
329 break;
330 } else {
331 SDL_CondWait(q->cond, q->mutex);
334 SDL_UnlockMutex(q->mutex);
335 return ret;
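/*
 * A sketch of the consumer pattern used by the decoder threads below
 * (variable names here are illustrative):
 *
 *     AVPacket pkt;
 *     for (;;) {
 *         if (packet_queue_get(&q, &pkt, 1) < 0)
 *             break;                            // queue aborted, stop the thread
 *         if (pkt.data == flush_pkt.data) {
 *             avcodec_flush_buffers(codec_ctx); // a seek happened, reset the codec
 *             continue;
 *         }
 *         ...decode pkt...
 *         av_free_packet(&pkt);
 *     }
 */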
338 static inline void fill_rectangle(SDL_Surface *screen,
339 int x, int y, int w, int h, int color)
341 SDL_Rect rect;
342 rect.x = x;
343 rect.y = y;
344 rect.w = w;
345 rect.h = h;
346 SDL_FillRect(screen, &rect, color);
349 #if 0
350 /* draw only the border of a rectangle */
351 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
353 int w1, w2, h1, h2;
355 /* fill the background */
356 w1 = x;
357 if (w1 < 0)
358 w1 = 0;
359 w2 = s->width - (x + w);
360 if (w2 < 0)
361 w2 = 0;
362 h1 = y;
363 if (h1 < 0)
364 h1 = 0;
365 h2 = s->height - (y + h);
366 if (h2 < 0)
367 h2 = 0;
368 fill_rectangle(screen,
369 s->xleft, s->ytop,
370 w1, s->height,
371 color);
372 fill_rectangle(screen,
373 s->xleft + s->width - w2, s->ytop,
374 w2, s->height,
375 color);
376 fill_rectangle(screen,
377 s->xleft + w1, s->ytop,
378 s->width - w1 - w2, h1,
379 color);
380 fill_rectangle(screen,
381 s->xleft + w1, s->ytop + s->height - h2,
382 s->width - w1 - w2, h2,
383 color);
385 #endif
389 #define SCALEBITS 10
390 #define ONE_HALF (1 << (SCALEBITS - 1))
391 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
393 #define RGB_TO_Y_CCIR(r, g, b) \
394 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
395 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
397 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
398 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
399 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
401 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
402 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
403 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
405 #define ALPHA_BLEND(a, oldp, newp, s)\
406 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
408 #define RGBA_IN(r, g, b, a, s)\
410 unsigned int v = ((const uint32_t *)(s))[0];\
411 a = (v >> 24) & 0xff;\
412 r = (v >> 16) & 0xff;\
413 g = (v >> 8) & 0xff;\
414 b = v & 0xff;\
417 #define YUVA_IN(y, u, v, a, s, pal)\
419 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
420 a = (val >> 24) & 0xff;\
421 y = (val >> 16) & 0xff;\
422 u = (val >> 8) & 0xff;\
423 v = val & 0xff;\
426 #define YUVA_OUT(d, y, u, v, a)\
428 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
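/*
 * The macros above implement fixed-point RGB -> CCIR601 YUV conversion
 * (SCALEBITS fractional bits) and per-pixel alpha blending; they are used by
 * blend_subrect() below to draw palettized subtitle rectangles directly into
 * the YUV picture. The palette itself is converted from RGBA to YUVA once per
 * subtitle in subtitle_thread().
 */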
432 #define BPP 1
434 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
436 int wrap, wrap3, width2, skip2;
437 int y, u, v, a, u1, v1, a1, w, h;
438 uint8_t *lum, *cb, *cr;
439 const uint8_t *p;
440 const uint32_t *pal;
441 int dstx, dsty, dstw, dsth;
443 dstx = FFMIN(FFMAX(rect->x, 0), imgw);
444 dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
445 dsty = FFMIN(FFMAX(rect->y, 0), imgh);
446 dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
447 lum = dst->data[0] + dsty * dst->linesize[0];
448 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
449 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
451 width2 = (dstw + 1) >> 1;
452 skip2 = dstx >> 1;
453 wrap = dst->linesize[0];
454 wrap3 = rect->linesize;
455 p = rect->bitmap;
456 pal = rect->rgba_palette; /* Now in YCrCb! */
458 if (dsty & 1) {
459 lum += dstx;
460 cb += skip2;
461 cr += skip2;
463 if (dstx & 1) {
464 YUVA_IN(y, u, v, a, p, pal);
465 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
466 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
467 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
468 cb++;
469 cr++;
470 lum++;
471 p += BPP;
473 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
474 YUVA_IN(y, u, v, a, p, pal);
475 u1 = u;
476 v1 = v;
477 a1 = a;
478 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
480 YUVA_IN(y, u, v, a, p + BPP, pal);
481 u1 += u;
482 v1 += v;
483 a1 += a;
484 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
485 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
486 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
487 cb++;
488 cr++;
489 p += 2 * BPP;
490 lum += 2;
492 if (w) {
493 YUVA_IN(y, u, v, a, p, pal);
494 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
495 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
496 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
498 p += wrap3 + (wrap3 - dstw * BPP);
499 lum += wrap + (wrap - dstw - dstx);
500 cb += dst->linesize[1] - width2 - skip2;
501 cr += dst->linesize[2] - width2 - skip2;
503 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
504 lum += dstx;
505 cb += skip2;
506 cr += skip2;
508 if (dstx & 1) {
509 YUVA_IN(y, u, v, a, p, pal);
510 u1 = u;
511 v1 = v;
512 a1 = a;
513 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514 p += wrap3;
515 lum += wrap;
516 YUVA_IN(y, u, v, a, p, pal);
517 u1 += u;
518 v1 += v;
519 a1 += a;
520 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
522 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
523 cb++;
524 cr++;
525 p += -wrap3 + BPP;
526 lum += -wrap + 1;
528 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
529 YUVA_IN(y, u, v, a, p, pal);
530 u1 = u;
531 v1 = v;
532 a1 = a;
533 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535 YUVA_IN(y, u, v, a, p, pal);
536 u1 += u;
537 v1 += v;
538 a1 += a;
539 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540 p += wrap3;
541 lum += wrap;
543 YUVA_IN(y, u, v, a, p, pal);
544 u1 += u;
545 v1 += v;
546 a1 += a;
547 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
549 YUVA_IN(y, u, v, a, p, pal);
550 u1 += u;
551 v1 += v;
552 a1 += a;
553 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
555 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
556 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
558 cb++;
559 cr++;
560 p += -wrap3 + 2 * BPP;
561 lum += -wrap + 2;
563 if (w) {
564 YUVA_IN(y, u, v, a, p, pal);
565 u1 = u;
566 v1 = v;
567 a1 = a;
568 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569 p += wrap3;
570 lum += wrap;
571 YUVA_IN(y, u, v, a, p, pal);
572 u1 += u;
573 v1 += v;
574 a1 += a;
575 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
577 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
578 cb++;
579 cr++;
580 p += -wrap3 + BPP;
581 lum += -wrap + 1;
583 p += wrap3 + (wrap3 - dstw * BPP);
584 lum += wrap + (wrap - dstw - dstx);
585 cb += dst->linesize[1] - width2 - skip2;
586 cr += dst->linesize[2] - width2 - skip2;
588 /* handle odd height */
589 if (h) {
590 lum += dstx;
591 cb += skip2;
592 cr += skip2;
594 if (dstx & 1) {
595 YUVA_IN(y, u, v, a, p, pal);
596 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
598 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
599 cb++;
600 cr++;
601 lum++;
602 p += BPP;
604 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
605 YUVA_IN(y, u, v, a, p, pal);
606 u1 = u;
607 v1 = v;
608 a1 = a;
609 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611 YUVA_IN(y, u, v, a, p + BPP, pal);
612 u1 += u;
613 v1 += v;
614 a1 += a;
615 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
616 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
617 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
618 cb++;
619 cr++;
620 p += 2 * BPP;
621 lum += 2;
623 if (w) {
624 YUVA_IN(y, u, v, a, p, pal);
625 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
627 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
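/*
 * Note on blend_subrect(): the destination picture is 4:2:0, so luma is
 * blended per pixel while each chroma sample covers a 2x2 block. The odd
 * first row/column and the odd last row/column are handled separately, and
 * the accumulated u1/v1/a1 sums are shifted according to how many source
 * pixels contributed to each chroma sample.
 */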
632 static void free_subpicture(SubPicture *sp)
634 int i;
636 for (i = 0; i < sp->sub.num_rects; i++)
638 av_free(sp->sub.rects[i].bitmap);
639 av_free(sp->sub.rects[i].rgba_palette);
642 av_free(sp->sub.rects);
644 memset(&sp->sub, 0, sizeof(AVSubtitle));
647 static void video_image_display(VideoState *is)
649 VideoPicture *vp;
650 SubPicture *sp;
651 AVPicture pict;
652 float aspect_ratio;
653 int width, height, x, y;
654 SDL_Rect rect;
655 int i;
657 vp = &is->pictq[is->pictq_rindex];
658 if (vp->bmp) {
659 /* XXX: use variable in the frame */
660 if (is->video_st->sample_aspect_ratio.num)
661 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
662 else if (is->video_st->codec->sample_aspect_ratio.num)
663 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
664 else
665 aspect_ratio = 0;
666 if (aspect_ratio <= 0.0)
667 aspect_ratio = 1.0;
668 aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
669 /* if an active format is indicated, then it overrides the
670 mpeg format */
671 #if 0
672 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
673 is->dtg_active_format = is->video_st->codec->dtg_active_format;
674 printf("dtg_active_format=%d\n", is->dtg_active_format);
676 #endif
677 #if 0
678 switch(is->video_st->codec->dtg_active_format) {
679 case FF_DTG_AFD_SAME:
680 default:
681 /* nothing to do */
682 break;
683 case FF_DTG_AFD_4_3:
684 aspect_ratio = 4.0 / 3.0;
685 break;
686 case FF_DTG_AFD_16_9:
687 aspect_ratio = 16.0 / 9.0;
688 break;
689 case FF_DTG_AFD_14_9:
690 aspect_ratio = 14.0 / 9.0;
691 break;
692 case FF_DTG_AFD_4_3_SP_14_9:
693 aspect_ratio = 14.0 / 9.0;
694 break;
695 case FF_DTG_AFD_16_9_SP_14_9:
696 aspect_ratio = 14.0 / 9.0;
697 break;
698 case FF_DTG_AFD_SP_4_3:
699 aspect_ratio = 4.0 / 3.0;
700 break;
702 #endif
704 if (is->subtitle_st)
706 if (is->subpq_size > 0)
708 sp = &is->subpq[is->subpq_rindex];
710 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
712 SDL_LockYUVOverlay (vp->bmp);
714 pict.data[0] = vp->bmp->pixels[0];
715 pict.data[1] = vp->bmp->pixels[2];
716 pict.data[2] = vp->bmp->pixels[1];
718 pict.linesize[0] = vp->bmp->pitches[0];
719 pict.linesize[1] = vp->bmp->pitches[2];
720 pict.linesize[2] = vp->bmp->pitches[1];
722 for (i = 0; i < sp->sub.num_rects; i++)
723 blend_subrect(&pict, &sp->sub.rects[i],
724 vp->bmp->w, vp->bmp->h);
726 SDL_UnlockYUVOverlay (vp->bmp);
732 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
733 height = is->height;
734 width = ((int)rint(height * aspect_ratio)) & ~1;
735 if (width > is->width) {
736 width = is->width;
737 height = ((int)rint(width / aspect_ratio)) & ~1;
739 x = (is->width - width) / 2;
740 y = (is->height - height) / 2;
741 if (!is->no_background) {
742 /* fill the background */
743 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
744 } else {
745 is->no_background = 0;
747 rect.x = is->xleft + x;
748 rect.y = is->ytop + y;
749 rect.w = width;
750 rect.h = height;
751 SDL_DisplayYUVOverlay(vp->bmp, &rect);
752 } else {
753 #if 0
754 fill_rectangle(screen,
755 is->xleft, is->ytop, is->width, is->height,
756 QERGB(0x00, 0x00, 0x00));
757 #endif
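/*
 * Display geometry in video_image_display(): the sample aspect ratio (taken
 * from the stream if set, otherwise from the codec) is multiplied by the
 * coded width/height to get the display aspect ratio; the picture is then
 * scaled to the largest width/height (rounded down to even) that fits the
 * window and centered, i.e. letterboxed or pillarboxed as needed.
 */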
761 static inline int compute_mod(int a, int b)
763 a = a % b;
764 if (a >= 0)
765 return a;
766 else
767 return a + b;
770 static void video_audio_display(VideoState *s)
772 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
773 int ch, channels, h, h2, bgcolor, fgcolor;
774 int16_t time_diff;
776 /* compute display index : center on currently output samples */
777 channels = s->audio_st->codec->channels;
778 nb_display_channels = channels;
779 if (!s->paused) {
780 n = 2 * channels;
781 delay = audio_write_get_buf_size(s);
782 delay /= n;
784 /* to be more precise, we take into account the time spent since
785 the last buffer computation */
786 if (audio_callback_time) {
787 time_diff = av_gettime() - audio_callback_time;
788 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
791 delay -= s->width / 2;
792 if (delay < s->width)
793 delay = s->width;
795 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
797 h= INT_MIN;
798 for(i=0; i<1000; i+=channels){
799 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
800 int a= s->sample_array[idx];
801 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
802 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
803 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
804 int score= a-d;
805 if(h<score && (b^c)<0){
806 h= score;
807 i_start= idx;
811 s->last_i_start = i_start;
812 } else {
813 i_start = s->last_i_start;
816 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
817 fill_rectangle(screen,
818 s->xleft, s->ytop, s->width, s->height,
819 bgcolor);
821 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
823 /* total height for one channel */
824 h = s->height / nb_display_channels;
825 /* graph height / 2 */
826 h2 = (h * 9) / 20;
827 for(ch = 0;ch < nb_display_channels; ch++) {
828 i = i_start + ch;
829 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
830 for(x = 0; x < s->width; x++) {
831 y = (s->sample_array[i] * h2) >> 15;
832 if (y < 0) {
833 y = -y;
834 ys = y1 - y;
835 } else {
836 ys = y1;
838 fill_rectangle(screen,
839 s->xleft + x, ys, 1, y,
840 fgcolor);
841 i += channels;
842 if (i >= SAMPLE_ARRAY_SIZE)
843 i -= SAMPLE_ARRAY_SIZE;
847 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
849 for(ch = 1;ch < nb_display_channels; ch++) {
850 y = s->ytop + ch * h;
851 fill_rectangle(screen,
852 s->xleft, y, s->width, 1,
853 fgcolor);
855 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
858 static int video_open(VideoState *is){
859 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
860 int w,h;
862 if(is_full_screen) flags |= SDL_FULLSCREEN;
863 else flags |= SDL_RESIZABLE;
865 if (is_full_screen && fs_screen_width) {
866 w = fs_screen_width;
867 h = fs_screen_height;
868 } else if(!is_full_screen && screen_width){
869 w = screen_width;
870 h = screen_height;
871 }else if (is->video_st && is->video_st->codec->width){
872 w = is->video_st->codec->width;
873 h = is->video_st->codec->height;
874 } else {
875 w = 640;
876 h = 480;
878 #ifndef __APPLE__
879 screen = SDL_SetVideoMode(w, h, 0, flags);
880 #else
881 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
882 screen = SDL_SetVideoMode(w, h, 24, flags);
883 #endif
884 if (!screen) {
885 fprintf(stderr, "SDL: could not set video mode - exiting\n");
886 return -1;
888 SDL_WM_SetCaption("FFplay", "FFplay");
890 is->width = screen->w;
891 is->height = screen->h;
893 return 0;
896 /* display the current picture, if any */
897 static void video_display(VideoState *is)
899 if(!screen)
900 video_open(cur_stream);
901 if (is->audio_st && is->show_audio)
902 video_audio_display(is);
903 else if (is->video_st)
904 video_image_display(is);
907 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
909 SDL_Event event;
910 event.type = FF_REFRESH_EVENT;
911 event.user.data1 = opaque;
912 SDL_PushEvent(&event);
913 return 0; /* 0 means stop timer */
916 /* schedule a video refresh in 'delay' ms */
917 static void schedule_refresh(VideoState *is, int delay)
919 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
920 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
923 /* get the current audio clock value */
924 static double get_audio_clock(VideoState *is)
926 double pts;
927 int hw_buf_size, bytes_per_sec;
928 pts = is->audio_clock;
929 hw_buf_size = audio_write_get_buf_size(is);
930 bytes_per_sec = 0;
931 if (is->audio_st) {
932 bytes_per_sec = is->audio_st->codec->sample_rate *
933 2 * is->audio_st->codec->channels;
935 if (bytes_per_sec)
936 pts -= (double)hw_buf_size / bytes_per_sec;
937 return pts;
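/*
 * is->audio_clock tracks the pts at the end of the most recently decoded
 * audio data; the currently audible position is estimated by subtracting the
 * time represented by the bytes that have not been consumed yet:
 *     pts -= buffered_bytes / (sample_rate * 2 * channels)
 * where the factor 2 is the size of one 16-bit sample.
 */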
940 /* get the current video clock value */
941 static double get_video_clock(VideoState *is)
943 double delta;
944 if (is->paused) {
945 delta = 0;
946 } else {
947 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
949 return is->video_current_pts + delta;
952 /* get the current external clock value */
953 static double get_external_clock(VideoState *is)
955 int64_t ti;
956 ti = av_gettime();
957 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
960 /* get the current master clock value */
961 static double get_master_clock(VideoState *is)
963 double val;
965 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
966 if (is->video_st)
967 val = get_video_clock(is);
968 else
969 val = get_audio_clock(is);
970 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
971 if (is->audio_st)
972 val = get_audio_clock(is);
973 else
974 val = get_video_clock(is);
975 } else {
976 val = get_external_clock(is);
978 return val;
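/*
 * The master clock is whichever clock the other streams are slaved to
 * (is->av_sync_type): the audio clock by default, the video clock, or the
 * free-running external clock. If the preferred stream is missing, the other
 * stream's clock is used as a fallback.
 */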
981 /* seek in the stream */
982 static void stream_seek(VideoState *is, int64_t pos, int rel)
984 if (!is->seek_req) {
985 is->seek_pos = pos;
986 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
987 if (seek_by_bytes)
988 is->seek_flags |= AVSEEK_FLAG_BYTE;
989 is->seek_req = 1;
993 /* pause or resume the video */
994 static void stream_pause(VideoState *is)
996 is->paused = !is->paused;
997 if (!is->paused) {
998 is->video_current_pts = get_video_clock(is);
999 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
1003 /* called to display each frame */
1004 static void video_refresh_timer(void *opaque)
1006 VideoState *is = opaque;
1007 VideoPicture *vp;
1008 double actual_delay, delay, sync_threshold, ref_clock, diff;
1010 SubPicture *sp, *sp2;
1012 if (is->video_st) {
1013 if (is->pictq_size == 0) {
1014 /* if no picture, need to wait */
1015 schedule_refresh(is, 1);
1016 } else {
1017 /* dequeue the picture */
1018 vp = &is->pictq[is->pictq_rindex];
1020 /* update current video pts */
1021 is->video_current_pts = vp->pts;
1022 is->video_current_pts_time = av_gettime();
1024 /* compute nominal delay */
1025 delay = vp->pts - is->frame_last_pts;
1026 if (delay <= 0 || delay >= 2.0) {
1027 /* if incorrect delay, use previous one */
1028 delay = is->frame_last_delay;
1030 is->frame_last_delay = delay;
1031 is->frame_last_pts = vp->pts;
1033 /* update delay to follow master synchronisation source */
1034 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1035 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1036 /* if video is slave, we try to correct big delays by
1037 duplicating or deleting a frame */
1038 ref_clock = get_master_clock(is);
1039 diff = vp->pts - ref_clock;
1041 /* skip or repeat frame. We take into account the
1042 delay to compute the threshold. I still don't know
1043 if it is the best guess */
1044 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1045 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1046 if (diff <= -sync_threshold)
1047 delay = 0;
1048 else if (diff >= sync_threshold)
1049 delay = 2 * delay;
1053 is->frame_timer += delay;
1054 /* compute the REAL delay (we need to do that to avoid
1055 long-term errors) */
1056 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1057 if (actual_delay < 0.010) {
1058 /* XXX: should skip picture */
1059 actual_delay = 0.010;
1061 /* launch timer for next picture */
1062 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1064 #if defined(DEBUG_SYNC)
1065 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1066 delay, actual_delay, vp->pts, -diff);
1067 #endif
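            /*
             * Summary of the timing above: the nominal delay is the pts
             * difference between this picture and the previous one; when the
             * video is a slave, the delay is forced to 0 (catch up) or
             * doubled (hold the frame longer) if the clock error exceeds the
             * threshold. frame_timer accumulates the ideal display times, and
             * the next refresh is scheduled for frame_timer minus the current
             * time, clamped to at least 10 ms.
             */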
1069 if(is->subtitle_st) {
1070 if (is->subtitle_stream_changed) {
1071 SDL_LockMutex(is->subpq_mutex);
1073 while (is->subpq_size) {
1074 free_subpicture(&is->subpq[is->subpq_rindex]);
1076 /* update queue size and signal for next picture */
1077 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1078 is->subpq_rindex = 0;
1080 is->subpq_size--;
1082 is->subtitle_stream_changed = 0;
1084 SDL_CondSignal(is->subpq_cond);
1085 SDL_UnlockMutex(is->subpq_mutex);
1086 } else {
1087 if (is->subpq_size > 0) {
1088 sp = &is->subpq[is->subpq_rindex];
1090 if (is->subpq_size > 1)
1091 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1092 else
1093 sp2 = NULL;
1095 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1096 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1098 free_subpicture(sp);
1100 /* update queue size and signal for next picture */
1101 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1102 is->subpq_rindex = 0;
1104 SDL_LockMutex(is->subpq_mutex);
1105 is->subpq_size--;
1106 SDL_CondSignal(is->subpq_cond);
1107 SDL_UnlockMutex(is->subpq_mutex);
1113 /* display picture */
1114 video_display(is);
1116 /* update queue size and signal for next picture */
1117 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1118 is->pictq_rindex = 0;
1120 SDL_LockMutex(is->pictq_mutex);
1121 is->pictq_size--;
1122 SDL_CondSignal(is->pictq_cond);
1123 SDL_UnlockMutex(is->pictq_mutex);
1125 } else if (is->audio_st) {
1126 /* draw the next audio frame */
1128 schedule_refresh(is, 40);
1130 /* if only audio stream, then display the audio bars (better
1131 than nothing, just to test the implementation) */
1133 /* display picture */
1134 video_display(is);
1135 } else {
1136 schedule_refresh(is, 100);
1138 if (show_status) {
1139 static int64_t last_time;
1140 int64_t cur_time;
1141 int aqsize, vqsize, sqsize;
1142 double av_diff;
1144 cur_time = av_gettime();
1145 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1146 aqsize = 0;
1147 vqsize = 0;
1148 sqsize = 0;
1149 if (is->audio_st)
1150 aqsize = is->audioq.size;
1151 if (is->video_st)
1152 vqsize = is->videoq.size;
1153 if (is->subtitle_st)
1154 sqsize = is->subtitleq.size;
1155 av_diff = 0;
1156 if (is->audio_st && is->video_st)
1157 av_diff = get_audio_clock(is) - get_video_clock(is);
1158 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1159 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1160 fflush(stdout);
1161 last_time = cur_time;
1166 /* allocate a picture (this must be done in the main thread to avoid
1167 potential locking problems) */
1168 static void alloc_picture(void *opaque)
1170 VideoState *is = opaque;
1171 VideoPicture *vp;
1173 vp = &is->pictq[is->pictq_windex];
1175 if (vp->bmp)
1176 SDL_FreeYUVOverlay(vp->bmp);
1178 #if 0
1179 /* XXX: use generic function */
1180 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1181 switch(is->video_st->codec->pix_fmt) {
1182 case PIX_FMT_YUV420P:
1183 case PIX_FMT_YUV422P:
1184 case PIX_FMT_YUV444P:
1185 case PIX_FMT_YUYV422:
1186 case PIX_FMT_YUV410P:
1187 case PIX_FMT_YUV411P:
1188 is_yuv = 1;
1189 break;
1190 default:
1191 is_yuv = 0;
1192 break;
1194 #endif
1195 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1196 is->video_st->codec->height,
1197 SDL_YV12_OVERLAY,
1198 screen);
1199 vp->width = is->video_st->codec->width;
1200 vp->height = is->video_st->codec->height;
1202 SDL_LockMutex(is->pictq_mutex);
1203 vp->allocated = 1;
1204 SDL_CondSignal(is->pictq_cond);
1205 SDL_UnlockMutex(is->pictq_mutex);
1210 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1212 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1214 VideoPicture *vp;
1215 int dst_pix_fmt;
1216 AVPicture pict;
1217 static struct SwsContext *img_convert_ctx;
1219 /* wait until we have space to put a new picture */
1220 SDL_LockMutex(is->pictq_mutex);
1221 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1222 !is->videoq.abort_request) {
1223 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1225 SDL_UnlockMutex(is->pictq_mutex);
1227 if (is->videoq.abort_request)
1228 return -1;
1230 vp = &is->pictq[is->pictq_windex];
1232 /* alloc or resize hardware picture buffer */
1233 if (!vp->bmp ||
1234 vp->width != is->video_st->codec->width ||
1235 vp->height != is->video_st->codec->height) {
1236 SDL_Event event;
1238 vp->allocated = 0;
1240 /* the allocation must be done in the main thread to avoid
1241 locking problems */
1242 event.type = FF_ALLOC_EVENT;
1243 event.user.data1 = is;
1244 SDL_PushEvent(&event);
1246 /* wait until the picture is allocated */
1247 SDL_LockMutex(is->pictq_mutex);
1248 while (!vp->allocated && !is->videoq.abort_request) {
1249 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1251 SDL_UnlockMutex(is->pictq_mutex);
1253 if (is->videoq.abort_request)
1254 return -1;
1257 /* if the frame is not skipped, then display it */
1258 if (vp->bmp) {
1259 /* get a pointer on the bitmap */
1260 SDL_LockYUVOverlay (vp->bmp);
1262 dst_pix_fmt = PIX_FMT_YUV420P;
1263 pict.data[0] = vp->bmp->pixels[0];
1264 pict.data[1] = vp->bmp->pixels[2];
1265 pict.data[2] = vp->bmp->pixels[1];
1267 pict.linesize[0] = vp->bmp->pitches[0];
1268 pict.linesize[1] = vp->bmp->pitches[2];
1269 pict.linesize[2] = vp->bmp->pitches[1];
1270 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1271 is->video_st->codec->width, is->video_st->codec->height,
1272 is->video_st->codec->pix_fmt,
1273 is->video_st->codec->width, is->video_st->codec->height,
1274 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1275 if (img_convert_ctx == NULL) {
1276 fprintf(stderr, "Cannot initialize the conversion context\n");
1277 exit(1);
1279 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1280 0, is->video_st->codec->height, pict.data, pict.linesize);
1281 /* update the bitmap content */
1282 SDL_UnlockYUVOverlay(vp->bmp);
1284 vp->pts = pts;
1286 /* now we can update the picture count */
1287 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1288 is->pictq_windex = 0;
1289 SDL_LockMutex(is->pictq_mutex);
1290 is->pictq_size++;
1291 SDL_UnlockMutex(is->pictq_mutex);
1293 return 0;
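/*
 * Note: queue_picture() never creates the SDL overlay itself; as the comments
 * above explain, the allocation has to happen in the main thread, so the
 * video thread posts an FF_ALLOC_EVENT and then waits on pictq_cond until the
 * event loop has called alloc_picture() and set vp->allocated.
 */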
1297 * compute the exact PTS for the picture if it is omitted in the stream
1298 * @param pts1 the dts of the pkt / pts of the frame
1300 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1302 double frame_delay, pts;
1304 pts = pts1;
1306 if (pts != 0) {
1307 /* update video clock with pts, if present */
1308 is->video_clock = pts;
1309 } else {
1310 pts = is->video_clock;
1312 /* update video clock for next frame */
1313 frame_delay = av_q2d(is->video_st->codec->time_base);
1314 /* for MPEG2, the frame can be repeated, so we update the
1315 clock accordingly */
1316 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1317 is->video_clock += frame_delay;
1319 #if defined(DEBUG_SYNC) && 0
1321 int ftype;
1322 if (src_frame->pict_type == FF_B_TYPE)
1323 ftype = 'B';
1324 else if (src_frame->pict_type == FF_I_TYPE)
1325 ftype = 'I';
1326 else
1327 ftype = 'P';
1328 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1329 ftype, pts, pts1);
1331 #endif
1332 return queue_picture(is, src_frame, pts);
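/*
 * In short: when the decoder provides a pts it resynchronizes
 * is->video_clock, otherwise the clock value predicted from the previous
 * frame is used. The prediction then advances by one frame duration,
 * stretched by repeat_pict half-frames for MPEG-2 style repeated fields:
 *     clock += time_base * (1 + repeat_pict * 0.5)
 */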
1335 static int video_thread(void *arg)
1337 VideoState *is = arg;
1338 AVPacket pkt1, *pkt = &pkt1;
1339 int len1, got_picture;
1340 AVFrame *frame= avcodec_alloc_frame();
1341 double pts;
1343 for(;;) {
1344 while (is->paused && !is->videoq.abort_request) {
1345 SDL_Delay(10);
1347 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1348 break;
1350 if(pkt->data == flush_pkt.data){
1351 avcodec_flush_buffers(is->video_st->codec);
1352 continue;
1355 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1356 this packet, if any */
1357 is->video_st->codec->reordered_opaque= pkt->pts;
1358 len1 = avcodec_decode_video(is->video_st->codec,
1359 frame, &got_picture,
1360 pkt->data, pkt->size);
1362 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1363 && frame->reordered_opaque != AV_NOPTS_VALUE)
1364 pts= frame->reordered_opaque;
1365 else if(pkt->dts != AV_NOPTS_VALUE)
1366 pts= pkt->dts;
1367 else
1368 pts= 0;
1369 pts *= av_q2d(is->video_st->time_base);
1371 // if (len1 < 0)
1372 // break;
1373 if (got_picture) {
1374 if (output_picture2(is, frame, pts) < 0)
1375 goto the_end;
1377 av_free_packet(pkt);
1378 if (step)
1379 if (cur_stream)
1380 stream_pause(cur_stream);
1382 the_end:
1383 av_free(frame);
1384 return 0;
1387 static int subtitle_thread(void *arg)
1389 VideoState *is = arg;
1390 SubPicture *sp;
1391 AVPacket pkt1, *pkt = &pkt1;
1392 int len1, got_subtitle;
1393 double pts;
1394 int i, j;
1395 int r, g, b, y, u, v, a;
1397 for(;;) {
1398 while (is->paused && !is->subtitleq.abort_request) {
1399 SDL_Delay(10);
1401 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1402 break;
1404 if(pkt->data == flush_pkt.data){
1405 avcodec_flush_buffers(is->subtitle_st->codec);
1406 continue;
1408 SDL_LockMutex(is->subpq_mutex);
1409 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1410 !is->subtitleq.abort_request) {
1411 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1413 SDL_UnlockMutex(is->subpq_mutex);
1415 if (is->subtitleq.abort_request)
1416 goto the_end;
1418 sp = &is->subpq[is->subpq_windex];
1420 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1421 this packet, if any */
1422 pts = 0;
1423 if (pkt->pts != AV_NOPTS_VALUE)
1424 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1426 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1427 &sp->sub, &got_subtitle,
1428 pkt->data, pkt->size);
1429 // if (len1 < 0)
1430 // break;
1431 if (got_subtitle && sp->sub.format == 0) {
1432 sp->pts = pts;
1434 for (i = 0; i < sp->sub.num_rects; i++)
1436 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1438 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1439 y = RGB_TO_Y_CCIR(r, g, b);
1440 u = RGB_TO_U_CCIR(r, g, b, 0);
1441 v = RGB_TO_V_CCIR(r, g, b, 0);
1442 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1446 /* now we can update the picture count */
1447 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1448 is->subpq_windex = 0;
1449 SDL_LockMutex(is->subpq_mutex);
1450 is->subpq_size++;
1451 SDL_UnlockMutex(is->subpq_mutex);
1453 av_free_packet(pkt);
1454 // if (step)
1455 // if (cur_stream)
1456 // stream_pause(cur_stream);
1458 the_end:
1459 return 0;
1462 /* copy samples for viewing in the audio display window */
1463 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1465 int size, len, channels;
1467 channels = is->audio_st->codec->channels;
1469 size = samples_size / sizeof(short);
1470 while (size > 0) {
1471 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1472 if (len > size)
1473 len = size;
1474 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1475 samples += len;
1476 is->sample_array_index += len;
1477 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1478 is->sample_array_index = 0;
1479 size -= len;
1483 /* return the new audio buffer size (samples can be added or deleted
1484 to get better sync if the video or an external clock is the master) */
1485 static int synchronize_audio(VideoState *is, short *samples,
1486 int samples_size1, double pts)
1488 int n, samples_size;
1489 double ref_clock;
1491 n = 2 * is->audio_st->codec->channels;
1492 samples_size = samples_size1;
1494 /* if not master, then we try to remove or add samples to correct the clock */
1495 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1496 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1497 double diff, avg_diff;
1498 int wanted_size, min_size, max_size, nb_samples;
1500 ref_clock = get_master_clock(is);
1501 diff = get_audio_clock(is) - ref_clock;
1503 if (diff < AV_NOSYNC_THRESHOLD) {
1504 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1505 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1506 /* not enough measures to have a correct estimate */
1507 is->audio_diff_avg_count++;
1508 } else {
1509 /* estimate the A-V difference */
1510 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1512 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1513 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1514 nb_samples = samples_size / n;
1516 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1517 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1518 if (wanted_size < min_size)
1519 wanted_size = min_size;
1520 else if (wanted_size > max_size)
1521 wanted_size = max_size;
1523 /* add or remove samples to correct the synchronisation */
1524 if (wanted_size < samples_size) {
1525 /* remove samples */
1526 samples_size = wanted_size;
1527 } else if (wanted_size > samples_size) {
1528 uint8_t *samples_end, *q;
1529 int nb;
1531 /* add samples */
1532 nb = (wanted_size - samples_size);
1533 samples_end = (uint8_t *)samples + samples_size - n;
1534 q = samples_end + n;
1535 while (nb > 0) {
1536 memcpy(q, samples_end, n);
1537 q += n;
1538 nb -= n;
1540 samples_size = wanted_size;
1543 #if 0
1544 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1545 diff, avg_diff, samples_size - samples_size1,
1546 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1547 #endif
1549 } else {
1550 /* the difference is too large: it is probably due to initial PTS errors,
1551 so reset the A-V filter */
1552 is->audio_diff_avg_count = 0;
1553 is->audio_diff_cum = 0;
1557 return samples_size;
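/*
 * Correction math used above: the smoothed A-V error is
 *     avg_diff = audio_diff_cum * (1 - audio_diff_avg_coef)
 * (an exponential moving average over roughly AUDIO_DIFF_AVG_NB buffers).
 * When it exceeds audio_diff_threshold, the buffer is resized by
 *     diff * sample_rate * n          (n = bytes per sample frame)
 * bytes, clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX percent; shrinking
 * drops trailing samples, growing repeats the last sample frame.
 */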
1560 /* decode one audio frame and return its uncompressed size */
1561 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1563 AVPacket *pkt = &is->audio_pkt;
1564 AVCodecContext *dec= is->audio_st->codec;
1565 int n, len1, data_size;
1566 double pts;
1568 for(;;) {
1569 /* NOTE: the audio packet can contain several frames */
1570 while (is->audio_pkt_size > 0) {
1571 data_size = sizeof(is->audio_buf1);
1572 len1 = avcodec_decode_audio2(dec,
1573 (int16_t *)is->audio_buf1, &data_size,
1574 is->audio_pkt_data, is->audio_pkt_size);
1575 if (len1 < 0) {
1576 /* if error, we skip the frame */
1577 is->audio_pkt_size = 0;
1578 break;
1581 is->audio_pkt_data += len1;
1582 is->audio_pkt_size -= len1;
1583 if (data_size <= 0)
1584 continue;
1586 if (dec->sample_fmt != is->audio_src_fmt) {
1587 if (is->reformat_ctx)
1588 av_audio_convert_free(is->reformat_ctx);
1589 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1590 dec->sample_fmt, 1, NULL, 0);
1591 if (!is->reformat_ctx) {
1592 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1593 avcodec_get_sample_fmt_name(dec->sample_fmt),
1594 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1595 break;
1597 is->audio_src_fmt= dec->sample_fmt;
1600 if (is->reformat_ctx) {
1601 const void *ibuf[6]= {is->audio_buf1};
1602 void *obuf[6]= {is->audio_buf2};
1603 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1604 int ostride[6]= {2};
1605 int len= data_size/istride[0];
1606 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1607 printf("av_audio_convert() failed\n");
1608 break;
1610 is->audio_buf= is->audio_buf2;
1611 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
1612 remove this legacy cruft */
1613 data_size= len*2;
1614 }else{
1615 is->audio_buf= is->audio_buf1;
1618 /* if no pts, then compute it */
1619 pts = is->audio_clock;
1620 *pts_ptr = pts;
1621 n = 2 * dec->channels;
1622 is->audio_clock += (double)data_size /
1623 (double)(n * dec->sample_rate);
1624 #if defined(DEBUG_SYNC)
1626 static double last_clock;
1627 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1628 is->audio_clock - last_clock,
1629 is->audio_clock, pts);
1630 last_clock = is->audio_clock;
1632 #endif
1633 return data_size;
1636 /* free the current packet */
1637 if (pkt->data)
1638 av_free_packet(pkt);
1640 if (is->paused || is->audioq.abort_request) {
1641 return -1;
1644 /* read next packet */
1645 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1646 return -1;
1647 if(pkt->data == flush_pkt.data){
1648 avcodec_flush_buffers(dec);
1649 continue;
1652 is->audio_pkt_data = pkt->data;
1653 is->audio_pkt_size = pkt->size;
1655 /* update the audio clock with the packet pts, if available */
1656 if (pkt->pts != AV_NOPTS_VALUE) {
1657 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
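/*
 * audio_decode_frame() loops over the packet queue until a non-empty frame
 * comes out: it keeps decoding the current packet (a packet may contain
 * several frames), converts the output to signed 16-bit with
 * av_audio_convert() when the codec produces another sample format, and
 * advances is->audio_clock by the duration of the returned data so that
 * get_audio_clock() stays accurate.
 */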
1662 /* get the current audio output buffer size, in bytes. With SDL, we
1663 cannot get precise information */
1664 static int audio_write_get_buf_size(VideoState *is)
1666 return is->audio_buf_size - is->audio_buf_index;
1670 /* prepare a new audio buffer */
1671 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1673 VideoState *is = opaque;
1674 int audio_size, len1;
1675 double pts;
1677 audio_callback_time = av_gettime();
1679 while (len > 0) {
1680 if (is->audio_buf_index >= is->audio_buf_size) {
1681 audio_size = audio_decode_frame(is, &pts);
1682 if (audio_size < 0) {
1683 /* if error, just output silence */
1684 is->audio_buf_size = 1024;
1685 memset(is->audio_buf, 0, is->audio_buf_size);
1686 } else {
1687 if (is->show_audio)
1688 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1689 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1690 pts);
1691 is->audio_buf_size = audio_size;
1693 is->audio_buf_index = 0;
1695 len1 = is->audio_buf_size - is->audio_buf_index;
1696 if (len1 > len)
1697 len1 = len;
1698 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1699 len -= len1;
1700 stream += len1;
1701 is->audio_buf_index += len1;
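/*
 * The SDL callback above just slices previously decoded data into the device
 * buffer: whenever the local buffer runs dry it calls audio_decode_frame(),
 * lets synchronize_audio() stretch or shrink the result, and outputs silence
 * if decoding fails so playback never stalls.
 */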
1705 /* open a given stream. Return 0 if OK */
1706 static int stream_component_open(VideoState *is, int stream_index)
1708 AVFormatContext *ic = is->ic;
1709 AVCodecContext *enc;
1710 AVCodec *codec;
1711 SDL_AudioSpec wanted_spec, spec;
1713 if (stream_index < 0 || stream_index >= ic->nb_streams)
1714 return -1;
1715 enc = ic->streams[stream_index]->codec;
1717 /* prepare audio output */
1718 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1719 if (enc->channels > 0) {
1720 enc->request_channels = FFMIN(2, enc->channels);
1721 } else {
1722 enc->request_channels = 2;
1726 codec = avcodec_find_decoder(enc->codec_id);
1727 enc->debug_mv = debug_mv;
1728 enc->debug = debug;
1729 enc->workaround_bugs = workaround_bugs;
1730 enc->lowres = lowres;
1731 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1732 enc->idct_algo= idct;
1733 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1734 enc->skip_frame= skip_frame;
1735 enc->skip_idct= skip_idct;
1736 enc->skip_loop_filter= skip_loop_filter;
1737 enc->error_resilience= error_resilience;
1738 enc->error_concealment= error_concealment;
1739 if (!codec ||
1740 avcodec_open(enc, codec) < 0)
1741 return -1;
1743 /* prepare audio output */
1744 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1745 wanted_spec.freq = enc->sample_rate;
1746 wanted_spec.format = AUDIO_S16SYS;
1747 wanted_spec.channels = enc->channels;
1748 wanted_spec.silence = 0;
1749 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1750 wanted_spec.callback = sdl_audio_callback;
1751 wanted_spec.userdata = is;
1752 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1753 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1754 return -1;
1756 is->audio_hw_buf_size = spec.size;
1757 is->audio_src_fmt= SAMPLE_FMT_S16;
1760 if(thread_count>1)
1761 avcodec_thread_init(enc, thread_count);
1762 enc->thread_count= thread_count;
1763 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1764 switch(enc->codec_type) {
1765 case CODEC_TYPE_AUDIO:
1766 is->audio_stream = stream_index;
1767 is->audio_st = ic->streams[stream_index];
1768 is->audio_buf_size = 0;
1769 is->audio_buf_index = 0;
1771 /* init averaging filter */
1772 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1773 is->audio_diff_avg_count = 0;
1774 /* since we do not have a precise enough measure of the audio FIFO fullness,
1775 we correct audio sync only if the error is larger than this threshold */
1776 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1778 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1779 packet_queue_init(&is->audioq);
1780 SDL_PauseAudio(0);
1781 break;
1782 case CODEC_TYPE_VIDEO:
1783 is->video_stream = stream_index;
1784 is->video_st = ic->streams[stream_index];
1786 is->frame_last_delay = 40e-3;
1787 is->frame_timer = (double)av_gettime() / 1000000.0;
1788 is->video_current_pts_time = av_gettime();
1790 packet_queue_init(&is->videoq);
1791 is->video_tid = SDL_CreateThread(video_thread, is);
1792 break;
1793 case CODEC_TYPE_SUBTITLE:
1794 is->subtitle_stream = stream_index;
1795 is->subtitle_st = ic->streams[stream_index];
1796 packet_queue_init(&is->subtitleq);
1798 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1799 break;
1800 default:
1801 break;
1803 return 0;
1806 static void stream_component_close(VideoState *is, int stream_index)
1808 AVFormatContext *ic = is->ic;
1809 AVCodecContext *enc;
1811 if (stream_index < 0 || stream_index >= ic->nb_streams)
1812 return;
1813 enc = ic->streams[stream_index]->codec;
1815 switch(enc->codec_type) {
1816 case CODEC_TYPE_AUDIO:
1817 packet_queue_abort(&is->audioq);
1819 SDL_CloseAudio();
1821 packet_queue_end(&is->audioq);
1822 if (is->reformat_ctx)
1823 av_audio_convert_free(is->reformat_ctx);
1824 break;
1825 case CODEC_TYPE_VIDEO:
1826 packet_queue_abort(&is->videoq);
1828 /* note: we also signal this mutex to make sure we unblock the
1829 video thread in all cases */
1830 SDL_LockMutex(is->pictq_mutex);
1831 SDL_CondSignal(is->pictq_cond);
1832 SDL_UnlockMutex(is->pictq_mutex);
1834 SDL_WaitThread(is->video_tid, NULL);
1836 packet_queue_end(&is->videoq);
1837 break;
1838 case CODEC_TYPE_SUBTITLE:
1839 packet_queue_abort(&is->subtitleq);
1841 /* note: we also signal this mutex to make sure we unblock the
1842 subtitle thread in all cases */
1843 SDL_LockMutex(is->subpq_mutex);
1844 is->subtitle_stream_changed = 1;
1846 SDL_CondSignal(is->subpq_cond);
1847 SDL_UnlockMutex(is->subpq_mutex);
1849 SDL_WaitThread(is->subtitle_tid, NULL);
1851 packet_queue_end(&is->subtitleq);
1852 break;
1853 default:
1854 break;
1857 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1858 avcodec_close(enc);
1859 switch(enc->codec_type) {
1860 case CODEC_TYPE_AUDIO:
1861 is->audio_st = NULL;
1862 is->audio_stream = -1;
1863 break;
1864 case CODEC_TYPE_VIDEO:
1865 is->video_st = NULL;
1866 is->video_stream = -1;
1867 break;
1868 case CODEC_TYPE_SUBTITLE:
1869 is->subtitle_st = NULL;
1870 is->subtitle_stream = -1;
1871 break;
1872 default:
1873 break;
1877 static void dump_stream_info(const AVFormatContext *s)
1879 if (s->track != 0)
1880 fprintf(stderr, "Track: %d\n", s->track);
1881 if (s->title[0] != '\0')
1882 fprintf(stderr, "Title: %s\n", s->title);
1883 if (s->author[0] != '\0')
1884 fprintf(stderr, "Author: %s\n", s->author);
1885 if (s->copyright[0] != '\0')
1886 fprintf(stderr, "Copyright: %s\n", s->copyright);
1887 if (s->comment[0] != '\0')
1888 fprintf(stderr, "Comment: %s\n", s->comment);
1889 if (s->album[0] != '\0')
1890 fprintf(stderr, "Album: %s\n", s->album);
1891 if (s->year != 0)
1892 fprintf(stderr, "Year: %d\n", s->year);
1893 if (s->genre[0] != '\0')
1894 fprintf(stderr, "Genre: %s\n", s->genre);
1897 /* since we have only one decoding thread, we can use a global
1898 variable instead of a thread local variable */
1899 static VideoState *global_video_state;
1901 static int decode_interrupt_cb(void)
1903 return (global_video_state && global_video_state->abort_request);
1906 /* this thread gets the stream from the disk or the network */
1907 static int decode_thread(void *arg)
1909 VideoState *is = arg;
1910 AVFormatContext *ic;
1911 int err, i, ret, video_index, audio_index;
1912 AVPacket pkt1, *pkt = &pkt1;
1913 AVFormatParameters params, *ap = &params;
1915 video_index = -1;
1916 audio_index = -1;
1917 is->video_stream = -1;
1918 is->audio_stream = -1;
1919 is->subtitle_stream = -1;
1921 global_video_state = is;
1922 url_set_interrupt_cb(decode_interrupt_cb);
1924 memset(ap, 0, sizeof(*ap));
1926 ap->width = frame_width;
1927 ap->height= frame_height;
1928 ap->time_base= (AVRational){1, 25};
1929 ap->pix_fmt = frame_pix_fmt;
1931 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1932 if (err < 0) {
1933 print_error(is->filename, err);
1934 ret = -1;
1935 goto fail;
1937 is->ic = ic;
1939 if(genpts)
1940 ic->flags |= AVFMT_FLAG_GENPTS;
1942 err = av_find_stream_info(ic);
1943 if (err < 0) {
1944 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1945 ret = -1;
1946 goto fail;
1948 if(ic->pb)
1949 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1951 /* if seeking requested, we execute it */
1952 if (start_time != AV_NOPTS_VALUE) {
1953 int64_t timestamp;
1955 timestamp = start_time;
1956 /* add the stream start time */
1957 if (ic->start_time != AV_NOPTS_VALUE)
1958 timestamp += ic->start_time;
1959 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1960 if (ret < 0) {
1961 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1962 is->filename, (double)timestamp / AV_TIME_BASE);
1966 for(i = 0; i < ic->nb_streams; i++) {
1967 AVCodecContext *enc = ic->streams[i]->codec;
1968 ic->streams[i]->discard = AVDISCARD_ALL;
1969 switch(enc->codec_type) {
1970 case CODEC_TYPE_AUDIO:
1971 if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1972 audio_index = i;
1973 break;
1974 case CODEC_TYPE_VIDEO:
1975 if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
1976 video_index = i;
1977 break;
1978 default:
1979 break;
1982 if (show_status) {
1983 dump_format(ic, 0, is->filename, 0);
1984 dump_stream_info(ic);
1987 /* open the streams */
1988 if (audio_index >= 0) {
1989 stream_component_open(is, audio_index);
1992 if (video_index >= 0) {
1993 stream_component_open(is, video_index);
1994 } else {
1995 if (!display_disable)
1996 is->show_audio = 1;
1999 if (is->video_stream < 0 && is->audio_stream < 0) {
2000 fprintf(stderr, "%s: could not open codecs\n", is->filename);
2001 ret = -1;
2002 goto fail;
2005 for(;;) {
2006 if (is->abort_request)
2007 break;
2008 if (is->paused != is->last_paused) {
2009 is->last_paused = is->paused;
2010 if (is->paused)
2011 av_read_pause(ic);
2012 else
2013 av_read_play(ic);
2015 #if defined(CONFIG_RTSP_DEMUXER) || defined(CONFIG_MMSH_PROTOCOL)
2016 if (is->paused &&
2017 (!strcmp(ic->iformat->name, "rtsp") ||
2018 (ic->pb && !strcmp(url_fileno(ic->pb)->prot->name, "mmsh")))) {
2019 /* wait 10 ms to avoid trying to get another packet */
2020 /* XXX: horrible */
2021 SDL_Delay(10);
2022 continue;
2024 #endif
2025 if (is->seek_req) {
2026 int stream_index= -1;
2027 int64_t seek_target= is->seek_pos;
2029 if (is-> video_stream >= 0) stream_index= is-> video_stream;
2030 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
2031 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
2033 if(stream_index>=0){
2034 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2037 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2038 if (ret < 0) {
2039 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2040 }else{
2041 if (is->audio_stream >= 0) {
2042 packet_queue_flush(&is->audioq);
2043 packet_queue_put(&is->audioq, &flush_pkt);
2045 if (is->subtitle_stream >= 0) {
2046 packet_queue_flush(&is->subtitleq);
2047 packet_queue_put(&is->subtitleq, &flush_pkt);
2049 if (is->video_stream >= 0) {
2050 packet_queue_flush(&is->videoq);
2051 packet_queue_put(&is->videoq, &flush_pkt);
2054 is->seek_req = 0;
2057 /* if the queues are full, no need to read more */
2058 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2059 is->videoq.size > MAX_VIDEOQ_SIZE ||
2060 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2061 url_feof(ic->pb)) {
2062 /* wait 10 ms */
2063 SDL_Delay(10);
2064 continue;
2066 ret = av_read_frame(ic, pkt);
2067 if (ret < 0) {
2068 if (url_ferror(ic->pb) == 0) {
2069 SDL_Delay(100); /* wait for user event */
2070 continue;
2071 } else
2072 break;
2074 if (pkt->stream_index == is->audio_stream) {
2075 packet_queue_put(&is->audioq, pkt);
2076 } else if (pkt->stream_index == is->video_stream) {
2077 packet_queue_put(&is->videoq, pkt);
2078 } else if (pkt->stream_index == is->subtitle_stream) {
2079 packet_queue_put(&is->subtitleq, pkt);
2080 } else {
2081 av_free_packet(pkt);
2084 /* wait until the end */
2085 while (!is->abort_request) {
2086 SDL_Delay(100);
2089 ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

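/* allocate a VideoState for the given input, start the decode thread
   and hand the structure back to the caller, who releases it with
   stream_close() */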
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

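/* ask the decode thread to quit, wait for it and release the SDL
   overlays, mutexes and condition variables owned by the stream */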
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
}

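/* switch to the next stream of the given type (audio, video or
   subtitle); wraps around, and keeps the current stream when no other
   usable one is found */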
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams) {
            if (codec_type == CODEC_TYPE_SUBTITLE) {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}

static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused, unpause it and then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: // S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
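            /* the arrow keys seek relative to the current position:
               left/right by 10 seconds, up/down by one minute; with
               -bytes the increment is turned into an approximate byte
               offset from the container bit rate */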
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
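        /* a mouse click seeks to the fraction of the total duration
           corresponding to the click position within the window width */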
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)\n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width  = cur_stream->width  = event.resize.w;
                screen_height = cur_stream->height = event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

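/* command line option callbacks referenced by the options[] table below */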
static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

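/* command line options understood by ffplay, consumed by parse_options()
   in main() */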
static const OptionDef options[] = {
    { "h", OPT_EXIT, {(void*)show_help}, "show help" },
    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", "" },
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { NULL, },
};

static void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* program entry point */
int main(int argc, char **argv)
{
    int flags;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_help();
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init(flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

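    /* these events are never acted upon, so drop them at the SDL level
       instead of waking up the event loop for nothing */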
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

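    /* flush_pkt is a sentinel packet: after a seek it is pushed into each
       packet queue to tell the decoders to flush their internal buffers */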
    av_init_packet(&flush_pkt);
    flush_pkt.data = (uint8_t *)"FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}