1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
30 #include "libavcodec/opt.h"
32 #include "cmdutils.h"
34 #include <SDL.h>
35 #include <SDL_thread.h>
37 #ifdef __MINGW32__
38 #undef main /* We don't want SDL to override our main() */
39 #endif
41 #undef exit
43 const char program_name[] = "FFplay";
44 const int program_birth_year = 2003;
46 //#define DEBUG_SYNC
48 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
49 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
50 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
52 /* SDL audio buffer size, in samples. Should be small to have precise
53 A/V sync as SDL does not have hardware buffer fullness info. */
54 #define SDL_AUDIO_BUFFER_SIZE 1024
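/* For illustration: at a 44100 Hz sample rate, 1024 samples per callback
   correspond to roughly 1024/44100 ~= 23 ms of audio, which gives the order of
   magnitude of the scheduling granularity the A/V sync code has to live with. */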
56 /* no AV sync correction is done if below the AV sync threshold */
57 #define AV_SYNC_THRESHOLD 0.01
58 /* no AV correction is done if the error is too big */
59 #define AV_NOSYNC_THRESHOLD 10.0
61 /* maximum audio speed change to get correct sync */
62 #define SAMPLE_CORRECTION_PERCENT_MAX 10
64 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
65 #define AUDIO_DIFF_AVG_NB 20
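/* The average is exponentially weighted: stream_component_open() sets
   audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), so the last
   AUDIO_DIFF_AVG_NB differences carry about 99% of the total weight. */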
67 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
68 #define SAMPLE_ARRAY_SIZE (2*65536)
70 static int sws_flags = SWS_BICUBIC;
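/* A PacketQueue is a thread-safe FIFO of demuxed AVPackets, protected by an
   SDL mutex/condition pair.  The demuxer thread feeds one queue per selected
   stream and the decoder threads (or the SDL audio callback) drain them;
   abort_request lets a blocked reader be woken up on shutdown. */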
72 typedef struct PacketQueue {
73 AVPacketList *first_pkt, *last_pkt;
74 int nb_packets;
75 int size;
76 int abort_request;
77 SDL_mutex *mutex;
78 SDL_cond *cond;
79 } PacketQueue;
81 #define VIDEO_PICTURE_QUEUE_SIZE 1
82 #define SUBPICTURE_QUEUE_SIZE 4
84 typedef struct VideoPicture {
85 double pts; ///<presentation time stamp for this picture
86 SDL_Overlay *bmp;
87 int width, height; /* source height & width */
88 int allocated;
89 } VideoPicture;
91 typedef struct SubPicture {
92 double pts; /* presentation time stamp for this picture */
93 AVSubtitle sub;
94 } SubPicture;
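/* Master clock selection: by default the audio clock drives A/V sync; the
   video clock or an external (wall-clock based) clock can be used instead.
   VideoState below groups everything belonging to the currently opened file:
   demuxer context, per-stream decoder state, packet queues, picture and
   subtitle queues, and the various clocks. */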
96 enum {
97 AV_SYNC_AUDIO_MASTER, /* default choice */
98 AV_SYNC_VIDEO_MASTER,
99 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
102 typedef struct VideoState {
103 SDL_Thread *parse_tid;
104 SDL_Thread *video_tid;
105 AVInputFormat *iformat;
106 int no_background;
107 int abort_request;
108 int paused;
109 int last_paused;
110 int seek_req;
111 int seek_flags;
112 int64_t seek_pos;
113 AVFormatContext *ic;
114 int dtg_active_format;
116 int audio_stream;
118 int av_sync_type;
119 double external_clock; /* external clock base */
120 int64_t external_clock_time;
122 double audio_clock;
123 double audio_diff_cum; /* used for AV difference average computation */
124 double audio_diff_avg_coef;
125 double audio_diff_threshold;
126 int audio_diff_avg_count;
127 AVStream *audio_st;
128 PacketQueue audioq;
129 int audio_hw_buf_size;
130 /* samples output by the codec. We reserve more space for A/V sync
131 compensation */
132 DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
133 DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
134 uint8_t *audio_buf;
135 unsigned int audio_buf_size; /* in bytes */
136 int audio_buf_index; /* in bytes */
137 AVPacket audio_pkt;
138 uint8_t *audio_pkt_data;
139 int audio_pkt_size;
140 enum SampleFormat audio_src_fmt;
141 AVAudioConvert *reformat_ctx;
143 int show_audio; /* if true, display audio samples */
144 int16_t sample_array[SAMPLE_ARRAY_SIZE];
145 int sample_array_index;
146 int last_i_start;
148 SDL_Thread *subtitle_tid;
149 int subtitle_stream;
150 int subtitle_stream_changed;
151 AVStream *subtitle_st;
152 PacketQueue subtitleq;
153 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
154 int subpq_size, subpq_rindex, subpq_windex;
155 SDL_mutex *subpq_mutex;
156 SDL_cond *subpq_cond;
158 double frame_timer;
159 double frame_last_pts;
160 double frame_last_delay;
161 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
162 int video_stream;
163 AVStream *video_st;
164 PacketQueue videoq;
165 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
166 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
167 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
168 int pictq_size, pictq_rindex, pictq_windex;
169 SDL_mutex *pictq_mutex;
170 SDL_cond *pictq_cond;
172 // QETimer *video_timer;
173 char filename[1024];
174 int width, height, xleft, ytop;
175 } VideoState;
177 static void show_help(void);
178 static int audio_write_get_buf_size(VideoState *is);
180 /* options specified by the user */
181 static AVInputFormat *file_iformat;
182 static const char *input_filename;
183 static int fs_screen_width;
184 static int fs_screen_height;
185 static int screen_width = 0;
186 static int screen_height = 0;
187 static int frame_width = 0;
188 static int frame_height = 0;
189 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
190 static int audio_disable;
191 static int video_disable;
192 static int wanted_audio_stream= 0;
193 static int wanted_video_stream= 0;
194 static int wanted_subtitle_stream= -1;
195 static int seek_by_bytes;
196 static int display_disable;
197 static int show_status;
198 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
199 static int64_t start_time = AV_NOPTS_VALUE;
200 static int debug = 0;
201 static int debug_mv = 0;
202 static int step = 0;
203 static int thread_count = 1;
204 static int workaround_bugs = 1;
205 static int fast = 0;
206 static int genpts = 0;
207 static int lowres = 0;
208 static int idct = FF_IDCT_AUTO;
209 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
210 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
211 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
212 static int error_recognition = FF_ER_CAREFUL;
213 static int error_concealment = 3;
214 static int decoder_reorder_pts= 0;
216 /* current context */
217 static int is_full_screen;
218 static VideoState *cur_stream;
219 static int64_t audio_callback_time;
221 static AVPacket flush_pkt;
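/* flush_pkt is a sentinel packet: after a seek it is pushed into each packet
   queue, and when a decoder thread sees its data pointer it calls
   avcodec_flush_buffers() instead of decoding. */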
223 #define FF_ALLOC_EVENT (SDL_USEREVENT)
224 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
225 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
227 static SDL_Surface *screen;
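/* Threading model: decode_thread() demuxes the input and distributes packets
   to the audio, video and subtitle queues; video_thread() and
   subtitle_thread() decode from their queues, while audio is decoded on
   demand from sdl_audio_callback(); the main thread owns the SDL surface and
   handles events, refresh timers and display. */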
229 /* packet queue handling */
230 static void packet_queue_init(PacketQueue *q)
232 memset(q, 0, sizeof(PacketQueue));
233 q->mutex = SDL_CreateMutex();
234 q->cond = SDL_CreateCond();
237 static void packet_queue_flush(PacketQueue *q)
239 AVPacketList *pkt, *pkt1;
241 SDL_LockMutex(q->mutex);
242 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
243 pkt1 = pkt->next;
244 av_free_packet(&pkt->pkt);
245 av_freep(&pkt);
247 q->last_pkt = NULL;
248 q->first_pkt = NULL;
249 q->nb_packets = 0;
250 q->size = 0;
251 SDL_UnlockMutex(q->mutex);
254 static void packet_queue_end(PacketQueue *q)
256 packet_queue_flush(q);
257 SDL_DestroyMutex(q->mutex);
258 SDL_DestroyCond(q->cond);
261 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
263 AVPacketList *pkt1;
265 /* duplicate the packet */
266 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
267 return -1;
269 pkt1 = av_malloc(sizeof(AVPacketList));
270 if (!pkt1)
271 return -1;
272 pkt1->pkt = *pkt;
273 pkt1->next = NULL;
276 SDL_LockMutex(q->mutex);
278 if (!q->last_pkt)
280 q->first_pkt = pkt1;
281 else
282 q->last_pkt->next = pkt1;
283 q->last_pkt = pkt1;
284 q->nb_packets++;
285 q->size += pkt1->pkt.size;
286 /* XXX: should duplicate packet data in DV case */
287 SDL_CondSignal(q->cond);
289 SDL_UnlockMutex(q->mutex);
290 return 0;
293 static void packet_queue_abort(PacketQueue *q)
295 SDL_LockMutex(q->mutex);
297 q->abort_request = 1;
299 SDL_CondSignal(q->cond);
301 SDL_UnlockMutex(q->mutex);
304 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
305 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
307 AVPacketList *pkt1;
308 int ret;
310 SDL_LockMutex(q->mutex);
312 for(;;) {
313 if (q->abort_request) {
314 ret = -1;
315 break;
318 pkt1 = q->first_pkt;
319 if (pkt1) {
320 q->first_pkt = pkt1->next;
321 if (!q->first_pkt)
322 q->last_pkt = NULL;
323 q->nb_packets--;
324 q->size -= pkt1->pkt.size;
325 *pkt = pkt1->pkt;
326 av_free(pkt1);
327 ret = 1;
328 break;
329 } else if (!block) {
330 ret = 0;
331 break;
332 } else {
333 SDL_CondWait(q->cond, q->mutex);
336 SDL_UnlockMutex(q->mutex);
337 return ret;
340 static inline void fill_rectangle(SDL_Surface *screen,
341 int x, int y, int w, int h, int color)
343 SDL_Rect rect;
344 rect.x = x;
345 rect.y = y;
346 rect.w = w;
347 rect.h = h;
348 SDL_FillRect(screen, &rect, color);
351 #if 0
352 /* draw only the border of a rectangle */
353 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
355 int w1, w2, h1, h2;
357 /* fill the background */
358 w1 = x;
359 if (w1 < 0)
360 w1 = 0;
361 w2 = s->width - (x + w);
362 if (w2 < 0)
363 w2 = 0;
364 h1 = y;
365 if (h1 < 0)
366 h1 = 0;
367 h2 = s->height - (y + h);
368 if (h2 < 0)
369 h2 = 0;
370 fill_rectangle(screen,
371 s->xleft, s->ytop,
372 w1, s->height,
373 color);
374 fill_rectangle(screen,
375 s->xleft + s->width - w2, s->ytop,
376 w2, s->height,
377 color);
378 fill_rectangle(screen,
379 s->xleft + w1, s->ytop,
380 s->width - w1 - w2, h1,
381 color);
382 fill_rectangle(screen,
383 s->xleft + w1, s->ytop + s->height - h2,
384 s->width - w1 - w2, h2,
385 color);
387 #endif
391 #define SCALEBITS 10
392 #define ONE_HALF (1 << (SCALEBITS - 1))
393 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
395 #define RGB_TO_Y_CCIR(r, g, b) \
396 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
397 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
399 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
400 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
401 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
403 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
404 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
405 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
407 #define ALPHA_BLEND(a, oldp, newp, s)\
408 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
410 #define RGBA_IN(r, g, b, a, s)\
412 unsigned int v = ((const uint32_t *)(s))[0];\
413 a = (v >> 24) & 0xff;\
414 r = (v >> 16) & 0xff;\
415 g = (v >> 8) & 0xff;\
416 b = v & 0xff;\
419 #define YUVA_IN(y, u, v, a, s, pal)\
421 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
422 a = (val >> 24) & 0xff;\
423 y = (val >> 16) & 0xff;\
424 u = (val >> 8) & 0xff;\
425 v = val & 0xff;\
428 #define YUVA_OUT(d, y, u, v, a)\
430 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
434 #define BPP 1
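/* blend_subrect() alpha-blends a palettized subtitle rectangle (whose palette
   has already been converted to YUVA by the subtitle thread) into the YV12
   video picture, taking care of the 2x2 chroma subsampling and of odd
   offsets and sizes at the rectangle borders. */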
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
438 int wrap, wrap3, width2, skip2;
439 int y, u, v, a, u1, v1, a1, w, h;
440 uint8_t *lum, *cb, *cr;
441 const uint8_t *p;
442 const uint32_t *pal;
443 int dstx, dsty, dstw, dsth;
445 dstw = av_clip(rect->w, 0, imgw);
446 dsth = av_clip(rect->h, 0, imgh);
447 dstx = av_clip(rect->x, 0, imgw - dstw);
448 dsty = av_clip(rect->y, 0, imgh - dsth);
449 lum = dst->data[0] + dsty * dst->linesize[0];
450 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
453 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454 skip2 = dstx >> 1;
455 wrap = dst->linesize[0];
456 wrap3 = rect->pict.linesize[0];
457 p = rect->pict.data[0];
458 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
460 if (dsty & 1) {
461 lum += dstx;
462 cb += skip2;
463 cr += skip2;
465 if (dstx & 1) {
466 YUVA_IN(y, u, v, a, p, pal);
467 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470 cb++;
471 cr++;
472 lum++;
473 p += BPP;
475 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
476 YUVA_IN(y, u, v, a, p, pal);
477 u1 = u;
478 v1 = v;
479 a1 = a;
480 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
482 YUVA_IN(y, u, v, a, p + BPP, pal);
483 u1 += u;
484 v1 += v;
485 a1 += a;
486 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489 cb++;
490 cr++;
491 p += 2 * BPP;
492 lum += 2;
494 if (w) {
495 YUVA_IN(y, u, v, a, p, pal);
496 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499 p++;
500 lum++;
502 p += wrap3 - dstw * BPP;
503 lum += wrap - dstw - dstx;
504 cb += dst->linesize[1] - width2 - skip2;
505 cr += dst->linesize[2] - width2 - skip2;
507 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
508 lum += dstx;
509 cb += skip2;
510 cr += skip2;
512 if (dstx & 1) {
513 YUVA_IN(y, u, v, a, p, pal);
514 u1 = u;
515 v1 = v;
516 a1 = a;
517 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518 p += wrap3;
519 lum += wrap;
520 YUVA_IN(y, u, v, a, p, pal);
521 u1 += u;
522 v1 += v;
523 a1 += a;
524 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527 cb++;
528 cr++;
529 p += -wrap3 + BPP;
530 lum += -wrap + 1;
532 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
533 YUVA_IN(y, u, v, a, p, pal);
534 u1 = u;
535 v1 = v;
536 a1 = a;
537 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539 YUVA_IN(y, u, v, a, p + BPP, pal);
540 u1 += u;
541 v1 += v;
542 a1 += a;
543 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544 p += wrap3;
545 lum += wrap;
547 YUVA_IN(y, u, v, a, p, pal);
548 u1 += u;
549 v1 += v;
550 a1 += a;
551 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
553 YUVA_IN(y, u, v, a, p + BPP, pal);
554 u1 += u;
555 v1 += v;
556 a1 += a;
557 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
559 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
562 cb++;
563 cr++;
564 p += -wrap3 + 2 * BPP;
565 lum += -wrap + 2;
567 if (w) {
568 YUVA_IN(y, u, v, a, p, pal);
569 u1 = u;
570 v1 = v;
571 a1 = a;
572 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573 p += wrap3;
574 lum += wrap;
575 YUVA_IN(y, u, v, a, p, pal);
576 u1 += u;
577 v1 += v;
578 a1 += a;
579 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582 cb++;
583 cr++;
584 p += -wrap3 + BPP;
585 lum += -wrap + 1;
587 p += wrap3 + (wrap3 - dstw * BPP);
588 lum += wrap + (wrap - dstw - dstx);
589 cb += dst->linesize[1] - width2 - skip2;
590 cr += dst->linesize[2] - width2 - skip2;
592 /* handle odd height */
593 if (h) {
594 lum += dstx;
595 cb += skip2;
596 cr += skip2;
598 if (dstx & 1) {
599 YUVA_IN(y, u, v, a, p, pal);
600 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603 cb++;
604 cr++;
605 lum++;
606 p += BPP;
608 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
609 YUVA_IN(y, u, v, a, p, pal);
610 u1 = u;
611 v1 = v;
612 a1 = a;
613 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615 YUVA_IN(y, u, v, a, p + BPP, pal);
616 u1 += u;
617 v1 += v;
618 a1 += a;
619 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1); /* use the accumulated chroma, as in the other branches */
621 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622 cb++;
623 cr++;
624 p += 2 * BPP;
625 lum += 2;
627 if (w) {
628 YUVA_IN(y, u, v, a, p, pal);
629 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
636 static void free_subpicture(SubPicture *sp)
638 int i;
640 for (i = 0; i < sp->sub.num_rects; i++)
642 av_freep(&sp->sub.rects[i]->pict.data[0]);
643 av_freep(&sp->sub.rects[i]->pict.data[1]);
644 av_freep(&sp->sub.rects[i]);
647 av_free(sp->sub.rects);
649 memset(&sp->sub, 0, sizeof(AVSubtitle));
652 static void video_image_display(VideoState *is)
654 VideoPicture *vp;
655 SubPicture *sp;
656 AVPicture pict;
657 float aspect_ratio;
658 int width, height, x, y;
659 SDL_Rect rect;
660 int i;
662 vp = &is->pictq[is->pictq_rindex];
663 if (vp->bmp) {
664 /* XXX: use variable in the frame */
665 if (is->video_st->sample_aspect_ratio.num)
666 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
667 else if (is->video_st->codec->sample_aspect_ratio.num)
668 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
669 else
670 aspect_ratio = 0;
671 if (aspect_ratio <= 0.0)
672 aspect_ratio = 1.0;
673 aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
674 /* if an active format is indicated, then it overrides the
675 mpeg format */
676 #if 0
677 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
678 is->dtg_active_format = is->video_st->codec->dtg_active_format;
679 printf("dtg_active_format=%d\n", is->dtg_active_format);
681 #endif
682 #if 0
683 switch(is->video_st->codec->dtg_active_format) {
684 case FF_DTG_AFD_SAME:
685 default:
686 /* nothing to do */
687 break;
688 case FF_DTG_AFD_4_3:
689 aspect_ratio = 4.0 / 3.0;
690 break;
691 case FF_DTG_AFD_16_9:
692 aspect_ratio = 16.0 / 9.0;
693 break;
694 case FF_DTG_AFD_14_9:
695 aspect_ratio = 14.0 / 9.0;
696 break;
697 case FF_DTG_AFD_4_3_SP_14_9:
698 aspect_ratio = 14.0 / 9.0;
699 break;
700 case FF_DTG_AFD_16_9_SP_14_9:
701 aspect_ratio = 14.0 / 9.0;
702 break;
703 case FF_DTG_AFD_SP_4_3:
704 aspect_ratio = 4.0 / 3.0;
705 break;
707 #endif
709 if (is->subtitle_st)
711 if (is->subpq_size > 0)
713 sp = &is->subpq[is->subpq_rindex];
715 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
717 SDL_LockYUVOverlay (vp->bmp);
719 pict.data[0] = vp->bmp->pixels[0];
720 pict.data[1] = vp->bmp->pixels[2];
721 pict.data[2] = vp->bmp->pixels[1];
723 pict.linesize[0] = vp->bmp->pitches[0];
724 pict.linesize[1] = vp->bmp->pitches[2];
725 pict.linesize[2] = vp->bmp->pitches[1];
727 for (i = 0; i < sp->sub.num_rects; i++)
728 blend_subrect(&pict, sp->sub.rects[i],
729 vp->bmp->w, vp->bmp->h);
731 SDL_UnlockYUVOverlay (vp->bmp);
737 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
738 height = is->height;
739 width = ((int)rint(height * aspect_ratio)) & ~1;
740 if (width > is->width) {
741 width = is->width;
742 height = ((int)rint(width / aspect_ratio)) & ~1;
744 x = (is->width - width) / 2;
745 y = (is->height - height) / 2;
746 if (!is->no_background) {
747 /* fill the background */
748 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
749 } else {
750 is->no_background = 0;
752 rect.x = is->xleft + x;
753 rect.y = is->ytop + y;
754 rect.w = width;
755 rect.h = height;
756 SDL_DisplayYUVOverlay(vp->bmp, &rect);
757 } else {
758 #if 0
759 fill_rectangle(screen,
760 is->xleft, is->ytop, is->width, is->height,
761 QERGB(0x00, 0x00, 0x00));
762 #endif
766 static inline int compute_mod(int a, int b)
768 a = a % b;
769 if (a >= 0)
770 return a;
771 else
772 return a + b;
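/* video_audio_display() draws a simple oscilloscope of the most recent
   samples, one trace per channel.  It estimates how far behind the speaker
   output is (write buffer size plus time elapsed since the last audio
   callback) so that the displayed window is centered on what is currently
   audible. */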
775 static void video_audio_display(VideoState *s)
777 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
778 int ch, channels, h, h2, bgcolor, fgcolor;
779 int64_t time_diff; /* av_gettime() works in microseconds; int16_t would overflow */
781 /* compute display index: center on the samples currently being output */
782 channels = s->audio_st->codec->channels;
783 nb_display_channels = channels;
784 if (!s->paused) {
785 n = 2 * channels;
786 delay = audio_write_get_buf_size(s);
787 delay /= n;
789 /* to be more precise, we take into account the time spent since
790 the last buffer computation */
791 if (audio_callback_time) {
792 time_diff = av_gettime() - audio_callback_time;
793 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
796 delay -= s->width / 2;
797 if (delay < s->width)
798 delay = s->width;
800 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
802 h= INT_MIN;
803 for(i=0; i<1000; i+=channels){
804 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
805 int a= s->sample_array[idx];
806 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
807 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
808 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
809 int score= a-d;
810 if(h<score && (b^c)<0){
811 h= score;
812 i_start= idx;
816 s->last_i_start = i_start;
817 } else {
818 i_start = s->last_i_start;
821 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
822 fill_rectangle(screen,
823 s->xleft, s->ytop, s->width, s->height,
824 bgcolor);
826 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
828 /* total height for one channel */
829 h = s->height / nb_display_channels;
830 /* graph height / 2 */
831 h2 = (h * 9) / 20;
832 for(ch = 0;ch < nb_display_channels; ch++) {
833 i = i_start + ch;
834 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
835 for(x = 0; x < s->width; x++) {
836 y = (s->sample_array[i] * h2) >> 15;
837 if (y < 0) {
838 y = -y;
839 ys = y1 - y;
840 } else {
841 ys = y1;
843 fill_rectangle(screen,
844 s->xleft + x, ys, 1, y,
845 fgcolor);
846 i += channels;
847 if (i >= SAMPLE_ARRAY_SIZE)
848 i -= SAMPLE_ARRAY_SIZE;
852 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
854 for(ch = 1;ch < nb_display_channels; ch++) {
855 y = s->ytop + ch * h;
856 fill_rectangle(screen,
857 s->xleft, y, s->width, 1,
858 fgcolor);
860 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
863 static int video_open(VideoState *is){
864 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
865 int w,h;
867 if(is_full_screen) flags |= SDL_FULLSCREEN;
868 else flags |= SDL_RESIZABLE;
870 if (is_full_screen && fs_screen_width) {
871 w = fs_screen_width;
872 h = fs_screen_height;
873 } else if(!is_full_screen && screen_width){
874 w = screen_width;
875 h = screen_height;
876 }else if (is->video_st && is->video_st->codec->width){
877 w = is->video_st->codec->width;
878 h = is->video_st->codec->height;
879 } else {
880 w = 640;
881 h = 480;
883 #ifndef __APPLE__
884 screen = SDL_SetVideoMode(w, h, 0, flags);
885 #else
886 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
887 screen = SDL_SetVideoMode(w, h, 24, flags);
888 #endif
889 if (!screen) {
890 fprintf(stderr, "SDL: could not set video mode - exiting\n");
891 return -1;
893 SDL_WM_SetCaption("FFplay", "FFplay");
895 is->width = screen->w;
896 is->height = screen->h;
898 return 0;
901 /* display the current picture, if any */
902 static void video_display(VideoState *is)
904 if(!screen)
905 video_open(cur_stream);
906 if (is->audio_st && is->show_audio)
907 video_audio_display(is);
908 else if (is->video_st)
909 video_image_display(is);
912 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
914 SDL_Event event;
915 event.type = FF_REFRESH_EVENT;
916 event.user.data1 = opaque;
917 SDL_PushEvent(&event);
918 return 0; /* 0 means stop timer */
921 /* schedule a video refresh in 'delay' ms */
922 static void schedule_refresh(VideoState *is, int delay)
924 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
925 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
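/* Clock model: get_audio_clock() returns the pts of the last decoded audio
   data minus what is still sitting in the output buffer, get_video_clock()
   returns the pts of the displayed picture plus the time elapsed since it was
   shown, and get_external_clock() is essentially a wall clock.
   get_master_clock() picks one of them according to av_sync_type. */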
928 /* get the current audio clock value */
929 static double get_audio_clock(VideoState *is)
931 double pts;
932 int hw_buf_size, bytes_per_sec;
933 pts = is->audio_clock;
934 hw_buf_size = audio_write_get_buf_size(is);
935 bytes_per_sec = 0;
936 if (is->audio_st) {
937 bytes_per_sec = is->audio_st->codec->sample_rate *
938 2 * is->audio_st->codec->channels;
940 if (bytes_per_sec)
941 pts -= (double)hw_buf_size / bytes_per_sec;
942 return pts;
945 /* get the current video clock value */
946 static double get_video_clock(VideoState *is)
948 double delta;
949 if (is->paused) {
950 delta = 0;
951 } else {
952 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
954 return is->video_current_pts + delta;
957 /* get the current external clock value */
958 static double get_external_clock(VideoState *is)
960 int64_t ti;
961 ti = av_gettime();
962 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
965 /* get the current master clock value */
966 static double get_master_clock(VideoState *is)
968 double val;
970 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
971 if (is->video_st)
972 val = get_video_clock(is);
973 else
974 val = get_audio_clock(is);
975 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
976 if (is->audio_st)
977 val = get_audio_clock(is);
978 else
979 val = get_video_clock(is);
980 } else {
981 val = get_external_clock(is);
983 return val;
986 /* seek in the stream */
987 static void stream_seek(VideoState *is, int64_t pos, int rel)
989 if (!is->seek_req) {
990 is->seek_pos = pos;
991 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
992 if (seek_by_bytes)
993 is->seek_flags |= AVSEEK_FLAG_BYTE;
994 is->seek_req = 1;
998 /* pause or resume the video */
999 static void stream_pause(VideoState *is)
1001 is->paused = !is->paused;
1002 if (!is->paused) {
1003 is->video_current_pts = get_video_clock(is);
1004 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
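/* Frame scheduling (video_refresh_timer): the nominal delay is the pts
   difference between consecutive frames.  When video is not the master clock,
   the delay is set to 0 (catch up) or doubled (wait) whenever the A-V
   difference exceeds max(AV_SYNC_THRESHOLD, delay).  frame_timer accumulates
   the ideal display times, and the next refresh is scheduled for
   frame_timer - now, clamped to at least 10 ms. */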
1008 /* called to display each frame */
1009 static void video_refresh_timer(void *opaque)
1011 VideoState *is = opaque;
1012 VideoPicture *vp;
1013 double actual_delay, delay, sync_threshold, ref_clock, diff;
1015 SubPicture *sp, *sp2;
1017 if (is->video_st) {
1018 if (is->pictq_size == 0) {
1019 /* if no picture, need to wait */
1020 schedule_refresh(is, 1);
1021 } else {
1022 /* dequeue the picture */
1023 vp = &is->pictq[is->pictq_rindex];
1025 /* update current video pts */
1026 is->video_current_pts = vp->pts;
1027 is->video_current_pts_time = av_gettime();
1029 /* compute nominal delay */
1030 delay = vp->pts - is->frame_last_pts;
1031 if (delay <= 0 || delay >= 10.0) {
1032 /* if incorrect delay, use previous one */
1033 delay = is->frame_last_delay;
1035 is->frame_last_delay = delay;
1036 is->frame_last_pts = vp->pts;
1038 /* update delay to follow master synchronisation source */
1039 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1040 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1041 /* if video is slave, we try to correct big delays by
1042 duplicating or deleting a frame */
1043 ref_clock = get_master_clock(is);
1044 diff = vp->pts - ref_clock;
1046 /* skip or repeat frame. We take into account the
1047 delay to compute the threshold. I still don't know
1048 if it is the best guess */
1049 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1050 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1051 if (diff <= -sync_threshold)
1052 delay = 0;
1053 else if (diff >= sync_threshold)
1054 delay = 2 * delay;
1058 is->frame_timer += delay;
1059 /* compute the REAL delay (we need to do that to avoid
1060 long-term errors) */
1061 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1062 if (actual_delay < 0.010) {
1063 /* XXX: should skip picture */
1064 actual_delay = 0.010;
1066 /* launch timer for next picture */
1067 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1069 #if defined(DEBUG_SYNC)
1070 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1071 delay, actual_delay, vp->pts, -diff);
1072 #endif
1074 if(is->subtitle_st) {
1075 if (is->subtitle_stream_changed) {
1076 SDL_LockMutex(is->subpq_mutex);
1078 while (is->subpq_size) {
1079 free_subpicture(&is->subpq[is->subpq_rindex]);
1081 /* update queue size and signal for next picture */
1082 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1083 is->subpq_rindex = 0;
1085 is->subpq_size--;
1087 is->subtitle_stream_changed = 0;
1089 SDL_CondSignal(is->subpq_cond);
1090 SDL_UnlockMutex(is->subpq_mutex);
1091 } else {
1092 if (is->subpq_size > 0) {
1093 sp = &is->subpq[is->subpq_rindex];
1095 if (is->subpq_size > 1)
1096 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1097 else
1098 sp2 = NULL;
1100 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1101 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1103 free_subpicture(sp);
1105 /* update queue size and signal for next picture */
1106 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1107 is->subpq_rindex = 0;
1109 SDL_LockMutex(is->subpq_mutex);
1110 is->subpq_size--;
1111 SDL_CondSignal(is->subpq_cond);
1112 SDL_UnlockMutex(is->subpq_mutex);
1118 /* display picture */
1119 video_display(is);
1121 /* update queue size and signal for next picture */
1122 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1123 is->pictq_rindex = 0;
1125 SDL_LockMutex(is->pictq_mutex);
1126 is->pictq_size--;
1127 SDL_CondSignal(is->pictq_cond);
1128 SDL_UnlockMutex(is->pictq_mutex);
1130 } else if (is->audio_st) {
1131 /* draw the next audio frame */
1133 schedule_refresh(is, 40);
1135 /* if there is only an audio stream, then display the audio bars (better
1136 than nothing, just to test the implementation) */
1138 /* display picture */
1139 video_display(is);
1140 } else {
1141 schedule_refresh(is, 100);
1143 if (show_status) {
1144 static int64_t last_time;
1145 int64_t cur_time;
1146 int aqsize, vqsize, sqsize;
1147 double av_diff;
1149 cur_time = av_gettime();
1150 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1151 aqsize = 0;
1152 vqsize = 0;
1153 sqsize = 0;
1154 if (is->audio_st)
1155 aqsize = is->audioq.size;
1156 if (is->video_st)
1157 vqsize = is->videoq.size;
1158 if (is->subtitle_st)
1159 sqsize = is->subtitleq.size;
1160 av_diff = 0;
1161 if (is->audio_st && is->video_st)
1162 av_diff = get_audio_clock(is) - get_video_clock(is);
1163 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1164 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1165 fflush(stdout);
1166 last_time = cur_time;
1171 /* allocate a picture (this needs to be done in the main thread to avoid
1172 potential locking problems) */
1173 static void alloc_picture(void *opaque)
1175 VideoState *is = opaque;
1176 VideoPicture *vp;
1178 vp = &is->pictq[is->pictq_windex];
1180 if (vp->bmp)
1181 SDL_FreeYUVOverlay(vp->bmp);
1183 #if 0
1184 /* XXX: use generic function */
1185 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1186 switch(is->video_st->codec->pix_fmt) {
1187 case PIX_FMT_YUV420P:
1188 case PIX_FMT_YUV422P:
1189 case PIX_FMT_YUV444P:
1190 case PIX_FMT_YUYV422:
1191 case PIX_FMT_YUV410P:
1192 case PIX_FMT_YUV411P:
1193 is_yuv = 1;
1194 break;
1195 default:
1196 is_yuv = 0;
1197 break;
1199 #endif
1200 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1201 is->video_st->codec->height,
1202 SDL_YV12_OVERLAY,
1203 screen);
1204 vp->width = is->video_st->codec->width;
1205 vp->height = is->video_st->codec->height;
1207 SDL_LockMutex(is->pictq_mutex);
1208 vp->allocated = 1;
1209 SDL_CondSignal(is->pictq_cond);
1210 SDL_UnlockMutex(is->pictq_mutex);
1215  * @param pts the dts of the packet / the pts of the frame, guessed if not known
1217 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1219 VideoPicture *vp;
1220 int dst_pix_fmt;
1221 AVPicture pict;
1222 static struct SwsContext *img_convert_ctx;
1224 /* wait until we have space to put a new picture */
1225 SDL_LockMutex(is->pictq_mutex);
1226 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1227 !is->videoq.abort_request) {
1228 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1230 SDL_UnlockMutex(is->pictq_mutex);
1232 if (is->videoq.abort_request)
1233 return -1;
1235 vp = &is->pictq[is->pictq_windex];
1237 /* alloc or resize hardware picture buffer */
1238 if (!vp->bmp ||
1239 vp->width != is->video_st->codec->width ||
1240 vp->height != is->video_st->codec->height) {
1241 SDL_Event event;
1243 vp->allocated = 0;
1245 /* the allocation must be done in the main thread to avoid
1246 locking problems */
1247 event.type = FF_ALLOC_EVENT;
1248 event.user.data1 = is;
1249 SDL_PushEvent(&event);
1251 /* wait until the picture is allocated */
1252 SDL_LockMutex(is->pictq_mutex);
1253 while (!vp->allocated && !is->videoq.abort_request) {
1254 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1256 SDL_UnlockMutex(is->pictq_mutex);
1258 if (is->videoq.abort_request)
1259 return -1;
1262 /* if the frame is not skipped, then display it */
1263 if (vp->bmp) {
1264 /* get a pointer on the bitmap */
1265 SDL_LockYUVOverlay (vp->bmp);
1267 dst_pix_fmt = PIX_FMT_YUV420P;
1268 pict.data[0] = vp->bmp->pixels[0];
1269 pict.data[1] = vp->bmp->pixels[2];
1270 pict.data[2] = vp->bmp->pixels[1];
1272 pict.linesize[0] = vp->bmp->pitches[0];
1273 pict.linesize[1] = vp->bmp->pitches[2];
1274 pict.linesize[2] = vp->bmp->pitches[1];
1275 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1276 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1277 is->video_st->codec->width, is->video_st->codec->height,
1278 is->video_st->codec->pix_fmt,
1279 is->video_st->codec->width, is->video_st->codec->height,
1280 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1281 if (img_convert_ctx == NULL) {
1282 fprintf(stderr, "Cannot initialize the conversion context\n");
1283 exit(1);
1285 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1286 0, is->video_st->codec->height, pict.data, pict.linesize);
1287 /* update the bitmap content */
1288 SDL_UnlockYUVOverlay(vp->bmp);
1290 vp->pts = pts;
1292 /* now we can update the picture count */
1293 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1294 is->pictq_windex = 0;
1295 SDL_LockMutex(is->pictq_mutex);
1296 is->pictq_size++;
1297 SDL_UnlockMutex(is->pictq_mutex);
1299 return 0;
1303 * compute the exact PTS for the picture if it is omitted in the stream
1304 * @param pts1 the dts of the pkt / pts of the frame
1306 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1308 double frame_delay, pts;
1310 pts = pts1;
1312 if (pts != 0) {
1313 /* update video clock with pts, if present */
1314 is->video_clock = pts;
1315 } else {
1316 pts = is->video_clock;
1318 /* update video clock for next frame */
1319 frame_delay = av_q2d(is->video_st->codec->time_base);
1320 /* for MPEG2, the frame can be repeated, so we update the
1321 clock accordingly */
1322 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1323 is->video_clock += frame_delay;
1325 #if defined(DEBUG_SYNC) && 0
1327 int ftype;
1328 if (src_frame->pict_type == FF_B_TYPE)
1329 ftype = 'B';
1330 else if (src_frame->pict_type == FF_I_TYPE)
1331 ftype = 'I';
1332 else
1333 ftype = 'P';
1334 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1335 ftype, pts, pts1);
1337 #endif
1338 return queue_picture(is, src_frame, pts);
1341 static int video_thread(void *arg)
1343 VideoState *is = arg;
1344 AVPacket pkt1, *pkt = &pkt1;
1345 int len1, got_picture;
1346 AVFrame *frame= avcodec_alloc_frame();
1347 double pts;
1349 for(;;) {
1350 while (is->paused && !is->videoq.abort_request) {
1351 SDL_Delay(10);
1353 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1354 break;
1356 if(pkt->data == flush_pkt.data){
1357 avcodec_flush_buffers(is->video_st->codec);
1358 continue;
1361 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1362 this packet, if any */
1363 is->video_st->codec->reordered_opaque= pkt->pts;
1364 len1 = avcodec_decode_video(is->video_st->codec,
1365 frame, &got_picture,
1366 pkt->data, pkt->size);
1368 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1369 && frame->reordered_opaque != AV_NOPTS_VALUE)
1370 pts= frame->reordered_opaque;
1371 else if(pkt->dts != AV_NOPTS_VALUE)
1372 pts= pkt->dts;
1373 else
1374 pts= 0;
1375 pts *= av_q2d(is->video_st->time_base);
1377 // if (len1 < 0)
1378 // break;
1379 if (got_picture) {
1380 if (output_picture2(is, frame, pts) < 0)
1381 goto the_end;
1383 av_free_packet(pkt);
1384 if (step)
1385 if (cur_stream)
1386 stream_pause(cur_stream);
1388 the_end:
1389 av_free(frame);
1390 return 0;
1393 static int subtitle_thread(void *arg)
1395 VideoState *is = arg;
1396 SubPicture *sp;
1397 AVPacket pkt1, *pkt = &pkt1;
1398 int len1, got_subtitle;
1399 double pts;
1400 int i, j;
1401 int r, g, b, y, u, v, a;
1403 for(;;) {
1404 while (is->paused && !is->subtitleq.abort_request) {
1405 SDL_Delay(10);
1407 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1408 break;
1410 if(pkt->data == flush_pkt.data){
1411 avcodec_flush_buffers(is->subtitle_st->codec);
1412 continue;
1414 SDL_LockMutex(is->subpq_mutex);
1415 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1416 !is->subtitleq.abort_request) {
1417 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1419 SDL_UnlockMutex(is->subpq_mutex);
1421 if (is->subtitleq.abort_request)
1422 goto the_end;
1424 sp = &is->subpq[is->subpq_windex];
1426 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1427 this packet, if any */
1428 pts = 0;
1429 if (pkt->pts != AV_NOPTS_VALUE)
1430 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1432 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1433 &sp->sub, &got_subtitle,
1434 pkt->data, pkt->size);
1435 // if (len1 < 0)
1436 // break;
1437 if (got_subtitle && sp->sub.format == 0) {
1438 sp->pts = pts;
1440 for (i = 0; i < sp->sub.num_rects; i++)
1442 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1444 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1445 y = RGB_TO_Y_CCIR(r, g, b);
1446 u = RGB_TO_U_CCIR(r, g, b, 0);
1447 v = RGB_TO_V_CCIR(r, g, b, 0);
1448 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1452 /* now we can update the picture count */
1453 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1454 is->subpq_windex = 0;
1455 SDL_LockMutex(is->subpq_mutex);
1456 is->subpq_size++;
1457 SDL_UnlockMutex(is->subpq_mutex);
1459 av_free_packet(pkt);
1460 // if (step)
1461 // if (cur_stream)
1462 // stream_pause(cur_stream);
1464 the_end:
1465 return 0;
1468 /* copy samples into the sample array for the audio waveform display */
1469 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1471 int size, len, channels;
1473 channels = is->audio_st->codec->channels;
1475 size = samples_size / sizeof(short);
1476 while (size > 0) {
1477 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1478 if (len > size)
1479 len = size;
1480 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1481 samples += len;
1482 is->sample_array_index += len;
1483 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1484 is->sample_array_index = 0;
1485 size -= len;
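/* synchronize_audio(): when audio is not the master clock, the measured
   audio-vs-master difference is smoothed with the exponential average above;
   once the smoothed value exceeds audio_diff_threshold, the output buffer is
   shrunk or grown (by repeating the last sample) by at most
   SAMPLE_CORRECTION_PERCENT_MAX percent, which slightly speeds up or slows
   down playback until the clocks agree again. */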
1489 /* return the new audio buffer size (samples can be added or deleted
1490 to get better sync when the video or external clock is the master) */
1491 static int synchronize_audio(VideoState *is, short *samples,
1492 int samples_size1, double pts)
1494 int n, samples_size;
1495 double ref_clock;
1497 n = 2 * is->audio_st->codec->channels;
1498 samples_size = samples_size1;
1500 /* if not master, then we try to remove or add samples to correct the clock */
1501 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1502 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1503 double diff, avg_diff;
1504 int wanted_size, min_size, max_size, nb_samples;
1506 ref_clock = get_master_clock(is);
1507 diff = get_audio_clock(is) - ref_clock;
1509 if (diff < AV_NOSYNC_THRESHOLD) {
1510 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1511 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1512 /* not enough measurements to have a correct estimate */
1513 is->audio_diff_avg_count++;
1514 } else {
1515 /* estimate the A-V difference */
1516 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1518 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1519 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1520 nb_samples = samples_size / n;
1522 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1523 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1524 if (wanted_size < min_size)
1525 wanted_size = min_size;
1526 else if (wanted_size > max_size)
1527 wanted_size = max_size;
1529 /* add or remove samples to correct the sync */
1530 if (wanted_size < samples_size) {
1531 /* remove samples */
1532 samples_size = wanted_size;
1533 } else if (wanted_size > samples_size) {
1534 uint8_t *samples_end, *q;
1535 int nb;
1537 /* add samples (wanted_size > samples_size here, so the count must be positive) */
1538 nb = (wanted_size - samples_size);
1539 samples_end = (uint8_t *)samples + samples_size - n;
1540 q = samples_end + n;
1541 while (nb > 0) {
1542 memcpy(q, samples_end, n);
1543 q += n;
1544 nb -= n;
1546 samples_size = wanted_size;
1549 #if 0
1550 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1551 diff, avg_diff, samples_size - samples_size1,
1552 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1553 #endif
1555 } else {
1556 /* the difference is too big: probably initial PTS errors, so
1557 reset the A-V filter */
1558 is->audio_diff_avg_count = 0;
1559 is->audio_diff_cum = 0;
1563 return samples_size;
1566 /* decode one audio frame and return its uncompressed size */
1567 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1569 AVPacket *pkt = &is->audio_pkt;
1570 AVCodecContext *dec= is->audio_st->codec;
1571 int n, len1, data_size;
1572 double pts;
1574 for(;;) {
1575 /* NOTE: the audio packet can contain several frames */
1576 while (is->audio_pkt_size > 0) {
1577 data_size = sizeof(is->audio_buf1);
1578 len1 = avcodec_decode_audio2(dec,
1579 (int16_t *)is->audio_buf1, &data_size,
1580 is->audio_pkt_data, is->audio_pkt_size);
1581 if (len1 < 0) {
1582 /* if error, we skip the frame */
1583 is->audio_pkt_size = 0;
1584 break;
1587 is->audio_pkt_data += len1;
1588 is->audio_pkt_size -= len1;
1589 if (data_size <= 0)
1590 continue;
1592 if (dec->sample_fmt != is->audio_src_fmt) {
1593 if (is->reformat_ctx)
1594 av_audio_convert_free(is->reformat_ctx);
1595 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1596 dec->sample_fmt, 1, NULL, 0);
1597 if (!is->reformat_ctx) {
1598 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1599 avcodec_get_sample_fmt_name(dec->sample_fmt),
1600 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1601 break;
1603 is->audio_src_fmt= dec->sample_fmt;
1606 if (is->reformat_ctx) {
1607 const void *ibuf[6]= {is->audio_buf1};
1608 void *obuf[6]= {is->audio_buf2};
1609 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1610 int ostride[6]= {2};
1611 int len= data_size/istride[0];
1612 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1613 printf("av_audio_convert() failed\n");
1614 break;
1616 is->audio_buf= is->audio_buf2;
1617 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
1618 remove this legacy cruft */
1619 data_size= len*2;
1620 }else{
1621 is->audio_buf= is->audio_buf1;
1624 /* if no pts, then compute it */
1625 pts = is->audio_clock;
1626 *pts_ptr = pts;
1627 n = 2 * dec->channels;
1628 is->audio_clock += (double)data_size /
1629 (double)(n * dec->sample_rate);
1630 #if defined(DEBUG_SYNC)
1632 static double last_clock;
1633 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1634 is->audio_clock - last_clock,
1635 is->audio_clock, pts);
1636 last_clock = is->audio_clock;
1638 #endif
1639 return data_size;
1642 /* free the current packet */
1643 if (pkt->data)
1644 av_free_packet(pkt);
1646 if (is->paused || is->audioq.abort_request) {
1647 return -1;
1650 /* read next packet */
1651 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1652 return -1;
1653 if(pkt->data == flush_pkt.data){
1654 avcodec_flush_buffers(dec);
1655 continue;
1658 is->audio_pkt_data = pkt->data;
1659 is->audio_pkt_size = pkt->size;
1661 /* if the packet carries a pts, update the audio clock with it */
1662 if (pkt->pts != AV_NOPTS_VALUE) {
1663 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1668 /* get the current audio output buffer size, in bytes. With SDL, we
1669 cannot get precise information about the hardware buffer fullness */
1670 static int audio_write_get_buf_size(VideoState *is)
1672 return is->audio_buf_size - is->audio_buf_index;
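/* sdl_audio_callback() is invoked by SDL whenever it needs 'len' more bytes:
   it decodes audio frames on demand, runs them through synchronize_audio(),
   optionally records them for the waveform display, and copies the result
   into the SDL stream; on decode failure it outputs silence. */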
1676 /* prepare a new audio buffer */
1677 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1679 VideoState *is = opaque;
1680 int audio_size, len1;
1681 double pts;
1683 audio_callback_time = av_gettime();
1685 while (len > 0) {
1686 if (is->audio_buf_index >= is->audio_buf_size) {
1687 audio_size = audio_decode_frame(is, &pts);
1688 if (audio_size < 0) {
1689 /* if error, just output silence */
1690 is->audio_buf = is->audio_buf1;
1691 is->audio_buf_size = 1024;
1692 memset(is->audio_buf, 0, is->audio_buf_size);
1693 } else {
1694 if (is->show_audio)
1695 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1696 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1697 pts);
1698 is->audio_buf_size = audio_size;
1700 is->audio_buf_index = 0;
1702 len1 = is->audio_buf_size - is->audio_buf_index;
1703 if (len1 > len)
1704 len1 = len;
1705 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1706 len -= len1;
1707 stream += len1;
1708 is->audio_buf_index += len1;
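/* stream_component_open() configures and opens the decoder for one stream,
   applying the user supplied codec options; for audio it also opens the SDL
   audio device and initializes the A-V difference filter, and for video and
   subtitles it spawns the corresponding decoder thread. */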
1712 /* open a given stream. Return 0 if OK */
1713 static int stream_component_open(VideoState *is, int stream_index)
1715 AVFormatContext *ic = is->ic;
1716 AVCodecContext *enc;
1717 AVCodec *codec;
1718 SDL_AudioSpec wanted_spec, spec;
1720 if (stream_index < 0 || stream_index >= ic->nb_streams)
1721 return -1;
1722 enc = ic->streams[stream_index]->codec;
1724 /* prepare audio output */
1725 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1726 if (enc->channels > 0) {
1727 enc->request_channels = FFMIN(2, enc->channels);
1728 } else {
1729 enc->request_channels = 2;
1733 codec = avcodec_find_decoder(enc->codec_id);
1734 enc->debug_mv = debug_mv;
1735 enc->debug = debug;
1736 enc->workaround_bugs = workaround_bugs;
1737 enc->lowres = lowres;
1738 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1739 enc->idct_algo= idct;
1740 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1741 enc->skip_frame= skip_frame;
1742 enc->skip_idct= skip_idct;
1743 enc->skip_loop_filter= skip_loop_filter;
1744 enc->error_recognition= error_recognition;
1745 enc->error_concealment= error_concealment;
1747 set_context_opts(enc, avctx_opts[enc->codec_type], 0);
1749 if (!codec ||
1750 avcodec_open(enc, codec) < 0)
1751 return -1;
1753 /* prepare audio output */
1754 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1755 wanted_spec.freq = enc->sample_rate;
1756 wanted_spec.format = AUDIO_S16SYS;
1757 wanted_spec.channels = enc->channels;
1758 wanted_spec.silence = 0;
1759 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1760 wanted_spec.callback = sdl_audio_callback;
1761 wanted_spec.userdata = is;
1762 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1763 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1764 return -1;
1766 is->audio_hw_buf_size = spec.size;
1767 is->audio_src_fmt= SAMPLE_FMT_S16;
1770 if(thread_count>1)
1771 avcodec_thread_init(enc, thread_count);
1772 enc->thread_count= thread_count;
1773 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1774 switch(enc->codec_type) {
1775 case CODEC_TYPE_AUDIO:
1776 is->audio_stream = stream_index;
1777 is->audio_st = ic->streams[stream_index];
1778 is->audio_buf_size = 0;
1779 is->audio_buf_index = 0;
1781 /* init averaging filter */
1782 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1783 is->audio_diff_avg_count = 0;
1784 /* since we do not have precise enough information about the audio fifo
1785 fullness, we correct audio sync only if the error is larger than this threshold */
1786 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1788 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1789 packet_queue_init(&is->audioq);
1790 SDL_PauseAudio(0);
1791 break;
1792 case CODEC_TYPE_VIDEO:
1793 is->video_stream = stream_index;
1794 is->video_st = ic->streams[stream_index];
1796 is->frame_last_delay = 40e-3;
1797 is->frame_timer = (double)av_gettime() / 1000000.0;
1798 is->video_current_pts_time = av_gettime();
1800 packet_queue_init(&is->videoq);
1801 is->video_tid = SDL_CreateThread(video_thread, is);
1802 break;
1803 case CODEC_TYPE_SUBTITLE:
1804 is->subtitle_stream = stream_index;
1805 is->subtitle_st = ic->streams[stream_index];
1806 packet_queue_init(&is->subtitleq);
1808 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1809 break;
1810 default:
1811 break;
1813 return 0;
1816 static void stream_component_close(VideoState *is, int stream_index)
1818 AVFormatContext *ic = is->ic;
1819 AVCodecContext *enc;
1821 if (stream_index < 0 || stream_index >= ic->nb_streams)
1822 return;
1823 enc = ic->streams[stream_index]->codec;
1825 switch(enc->codec_type) {
1826 case CODEC_TYPE_AUDIO:
1827 packet_queue_abort(&is->audioq);
1829 SDL_CloseAudio();
1831 packet_queue_end(&is->audioq);
1832 if (is->reformat_ctx)
1833 av_audio_convert_free(is->reformat_ctx);
1834 break;
1835 case CODEC_TYPE_VIDEO:
1836 packet_queue_abort(&is->videoq);
1838 /* note: we also signal this mutex to make sure we unblock the
1839 video thread in all cases */
1840 SDL_LockMutex(is->pictq_mutex);
1841 SDL_CondSignal(is->pictq_cond);
1842 SDL_UnlockMutex(is->pictq_mutex);
1844 SDL_WaitThread(is->video_tid, NULL);
1846 packet_queue_end(&is->videoq);
1847 break;
1848 case CODEC_TYPE_SUBTITLE:
1849 packet_queue_abort(&is->subtitleq);
1851 /* note: we also signal this mutex to make sure we unblock the
1852 subtitle thread in all cases */
1853 SDL_LockMutex(is->subpq_mutex);
1854 is->subtitle_stream_changed = 1;
1856 SDL_CondSignal(is->subpq_cond);
1857 SDL_UnlockMutex(is->subpq_mutex);
1859 SDL_WaitThread(is->subtitle_tid, NULL);
1861 packet_queue_end(&is->subtitleq);
1862 break;
1863 default:
1864 break;
1867 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1868 avcodec_close(enc);
1869 switch(enc->codec_type) {
1870 case CODEC_TYPE_AUDIO:
1871 is->audio_st = NULL;
1872 is->audio_stream = -1;
1873 break;
1874 case CODEC_TYPE_VIDEO:
1875 is->video_st = NULL;
1876 is->video_stream = -1;
1877 break;
1878 case CODEC_TYPE_SUBTITLE:
1879 is->subtitle_st = NULL;
1880 is->subtitle_stream = -1;
1881 break;
1882 default:
1883 break;
1887 static void dump_stream_info(const AVFormatContext *s)
1889 if (s->track != 0)
1890 fprintf(stderr, "Track: %d\n", s->track);
1891 if (s->title[0] != '\0')
1892 fprintf(stderr, "Title: %s\n", s->title);
1893 if (s->author[0] != '\0')
1894 fprintf(stderr, "Author: %s\n", s->author);
1895 if (s->copyright[0] != '\0')
1896 fprintf(stderr, "Copyright: %s\n", s->copyright);
1897 if (s->comment[0] != '\0')
1898 fprintf(stderr, "Comment: %s\n", s->comment);
1899 if (s->album[0] != '\0')
1900 fprintf(stderr, "Album: %s\n", s->album);
1901 if (s->year != 0)
1902 fprintf(stderr, "Year: %d\n", s->year);
1903 if (s->genre[0] != '\0')
1904 fprintf(stderr, "Genre: %s\n", s->genre);
1907 /* since we have only one decoding thread, we can use a global
1908 variable instead of a thread local variable */
1909 static VideoState *global_video_state;
1911 static int decode_interrupt_cb(void)
1913 return (global_video_state && global_video_state->abort_request);
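/* decode_thread() opens the input file, selects the audio, video and subtitle
   streams according to the wanted_*_stream settings, then loops: it honours
   pause and seek requests (flushing the queues and pushing flush_pkt after a
   seek), throttles when the packet queues are full, and otherwise reads
   packets and dispatches them to the matching queue. */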
1916 /* this thread gets the stream from the disk or the network */
1917 static int decode_thread(void *arg)
1919 VideoState *is = arg;
1920 AVFormatContext *ic;
1921 int err, i, ret, video_index, audio_index, subtitle_index;
1922 AVPacket pkt1, *pkt = &pkt1;
1923 AVFormatParameters params, *ap = &params;
1925 video_index = -1;
1926 audio_index = -1;
1927 subtitle_index = -1;
1928 is->video_stream = -1;
1929 is->audio_stream = -1;
1930 is->subtitle_stream = -1;
1932 global_video_state = is;
1933 url_set_interrupt_cb(decode_interrupt_cb);
1935 memset(ap, 0, sizeof(*ap));
1937 ap->width = frame_width;
1938 ap->height= frame_height;
1939 ap->time_base= (AVRational){1, 25};
1940 ap->pix_fmt = frame_pix_fmt;
1942 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1943 if (err < 0) {
1944 print_error(is->filename, err);
1945 ret = -1;
1946 goto fail;
1948 is->ic = ic;
1950 if(genpts)
1951 ic->flags |= AVFMT_FLAG_GENPTS;
1953 err = av_find_stream_info(ic);
1954 if (err < 0) {
1955 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1956 ret = -1;
1957 goto fail;
1959 if(ic->pb)
1960 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1962 /* if seeking requested, we execute it */
1963 if (start_time != AV_NOPTS_VALUE) {
1964 int64_t timestamp;
1966 timestamp = start_time;
1967 /* add the stream start time */
1968 if (ic->start_time != AV_NOPTS_VALUE)
1969 timestamp += ic->start_time;
1970 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1971 if (ret < 0) {
1972 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1973 is->filename, (double)timestamp / AV_TIME_BASE);
1977 for(i = 0; i < ic->nb_streams; i++) {
1978 AVCodecContext *enc = ic->streams[i]->codec;
1979 ic->streams[i]->discard = AVDISCARD_ALL;
1980 switch(enc->codec_type) {
1981 case CODEC_TYPE_AUDIO:
1982 if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1983 audio_index = i;
1984 break;
1985 case CODEC_TYPE_VIDEO:
1986 if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
1987 video_index = i;
1988 break;
1989 case CODEC_TYPE_SUBTITLE:
1990 if (wanted_subtitle_stream >= 0 && !video_disable &&
1991 (subtitle_index < 0 || wanted_subtitle_stream-- > 0))
1992 subtitle_index = i;
1993 break;
1994 default:
1995 break;
1998 if (show_status) {
1999 dump_format(ic, 0, is->filename, 0);
2000 dump_stream_info(ic);
2003 /* open the streams */
2004 if (audio_index >= 0) {
2005 stream_component_open(is, audio_index);
2008 if (video_index >= 0) {
2009 stream_component_open(is, video_index);
2010 } else {
2011 if (!display_disable)
2012 is->show_audio = 1;
2015 if (subtitle_index >= 0) {
2016 stream_component_open(is, subtitle_index);
2019 if (is->video_stream < 0 && is->audio_stream < 0) {
2020 fprintf(stderr, "%s: could not open codecs\n", is->filename);
2021 ret = -1;
2022 goto fail;
2025 for(;;) {
2026 if (is->abort_request)
2027 break;
2028 if (is->paused != is->last_paused) {
2029 is->last_paused = is->paused;
2030 if (is->paused)
2031 av_read_pause(ic);
2032 else
2033 av_read_play(ic);
2035 #if CONFIG_RTSP_DEMUXER
2036 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2037 /* wait 10 ms to avoid trying to get another packet */
2038 /* XXX: horrible */
2039 SDL_Delay(10);
2040 continue;
2042 #endif
2043 if (is->seek_req) {
2044 int stream_index= -1;
2045 int64_t seek_target= is->seek_pos;
2047 if (is-> video_stream >= 0) stream_index= is-> video_stream;
2048 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
2049 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
2051 if(stream_index>=0){
2052 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2055 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2056 if (ret < 0) {
2057 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2058 }else{
2059 if (is->audio_stream >= 0) {
2060 packet_queue_flush(&is->audioq);
2061 packet_queue_put(&is->audioq, &flush_pkt);
2063 if (is->subtitle_stream >= 0) {
2064 packet_queue_flush(&is->subtitleq);
2065 packet_queue_put(&is->subtitleq, &flush_pkt);
2067 if (is->video_stream >= 0) {
2068 packet_queue_flush(&is->videoq);
2069 packet_queue_put(&is->videoq, &flush_pkt);
2072 is->seek_req = 0;
2075 /* if the queues are full, no need to read more */
2076 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2077 is->videoq.size > MAX_VIDEOQ_SIZE ||
2078 is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
2079 /* wait 10 ms */
2080 SDL_Delay(10);
2081 continue;
2083 if(url_feof(ic->pb)) {
2084 av_init_packet(pkt);
2085 pkt->data=NULL;
2086 pkt->size=0;
2087 pkt->stream_index= is->video_stream;
2088 packet_queue_put(&is->videoq, pkt);
2089 continue;
2091 ret = av_read_frame(ic, pkt);
2092 if (ret < 0) {
2093 if (url_ferror(ic->pb) == 0) {
2094 SDL_Delay(100); /* wait for user event */
2095 continue;
2096 } else
2097 break;
2099 if (pkt->stream_index == is->audio_stream) {
2100 packet_queue_put(&is->audioq, pkt);
2101 } else if (pkt->stream_index == is->video_stream) {
2102 packet_queue_put(&is->videoq, pkt);
2103 } else if (pkt->stream_index == is->subtitle_stream) {
2104 packet_queue_put(&is->subtitleq, pkt);
2105 } else {
2106 av_free_packet(pkt);
2107 }
2109 /* wait until the end */
2110 while (!is->abort_request) {
2111 SDL_Delay(100);
2112 }
2114 ret = 0;
2115 fail:
2116 /* disable interrupting */
2117 global_video_state = NULL;
2119 /* close each stream */
2120 if (is->audio_stream >= 0)
2121 stream_component_close(is, is->audio_stream);
2122 if (is->video_stream >= 0)
2123 stream_component_close(is, is->video_stream);
2124 if (is->subtitle_stream >= 0)
2125 stream_component_close(is, is->subtitle_stream);
2126 if (is->ic) {
2127 av_close_input_file(is->ic);
2128 is->ic = NULL; /* safety */
2129 }
2130 url_set_interrupt_cb(NULL);
2132 if (ret != 0) {
2133 SDL_Event event;
2135 event.type = FF_QUIT_EVENT;
2136 event.user.data1 = is;
2137 SDL_PushEvent(&event);
2138 }
2139 return 0;
2140 }
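/* Allocate and initialize a VideoState for the given input, start the display
   refresh timer and spawn the demuxing thread (decode_thread). */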
2142 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2143 {
2144 VideoState *is;
2146 is = av_mallocz(sizeof(VideoState));
2147 if (!is)
2148 return NULL;
2149 av_strlcpy(is->filename, filename, sizeof(is->filename));
2150 is->iformat = iformat;
2151 is->ytop = 0;
2152 is->xleft = 0;
2154 /* start video display */
2155 is->pictq_mutex = SDL_CreateMutex();
2156 is->pictq_cond = SDL_CreateCond();
2158 is->subpq_mutex = SDL_CreateMutex();
2159 is->subpq_cond = SDL_CreateCond();
2161 /* add the refresh timer to draw the picture */
2162 schedule_refresh(is, 40);
2164 is->av_sync_type = av_sync_type;
2165 is->parse_tid = SDL_CreateThread(decode_thread, is);
2166 if (!is->parse_tid) {
2167 av_free(is);
2168 return NULL;
2169 }
2170 return is;
2171 }
2173 static void stream_close(VideoState *is)
2174 {
2175 VideoPicture *vp;
2176 int i;
2177 /* XXX: use a special url_shutdown call to abort parse cleanly */
2178 is->abort_request = 1;
2179 SDL_WaitThread(is->parse_tid, NULL);
2181 /* free all pictures */
2182 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2183 vp = &is->pictq[i];
2184 if (vp->bmp) {
2185 SDL_FreeYUVOverlay(vp->bmp);
2186 vp->bmp = NULL;
2187 }
2188 }
2189 SDL_DestroyMutex(is->pictq_mutex);
2190 SDL_DestroyCond(is->pictq_cond);
2191 SDL_DestroyMutex(is->subpq_mutex);
2192 SDL_DestroyCond(is->subpq_cond);
2193 }
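/* Cycle to the next stream of the given codec type, wrapping around the stream
   list; audio streams are only accepted if they report a sample rate and a
   channel count, and subtitles may cycle back to "none" (stream_index == -1). */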
2195 static void stream_cycle_channel(VideoState *is, int codec_type)
2196 {
2197 AVFormatContext *ic = is->ic;
2198 int start_index, stream_index;
2199 AVStream *st;
2201 if (codec_type == CODEC_TYPE_VIDEO)
2202 start_index = is->video_stream;
2203 else if (codec_type == CODEC_TYPE_AUDIO)
2204 start_index = is->audio_stream;
2205 else
2206 start_index = is->subtitle_stream;
2207 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2208 return;
2209 stream_index = start_index;
2210 for(;;) {
2211 if (++stream_index >= is->ic->nb_streams)
2212 {
2213 if (codec_type == CODEC_TYPE_SUBTITLE)
2214 {
2215 stream_index = -1;
2216 goto the_end;
2217 } else
2218 stream_index = 0;
2219 }
2220 if (stream_index == start_index)
2221 return;
2222 st = ic->streams[stream_index];
2223 if (st->codec->codec_type == codec_type) {
2224 /* check that parameters are OK */
2225 switch(codec_type) {
2226 case CODEC_TYPE_AUDIO:
2227 if (st->codec->sample_rate != 0 &&
2228 st->codec->channels != 0)
2229 goto the_end;
2230 break;
2231 case CODEC_TYPE_VIDEO:
2232 case CODEC_TYPE_SUBTITLE:
2233 goto the_end;
2234 default:
2235 break;
2236 }
2237 }
2238 }
2239 the_end:
2240 stream_component_close(is, start_index);
2241 stream_component_open(is, stream_index);
2242 }
2245 static void toggle_full_screen(void)
2246 {
2247 is_full_screen = !is_full_screen;
2248 if (!fs_screen_width) {
2249 /* use default SDL method */
2250 // SDL_WM_ToggleFullScreen(screen);
2251 }
2252 video_open(cur_stream);
2253 }
2255 static void toggle_pause(void)
2256 {
2257 if (cur_stream)
2258 stream_pause(cur_stream);
2259 step = 0;
2260 }
2262 static void step_to_next_frame(void)
2263 {
2264 if (cur_stream) {
2265 /* if the stream is paused, unpause it, then step */
2266 if (cur_stream->paused)
2267 stream_pause(cur_stream);
2268 }
2269 step = 1;
2270 }
2272 static void do_exit(void)
2273 {
2274 if (cur_stream) {
2275 stream_close(cur_stream);
2276 cur_stream = NULL;
2277 }
2278 if (show_status)
2279 printf("\n");
2280 SDL_Quit();
2281 exit(0);
2282 }
2284 static void toggle_audio_display(void)
2285 {
2286 if (cur_stream) {
2287 cur_stream->show_audio = !cur_stream->show_audio;
2288 }
2289 }
2291 /* handle an event sent by the GUI */
2292 static void event_loop(void)
2293 {
2294 SDL_Event event;
2295 double incr, pos, frac;
2297 for(;;) {
2298 SDL_WaitEvent(&event);
2299 switch(event.type) {
2300 case SDL_KEYDOWN:
2301 switch(event.key.keysym.sym) {
2302 case SDLK_ESCAPE:
2303 case SDLK_q:
2304 do_exit();
2305 break;
2306 case SDLK_f:
2307 toggle_full_screen();
2308 break;
2309 case SDLK_p:
2310 case SDLK_SPACE:
2311 toggle_pause();
2312 break;
2313 case SDLK_s: //S: Step to next frame
2314 step_to_next_frame();
2315 break;
2316 case SDLK_a:
2317 if (cur_stream)
2318 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2319 break;
2320 case SDLK_v:
2321 if (cur_stream)
2322 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2323 break;
2324 case SDLK_t:
2325 if (cur_stream)
2326 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2327 break;
2328 case SDLK_w:
2329 toggle_audio_display();
2330 break;
2331 case SDLK_LEFT:
2332 incr = -10.0;
2333 goto do_seek;
2334 case SDLK_RIGHT:
2335 incr = 10.0;
2336 goto do_seek;
2337 case SDLK_UP:
2338 incr = 60.0;
2339 goto do_seek;
2340 case SDLK_DOWN:
2341 incr = -60.0;
2342 do_seek:
2343 if (cur_stream) {
2344 if (seek_by_bytes) {
2345 pos = url_ftell(cur_stream->ic->pb);
2346 if (cur_stream->ic->bit_rate)
2347 incr *= cur_stream->ic->bit_rate / 60.0;
2348 else
2349 incr *= 180000.0;
2350 pos += incr;
2351 stream_seek(cur_stream, pos, incr);
2352 } else {
2353 pos = get_master_clock(cur_stream);
2354 pos += incr;
2355 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2356 }
2357 }
2358 break;
2359 default:
2360 break;
2361 }
2362 break;
2363 case SDL_MOUSEBUTTONDOWN:
2364 if (cur_stream) {
2365 int ns, hh, mm, ss;
2366 int tns, thh, tmm, tss;
2367 tns = cur_stream->ic->duration/1000000LL;
2368 thh = tns/3600;
2369 tmm = (tns%3600)/60;
2370 tss = (tns%60);
2371 frac = (double)event.button.x/(double)cur_stream->width;
2372 ns = frac*tns;
2373 hh = ns/3600;
2374 mm = (ns%3600)/60;
2375 ss = (ns%60);
2376 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2377 hh, mm, ss, thh, tmm, tss);
2378 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2379 }
2380 break;
2381 case SDL_VIDEORESIZE:
2382 if (cur_stream) {
2383 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2384 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2385 screen_width = cur_stream->width = event.resize.w;
2386 screen_height = cur_stream->height = event.resize.h;
2387 }
2388 break;
2389 case SDL_QUIT:
2390 case FF_QUIT_EVENT:
2391 do_exit();
2392 break;
2393 case FF_ALLOC_EVENT:
2394 video_open(event.user.data1);
2395 alloc_picture(event.user.data1);
2396 break;
2397 case FF_REFRESH_EVENT:
2398 video_refresh_timer(event.user.data1);
2399 break;
2400 default:
2401 break;
2402 }
2403 }
2404 }
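/* Command-line option callbacks referenced by the options[] table below. */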
2406 static void opt_frame_size(const char *arg)
2407 {
2408 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2409 fprintf(stderr, "Incorrect frame size\n");
2410 exit(1);
2411 }
2412 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2413 fprintf(stderr, "Frame size must be a multiple of 2\n");
2414 exit(1);
2415 }
2416 }
2418 static int opt_width(const char *opt, const char *arg)
2419 {
2420 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2421 return 0;
2422 }
2424 static int opt_height(const char *opt, const char *arg)
2425 {
2426 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2427 return 0;
2428 }
2430 static void opt_format(const char *arg)
2431 {
2432 file_iformat = av_find_input_format(arg);
2433 if (!file_iformat) {
2434 fprintf(stderr, "Unknown input format: %s\n", arg);
2435 exit(1);
2436 }
2437 }
2439 static void opt_frame_pix_fmt(const char *arg)
2440 {
2441 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2442 }
2444 static int opt_sync(const char *opt, const char *arg)
2445 {
2446 if (!strcmp(arg, "audio"))
2447 av_sync_type = AV_SYNC_AUDIO_MASTER;
2448 else if (!strcmp(arg, "video"))
2449 av_sync_type = AV_SYNC_VIDEO_MASTER;
2450 else if (!strcmp(arg, "ext"))
2451 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2452 else {
2453 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2454 exit(1);
2455 }
2456 return 0;
2457 }
2459 static int opt_seek(const char *opt, const char *arg)
2460 {
2461 start_time = parse_time_or_die(opt, arg, 1);
2462 return 0;
2463 }
2465 static int opt_debug(const char *opt, const char *arg)
2466 {
2467 av_log_set_level(99);
2468 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2469 return 0;
2470 }
2472 static int opt_vismv(const char *opt, const char *arg)
2473 {
2474 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2475 return 0;
2476 }
2478 static int opt_thread_count(const char *opt, const char *arg)
2479 {
2480 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2481 #if !HAVE_THREADS
2482 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2483 #endif
2484 return 0;
2485 }
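/* Option table mapping command-line flags to the variables and callbacks
   above; it is processed by parse_options() in main(). */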
2487 static const OptionDef options[] = {
2488 { "h", OPT_EXIT, {(void*)show_help}, "show help" },
2489 { "version", OPT_EXIT, {(void*)show_version}, "show version" },
2490 { "L", OPT_EXIT, {(void*)show_license}, "show license" },
2491 { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
2492 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2493 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2494 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2495 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2496 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2497 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2498 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2499 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
2500 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "", "" },
2501 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2502 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2503 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2504 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2505 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2506 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2507 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2508 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2509 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2510 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2511 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2512 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2513 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2514 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2515 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2516 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2517 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2518 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
2519 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2520 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2521 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2522 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2523 { NULL, },
2524 };
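/* Example invocation (hypothetical file name, for illustration only):
 *     ffplay -sync video -ast 1 -ss 00:01:30 -stats input.mkv
 * plays the second audio stream, syncs to the video clock, starts 90 seconds
 * into the file and prints status information while playing. */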
2526 static void show_help(void)
2527 {
2528 printf("usage: ffplay [options] input_file\n"
2529 "Simple media player\n");
2530 printf("\n");
2531 show_help_options(options, "Main options:\n",
2532 OPT_EXPERT, 0);
2533 show_help_options(options, "\nAdvanced options:\n",
2534 OPT_EXPERT, OPT_EXPERT);
2535 printf("\nWhile playing:\n"
2536 "q, ESC              quit\n"
2537 "f                   toggle full screen\n"
2538 "p, SPC              pause\n"
2539 "a                   cycle audio channel\n"
2540 "v                   cycle video channel\n"
2541 "t                   cycle subtitle channel\n"
2542 "w                   show audio waves\n"
2543 "left/right          seek backward/forward 10 seconds\n"
2544 "down/up             seek backward/forward 1 minute\n"
2545 "mouse click         seek to percentage in file corresponding to fraction of width\n"
2546 );
2547 }
2549 static void opt_input_file(const char *filename)
2550 {
2551 if (!strcmp(filename, "-"))
2552 filename = "pipe:";
2553 input_filename = filename;
2554 }
2556 /* program entry point */
2557 int main(int argc, char **argv)
2558 {
2559 int flags, i;
2561 /* register all codecs, demuxers and protocols */
2562 avcodec_register_all();
2563 avdevice_register_all();
2564 av_register_all();
2566 for(i=0; i<CODEC_TYPE_NB; i++){
2567 avctx_opts[i]= avcodec_alloc_context2(i);
2568 }
2569 avformat_opts = avformat_alloc_context();
2570 sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
2572 show_banner();
2574 parse_options(argc, argv, options, opt_input_file);
2576 if (!input_filename) {
2577 fprintf(stderr, "An input file must be specified\n");
2578 exit(1);
2579 }
2581 if (display_disable) {
2582 video_disable = 1;
2583 }
2584 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2585 #if !defined(__MINGW32__) && !defined(__APPLE__)
2586 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2587 #endif
2588 if (SDL_Init (flags)) {
2589 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2590 exit(1);
2591 }
2593 if (!display_disable) {
2594 #if HAVE_SDL_VIDEO_SIZE
2595 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2596 fs_screen_width = vi->current_w;
2597 fs_screen_height = vi->current_h;
2598 #endif
2599 }
2601 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2602 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2603 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2604 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2606 av_init_packet(&flush_pkt);
2607 flush_pkt.data= "FLUSH";
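/* flush_pkt acts as a sentinel: it is pushed into the packet queues after a
   seek (see decode_thread above); the "FLUSH" payload itself is never parsed,
   the consumers are expected to recognize the packet by its data pointer
   (handled in code outside this excerpt). */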
2609 cur_stream = stream_open(input_filename, file_iformat);
2611 event_loop();
2613 /* never returns */
2615 return 0;
2616 }