ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
30 #include "libavcodec/opt.h"
32 #include "cmdutils.h"
34 #include <SDL.h>
35 #include <SDL_thread.h>
37 #ifdef __MINGW32__
38 #undef main /* We don't want SDL to override our main() */
39 #endif
41 #undef exit
43 const char program_name[] = "FFplay";
44 const int program_birth_year = 2003;
46 //#define DEBUG_SYNC
48 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
49 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
50 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
52 /* SDL audio buffer size, in samples. Should be small to have precise
53 A/V sync as SDL does not have hardware buffer fullness info. */
54 #define SDL_AUDIO_BUFFER_SIZE 1024
56 /* no AV sync correction is done if below the AV sync threshold */
57 #define AV_SYNC_THRESHOLD 0.01
58 /* no AV correction is done if the error is too big */
59 #define AV_NOSYNC_THRESHOLD 10.0
61 /* maximum audio speed change to get correct sync */
62 #define SAMPLE_CORRECTION_PERCENT_MAX 10
64 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
65 #define AUDIO_DIFF_AVG_NB 20
67 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
68 #define SAMPLE_ARRAY_SIZE (2*65536)
70 static int sws_flags = SWS_BICUBIC;
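/* PacketQueue: a simple thread-safe FIFO of demuxed AVPackets, shared between
   the read thread and the decoder threads */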
72 typedef struct PacketQueue {
73 AVPacketList *first_pkt, *last_pkt;
74 int nb_packets;
75 int size;
76 int abort_request;
77 SDL_mutex *mutex;
78 SDL_cond *cond;
79 } PacketQueue;
81 #define VIDEO_PICTURE_QUEUE_SIZE 1
82 #define SUBPICTURE_QUEUE_SIZE 4
84 typedef struct VideoPicture {
85 double pts; ///<presentation time stamp for this picture
86 SDL_Overlay *bmp;
87 int width, height; /* source height & width */
88 int allocated;
89 } VideoPicture;
91 typedef struct SubPicture {
92 double pts; /* presentation time stamp for this picture */
93 AVSubtitle sub;
94 } SubPicture;
96 enum {
97 AV_SYNC_AUDIO_MASTER, /* default choice */
98 AV_SYNC_VIDEO_MASTER,
99 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
102 typedef struct VideoState {
103 SDL_Thread *parse_tid;
104 SDL_Thread *video_tid;
105 AVInputFormat *iformat;
106 int no_background;
107 int abort_request;
108 int paused;
109 int last_paused;
110 int seek_req;
111 int seek_flags;
112 int64_t seek_pos;
113 int64_t seek_rel;
114 AVFormatContext *ic;
115 int dtg_active_format;
117 int audio_stream;
119 int av_sync_type;
120 double external_clock; /* external clock base */
121 int64_t external_clock_time;
123 double audio_clock;
124 double audio_diff_cum; /* used for AV difference average computation */
125 double audio_diff_avg_coef;
126 double audio_diff_threshold;
127 int audio_diff_avg_count;
128 AVStream *audio_st;
129 PacketQueue audioq;
130 int audio_hw_buf_size;
131 /* samples output by the codec. We reserve more space for A/V sync
132 compensation */
133 DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
134 DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
135 uint8_t *audio_buf;
136 unsigned int audio_buf_size; /* in bytes */
137 int audio_buf_index; /* in bytes */
138 AVPacket audio_pkt;
139 uint8_t *audio_pkt_data;
140 int audio_pkt_size;
141 enum SampleFormat audio_src_fmt;
142 AVAudioConvert *reformat_ctx;
144 int show_audio; /* if true, display audio samples */
145 int16_t sample_array[SAMPLE_ARRAY_SIZE];
146 int sample_array_index;
147 int last_i_start;
149 SDL_Thread *subtitle_tid;
150 int subtitle_stream;
151 int subtitle_stream_changed;
152 AVStream *subtitle_st;
153 PacketQueue subtitleq;
154 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
155 int subpq_size, subpq_rindex, subpq_windex;
156 SDL_mutex *subpq_mutex;
157 SDL_cond *subpq_cond;
159 double frame_timer;
160 double frame_last_pts;
161 double frame_last_delay;
162 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
163 int video_stream;
164 AVStream *video_st;
165 PacketQueue videoq;
166 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
167 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
168 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
169 int pictq_size, pictq_rindex, pictq_windex;
170 SDL_mutex *pictq_mutex;
171 SDL_cond *pictq_cond;
173 // QETimer *video_timer;
174 char filename[1024];
175 int width, height, xleft, ytop;
176 } VideoState;
178 static void show_help(void);
179 static int audio_write_get_buf_size(VideoState *is);
181 /* options specified by the user */
182 static AVInputFormat *file_iformat;
183 static const char *input_filename;
184 static int fs_screen_width;
185 static int fs_screen_height;
186 static int screen_width = 0;
187 static int screen_height = 0;
188 static int frame_width = 0;
189 static int frame_height = 0;
190 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
191 static int audio_disable;
192 static int video_disable;
193 static int wanted_audio_stream= 0;
194 static int wanted_video_stream= 0;
195 static int wanted_subtitle_stream= -1;
196 static int seek_by_bytes;
197 static int display_disable;
198 static int show_status;
199 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
200 static int64_t start_time = AV_NOPTS_VALUE;
201 static int debug = 0;
202 static int debug_mv = 0;
203 static int step = 0;
204 static int thread_count = 1;
205 static int workaround_bugs = 1;
206 static int fast = 0;
207 static int genpts = 0;
208 static int lowres = 0;
209 static int idct = FF_IDCT_AUTO;
210 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
211 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
212 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
213 static int error_recognition = FF_ER_CAREFUL;
214 static int error_concealment = 3;
215 static int decoder_reorder_pts= 0;
217 /* current context */
218 static int is_full_screen;
219 static VideoState *cur_stream;
220 static int64_t audio_callback_time;
222 static AVPacket flush_pkt;
224 #define FF_ALLOC_EVENT (SDL_USEREVENT)
225 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
226 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
228 static SDL_Surface *screen;
230 /* packet queue handling */
231 static void packet_queue_init(PacketQueue *q)
233 memset(q, 0, sizeof(PacketQueue));
234 q->mutex = SDL_CreateMutex();
235 q->cond = SDL_CreateCond();
238 static void packet_queue_flush(PacketQueue *q)
240 AVPacketList *pkt, *pkt1;
242 SDL_LockMutex(q->mutex);
243 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
244 pkt1 = pkt->next;
245 av_free_packet(&pkt->pkt);
246 av_freep(&pkt);
248 q->last_pkt = NULL;
249 q->first_pkt = NULL;
250 q->nb_packets = 0;
251 q->size = 0;
252 SDL_UnlockMutex(q->mutex);
255 static void packet_queue_end(PacketQueue *q)
257 packet_queue_flush(q);
258 SDL_DestroyMutex(q->mutex);
259 SDL_DestroyCond(q->cond);
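/* add a packet at the tail of the queue; av_dup_packet() is used so the stored
   packet owns its own data (the special flush packet is stored as-is) */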
262 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
264 AVPacketList *pkt1;
266 /* duplicate the packet */
267 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
268 return -1;
270 pkt1 = av_malloc(sizeof(AVPacketList));
271 if (!pkt1)
272 return -1;
273 pkt1->pkt = *pkt;
274 pkt1->next = NULL;
277 SDL_LockMutex(q->mutex);
279 if (!q->last_pkt)
281 q->first_pkt = pkt1;
282 else
283 q->last_pkt->next = pkt1;
284 q->last_pkt = pkt1;
285 q->nb_packets++;
286 q->size += pkt1->pkt.size + sizeof(*pkt1);
287 /* XXX: should duplicate packet data in DV case */
288 SDL_CondSignal(q->cond);
290 SDL_UnlockMutex(q->mutex);
291 return 0;
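/* mark the queue as aborted and wake up any thread blocked in packet_queue_get() */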
294 static void packet_queue_abort(PacketQueue *q)
296 SDL_LockMutex(q->mutex);
298 q->abort_request = 1;
300 SDL_CondSignal(q->cond);
302 SDL_UnlockMutex(q->mutex);
305 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
306 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
308 AVPacketList *pkt1;
309 int ret;
311 SDL_LockMutex(q->mutex);
313 for(;;) {
314 if (q->abort_request) {
315 ret = -1;
316 break;
319 pkt1 = q->first_pkt;
320 if (pkt1) {
321 q->first_pkt = pkt1->next;
322 if (!q->first_pkt)
323 q->last_pkt = NULL;
324 q->nb_packets--;
325 q->size -= pkt1->pkt.size + sizeof(*pkt1);
326 *pkt = pkt1->pkt;
327 av_free(pkt1);
328 ret = 1;
329 break;
330 } else if (!block) {
331 ret = 0;
332 break;
333 } else {
334 SDL_CondWait(q->cond, q->mutex);
337 SDL_UnlockMutex(q->mutex);
338 return ret;
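/* fill a solid rectangle of the given color on the SDL surface */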
341 static inline void fill_rectangle(SDL_Surface *screen,
342 int x, int y, int w, int h, int color)
344 SDL_Rect rect;
345 rect.x = x;
346 rect.y = y;
347 rect.w = w;
348 rect.h = h;
349 SDL_FillRect(screen, &rect, color);
352 #if 0
353 /* draw only the border of a rectangle */
354 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
356 int w1, w2, h1, h2;
358 /* fill the background */
359 w1 = x;
360 if (w1 < 0)
361 w1 = 0;
362 w2 = s->width - (x + w);
363 if (w2 < 0)
364 w2 = 0;
365 h1 = y;
366 if (h1 < 0)
367 h1 = 0;
368 h2 = s->height - (y + h);
369 if (h2 < 0)
370 h2 = 0;
371 fill_rectangle(screen,
372 s->xleft, s->ytop,
373 w1, s->height,
374 color);
375 fill_rectangle(screen,
376 s->xleft + s->width - w2, s->ytop,
377 w2, s->height,
378 color);
379 fill_rectangle(screen,
380 s->xleft + w1, s->ytop,
381 s->width - w1 - w2, h1,
382 color);
383 fill_rectangle(screen,
384 s->xleft + w1, s->ytop + s->height - h2,
385 s->width - w1 - w2, h2,
386 color);
388 #endif
392 #define SCALEBITS 10
393 #define ONE_HALF (1 << (SCALEBITS - 1))
394 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
396 #define RGB_TO_Y_CCIR(r, g, b) \
397 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
398 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
400 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
401 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
402 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
404 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
405 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
406 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
408 #define ALPHA_BLEND(a, oldp, newp, s)\
409 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
411 #define RGBA_IN(r, g, b, a, s)\
413 unsigned int v = ((const uint32_t *)(s))[0];\
414 a = (v >> 24) & 0xff;\
415 r = (v >> 16) & 0xff;\
416 g = (v >> 8) & 0xff;\
417 b = v & 0xff;\
420 #define YUVA_IN(y, u, v, a, s, pal)\
422 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
423 a = (val >> 24) & 0xff;\
424 y = (val >> 16) & 0xff;\
425 u = (val >> 8) & 0xff;\
426 v = val & 0xff;\
429 #define YUVA_OUT(d, y, u, v, a)\
431 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
435 #define BPP 1
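/* alpha-blend one palettized subtitle rectangle (with a YUVA palette) onto the
   YUV420P destination picture, handling the half-resolution chroma planes */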
437 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
439 int wrap, wrap3, width2, skip2;
440 int y, u, v, a, u1, v1, a1, w, h;
441 uint8_t *lum, *cb, *cr;
442 const uint8_t *p;
443 const uint32_t *pal;
444 int dstx, dsty, dstw, dsth;
446 dstw = av_clip(rect->w, 0, imgw);
447 dsth = av_clip(rect->h, 0, imgh);
448 dstx = av_clip(rect->x, 0, imgw - dstw);
449 dsty = av_clip(rect->y, 0, imgh - dsth);
450 lum = dst->data[0] + dsty * dst->linesize[0];
451 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
452 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
454 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
455 skip2 = dstx >> 1;
456 wrap = dst->linesize[0];
457 wrap3 = rect->pict.linesize[0];
458 p = rect->pict.data[0];
459 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
461 if (dsty & 1) {
462 lum += dstx;
463 cb += skip2;
464 cr += skip2;
466 if (dstx & 1) {
467 YUVA_IN(y, u, v, a, p, pal);
468 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
469 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
470 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
471 cb++;
472 cr++;
473 lum++;
474 p += BPP;
476 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
477 YUVA_IN(y, u, v, a, p, pal);
478 u1 = u;
479 v1 = v;
480 a1 = a;
481 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
483 YUVA_IN(y, u, v, a, p + BPP, pal);
484 u1 += u;
485 v1 += v;
486 a1 += a;
487 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
488 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
489 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
490 cb++;
491 cr++;
492 p += 2 * BPP;
493 lum += 2;
495 if (w) {
496 YUVA_IN(y, u, v, a, p, pal);
497 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
498 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
499 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
500 p++;
501 lum++;
503 p += wrap3 - dstw * BPP;
504 lum += wrap - dstw - dstx;
505 cb += dst->linesize[1] - width2 - skip2;
506 cr += dst->linesize[2] - width2 - skip2;
508 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
509 lum += dstx;
510 cb += skip2;
511 cr += skip2;
513 if (dstx & 1) {
514 YUVA_IN(y, u, v, a, p, pal);
515 u1 = u;
516 v1 = v;
517 a1 = a;
518 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
519 p += wrap3;
520 lum += wrap;
521 YUVA_IN(y, u, v, a, p, pal);
522 u1 += u;
523 v1 += v;
524 a1 += a;
525 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
527 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
528 cb++;
529 cr++;
530 p += -wrap3 + BPP;
531 lum += -wrap + 1;
533 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
534 YUVA_IN(y, u, v, a, p, pal);
535 u1 = u;
536 v1 = v;
537 a1 = a;
538 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540 YUVA_IN(y, u, v, a, p + BPP, pal);
541 u1 += u;
542 v1 += v;
543 a1 += a;
544 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
545 p += wrap3;
546 lum += wrap;
548 YUVA_IN(y, u, v, a, p, pal);
549 u1 += u;
550 v1 += v;
551 a1 += a;
552 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
554 YUVA_IN(y, u, v, a, p + BPP, pal);
555 u1 += u;
556 v1 += v;
557 a1 += a;
558 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
560 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
561 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
563 cb++;
564 cr++;
565 p += -wrap3 + 2 * BPP;
566 lum += -wrap + 2;
568 if (w) {
569 YUVA_IN(y, u, v, a, p, pal);
570 u1 = u;
571 v1 = v;
572 a1 = a;
573 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
574 p += wrap3;
575 lum += wrap;
576 YUVA_IN(y, u, v, a, p, pal);
577 u1 += u;
578 v1 += v;
579 a1 += a;
580 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
582 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
583 cb++;
584 cr++;
585 p += -wrap3 + BPP;
586 lum += -wrap + 1;
588 p += wrap3 + (wrap3 - dstw * BPP);
589 lum += wrap + (wrap - dstw - dstx);
590 cb += dst->linesize[1] - width2 - skip2;
591 cr += dst->linesize[2] - width2 - skip2;
593 /* handle odd height */
594 if (h) {
595 lum += dstx;
596 cb += skip2;
597 cr += skip2;
599 if (dstx & 1) {
600 YUVA_IN(y, u, v, a, p, pal);
601 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
602 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
603 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
604 cb++;
605 cr++;
606 lum++;
607 p += BPP;
609 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
610 YUVA_IN(y, u, v, a, p, pal);
611 u1 = u;
612 v1 = v;
613 a1 = a;
614 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616 YUVA_IN(y, u, v, a, p + BPP, pal);
617 u1 += u;
618 v1 += v;
619 a1 += a;
620 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
621 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623 cb++;
624 cr++;
625 p += 2 * BPP;
626 lum += 2;
628 if (w) {
629 YUVA_IN(y, u, v, a, p, pal);
630 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
631 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
632 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
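/* free the rectangles attached to a subpicture and reset its AVSubtitle */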
637 static void free_subpicture(SubPicture *sp)
639 int i;
641 for (i = 0; i < sp->sub.num_rects; i++)
643 av_freep(&sp->sub.rects[i]->pict.data[0]);
644 av_freep(&sp->sub.rects[i]->pict.data[1]);
645 av_freep(&sp->sub.rects[i]);
648 av_free(sp->sub.rects);
650 memset(&sp->sub, 0, sizeof(AVSubtitle));
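/* display the current video picture: blend any pending subtitle into the YUV
   overlay and blit it, preserving the stream aspect ratio */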
653 static void video_image_display(VideoState *is)
655 VideoPicture *vp;
656 SubPicture *sp;
657 AVPicture pict;
658 float aspect_ratio;
659 int width, height, x, y;
660 SDL_Rect rect;
661 int i;
663 vp = &is->pictq[is->pictq_rindex];
664 if (vp->bmp) {
665 /* XXX: use variable in the frame */
666 if (is->video_st->sample_aspect_ratio.num)
667 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
668 else if (is->video_st->codec->sample_aspect_ratio.num)
669 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
670 else
671 aspect_ratio = 0;
672 if (aspect_ratio <= 0.0)
673 aspect_ratio = 1.0;
674 aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
675 /* if an active format is indicated, then it overrides the
676 mpeg format */
677 #if 0
678 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
679 is->dtg_active_format = is->video_st->codec->dtg_active_format;
680 printf("dtg_active_format=%d\n", is->dtg_active_format);
682 #endif
683 #if 0
684 switch(is->video_st->codec->dtg_active_format) {
685 case FF_DTG_AFD_SAME:
686 default:
687 /* nothing to do */
688 break;
689 case FF_DTG_AFD_4_3:
690 aspect_ratio = 4.0 / 3.0;
691 break;
692 case FF_DTG_AFD_16_9:
693 aspect_ratio = 16.0 / 9.0;
694 break;
695 case FF_DTG_AFD_14_9:
696 aspect_ratio = 14.0 / 9.0;
697 break;
698 case FF_DTG_AFD_4_3_SP_14_9:
699 aspect_ratio = 14.0 / 9.0;
700 break;
701 case FF_DTG_AFD_16_9_SP_14_9:
702 aspect_ratio = 14.0 / 9.0;
703 break;
704 case FF_DTG_AFD_SP_4_3:
705 aspect_ratio = 4.0 / 3.0;
706 break;
708 #endif
710 if (is->subtitle_st)
712 if (is->subpq_size > 0)
714 sp = &is->subpq[is->subpq_rindex];
716 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
718 SDL_LockYUVOverlay (vp->bmp);
720 pict.data[0] = vp->bmp->pixels[0];
721 pict.data[1] = vp->bmp->pixels[2];
722 pict.data[2] = vp->bmp->pixels[1];
724 pict.linesize[0] = vp->bmp->pitches[0];
725 pict.linesize[1] = vp->bmp->pitches[2];
726 pict.linesize[2] = vp->bmp->pitches[1];
728 for (i = 0; i < sp->sub.num_rects; i++)
729 blend_subrect(&pict, sp->sub.rects[i],
730 vp->bmp->w, vp->bmp->h);
732 SDL_UnlockYUVOverlay (vp->bmp);
738 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
739 height = is->height;
740 width = ((int)rint(height * aspect_ratio)) & ~1;
741 if (width > is->width) {
742 width = is->width;
743 height = ((int)rint(width / aspect_ratio)) & ~1;
745 x = (is->width - width) / 2;
746 y = (is->height - height) / 2;
747 if (!is->no_background) {
748 /* fill the background */
749 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
750 } else {
751 is->no_background = 0;
753 rect.x = is->xleft + x;
754 rect.y = is->ytop + y;
755 rect.w = width;
756 rect.h = height;
757 SDL_DisplayYUVOverlay(vp->bmp, &rect);
758 } else {
759 #if 0
760 fill_rectangle(screen,
761 is->xleft, is->ytop, is->width, is->height,
762 QERGB(0x00, 0x00, 0x00));
763 #endif
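/* positive modulo, used to wrap indices into the circular audio sample array */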
767 static inline int compute_mod(int a, int b)
769 a = a % b;
770 if (a >= 0)
771 return a;
772 else
773 return a + b;
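/* draw the audio waveform, one channel per horizontal band, when the stream is
   displayed in audio-only mode */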
776 static void video_audio_display(VideoState *s)
778 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
779 int ch, channels, h, h2, bgcolor, fgcolor;
780 int64_t time_diff;
782 /* compute display index: center on currently output samples */
783 channels = s->audio_st->codec->channels;
784 nb_display_channels = channels;
785 if (!s->paused) {
786 n = 2 * channels;
787 delay = audio_write_get_buf_size(s);
788 delay /= n;
790 /* to be more precise, we take into account the time spent since
791 the last buffer computation */
792 if (audio_callback_time) {
793 time_diff = av_gettime() - audio_callback_time;
794 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
797 delay -= s->width / 2;
798 if (delay < s->width)
799 delay = s->width;
801 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
803 h= INT_MIN;
804 for(i=0; i<1000; i+=channels){
805 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
806 int a= s->sample_array[idx];
807 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
808 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
809 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
810 int score= a-d;
811 if(h<score && (b^c)<0){
812 h= score;
813 i_start= idx;
817 s->last_i_start = i_start;
818 } else {
819 i_start = s->last_i_start;
822 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
823 fill_rectangle(screen,
824 s->xleft, s->ytop, s->width, s->height,
825 bgcolor);
827 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
829 /* total height for one channel */
830 h = s->height / nb_display_channels;
831 /* graph height / 2 */
832 h2 = (h * 9) / 20;
833 for(ch = 0;ch < nb_display_channels; ch++) {
834 i = i_start + ch;
835 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
836 for(x = 0; x < s->width; x++) {
837 y = (s->sample_array[i] * h2) >> 15;
838 if (y < 0) {
839 y = -y;
840 ys = y1 - y;
841 } else {
842 ys = y1;
844 fill_rectangle(screen,
845 s->xleft + x, ys, 1, y,
846 fgcolor);
847 i += channels;
848 if (i >= SAMPLE_ARRAY_SIZE)
849 i -= SAMPLE_ARRAY_SIZE;
853 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
855 for(ch = 1;ch < nb_display_channels; ch++) {
856 y = s->ytop + ch * h;
857 fill_rectangle(screen,
858 s->xleft, y, s->width, 1,
859 fgcolor);
861 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
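/* create or resize the SDL video surface, using the full-screen size, the
   user-requested size or the codec dimensions */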
864 static int video_open(VideoState *is){
865 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
866 int w,h;
868 if(is_full_screen) flags |= SDL_FULLSCREEN;
869 else flags |= SDL_RESIZABLE;
871 if (is_full_screen && fs_screen_width) {
872 w = fs_screen_width;
873 h = fs_screen_height;
874 } else if(!is_full_screen && screen_width){
875 w = screen_width;
876 h = screen_height;
877 }else if (is->video_st && is->video_st->codec->width){
878 w = is->video_st->codec->width;
879 h = is->video_st->codec->height;
880 } else {
881 w = 640;
882 h = 480;
884 #ifndef __APPLE__
885 screen = SDL_SetVideoMode(w, h, 0, flags);
886 #else
887 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
888 screen = SDL_SetVideoMode(w, h, 24, flags);
889 #endif
890 if (!screen) {
891 fprintf(stderr, "SDL: could not set video mode - exiting\n");
892 return -1;
894 SDL_WM_SetCaption("FFplay", "FFplay");
896 is->width = screen->w;
897 is->height = screen->h;
899 return 0;
902 /* display the current picture, if any */
903 static void video_display(VideoState *is)
905 if(!screen)
906 video_open(cur_stream);
907 if (is->audio_st && is->show_audio)
908 video_audio_display(is);
909 else if (is->video_st)
910 video_image_display(is);
913 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
915 SDL_Event event;
916 event.type = FF_REFRESH_EVENT;
917 event.user.data1 = opaque;
918 SDL_PushEvent(&event);
919 return 0; /* 0 means stop timer */
922 /* schedule a video refresh in 'delay' ms */
923 static void schedule_refresh(VideoState *is, int delay)
925 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
926 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
929 /* get the current audio clock value */
930 static double get_audio_clock(VideoState *is)
932 double pts;
933 int hw_buf_size, bytes_per_sec;
934 pts = is->audio_clock;
935 hw_buf_size = audio_write_get_buf_size(is);
936 bytes_per_sec = 0;
937 if (is->audio_st) {
938 bytes_per_sec = is->audio_st->codec->sample_rate *
939 2 * is->audio_st->codec->channels;
941 if (bytes_per_sec)
942 pts -= (double)hw_buf_size / bytes_per_sec;
943 return pts;
946 /* get the current video clock value */
947 static double get_video_clock(VideoState *is)
949 double delta;
950 if (is->paused) {
951 delta = 0;
952 } else {
953 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
955 return is->video_current_pts + delta;
958 /* get the current external clock value */
959 static double get_external_clock(VideoState *is)
961 int64_t ti;
962 ti = av_gettime();
963 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
966 /* get the current master clock value */
967 static double get_master_clock(VideoState *is)
969 double val;
971 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
972 if (is->video_st)
973 val = get_video_clock(is);
974 else
975 val = get_audio_clock(is);
976 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
977 if (is->audio_st)
978 val = get_audio_clock(is);
979 else
980 val = get_video_clock(is);
981 } else {
982 val = get_external_clock(is);
984 return val;
987 /* seek in the stream */
988 static void stream_seek(VideoState *is, int64_t pos, int64_t rel)
990 if (!is->seek_req) {
991 is->seek_pos = pos;
992 is->seek_rel = rel;
993 if (seek_by_bytes)
994 is->seek_flags |= AVSEEK_FLAG_BYTE;
995 is->seek_req = 1;
999 /* pause or resume the video */
1000 static void stream_pause(VideoState *is)
1002 is->paused = !is->paused;
1003 if (!is->paused) {
1004 is->video_current_pts = get_video_clock(is);
1005 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
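/* compute how long to wait before displaying the frame with the given pts,
   nudging the delay toward the master clock when video is not the master */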
1009 static double compute_frame_delay(double frame_current_pts, VideoState *is)
1011 double actual_delay, delay, sync_threshold, ref_clock, diff;
1013 /* compute nominal delay */
1014 delay = frame_current_pts - is->frame_last_pts;
1015 if (delay <= 0 || delay >= 10.0) {
1016 /* if incorrect delay, use previous one */
1017 delay = is->frame_last_delay;
1018 } else {
1019 is->frame_last_delay = delay;
1021 is->frame_last_pts = frame_current_pts;
1023 /* update delay to follow master synchronisation source */
1024 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1025 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1026 /* if video is slave, we try to correct big delays by
1027 duplicating or deleting a frame */
1028 ref_clock = get_master_clock(is);
1029 diff = frame_current_pts - ref_clock;
1031 /* skip or repeat frame. We take into account the
1032 delay to compute the threshold. I still don't know
1033 if it is the best guess */
1034 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1035 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1036 if (diff <= -sync_threshold)
1037 delay = 0;
1038 else if (diff >= sync_threshold)
1039 delay = 2 * delay;
1043 is->frame_timer += delay;
1044 /* compute the REAL delay (we need to do that to avoid
1045 long term errors) */
1046 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1047 if (actual_delay < 0.010) {
1048 /* XXX: should skip picture */
1049 actual_delay = 0.010;
1052 #if defined(DEBUG_SYNC)
1053 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1054 delay, actual_delay, frame_current_pts, -diff);
1055 #endif
1057 return actual_delay;
1060 /* called to display each frame */
1061 static void video_refresh_timer(void *opaque)
1063 VideoState *is = opaque;
1064 VideoPicture *vp;
1066 SubPicture *sp, *sp2;
1068 if (is->video_st) {
1069 if (is->pictq_size == 0) {
1070 /* if no picture, need to wait */
1071 schedule_refresh(is, 1);
1072 } else {
1073 /* dequeue the picture */
1074 vp = &is->pictq[is->pictq_rindex];
1076 /* update current video pts */
1077 is->video_current_pts = vp->pts;
1078 is->video_current_pts_time = av_gettime();
1080 /* launch timer for next picture */
1081 schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
1083 if(is->subtitle_st) {
1084 if (is->subtitle_stream_changed) {
1085 SDL_LockMutex(is->subpq_mutex);
1087 while (is->subpq_size) {
1088 free_subpicture(&is->subpq[is->subpq_rindex]);
1090 /* update queue size and signal for next picture */
1091 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1092 is->subpq_rindex = 0;
1094 is->subpq_size--;
1096 is->subtitle_stream_changed = 0;
1098 SDL_CondSignal(is->subpq_cond);
1099 SDL_UnlockMutex(is->subpq_mutex);
1100 } else {
1101 if (is->subpq_size > 0) {
1102 sp = &is->subpq[is->subpq_rindex];
1104 if (is->subpq_size > 1)
1105 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1106 else
1107 sp2 = NULL;
1109 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1110 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1112 free_subpicture(sp);
1114 /* update queue size and signal for next picture */
1115 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1116 is->subpq_rindex = 0;
1118 SDL_LockMutex(is->subpq_mutex);
1119 is->subpq_size--;
1120 SDL_CondSignal(is->subpq_cond);
1121 SDL_UnlockMutex(is->subpq_mutex);
1127 /* display picture */
1128 video_display(is);
1130 /* update queue size and signal for next picture */
1131 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1132 is->pictq_rindex = 0;
1134 SDL_LockMutex(is->pictq_mutex);
1135 is->pictq_size--;
1136 SDL_CondSignal(is->pictq_cond);
1137 SDL_UnlockMutex(is->pictq_mutex);
1139 } else if (is->audio_st) {
1140 /* draw the next audio frame */
1142 schedule_refresh(is, 40);
1144 /* if there is only an audio stream, then display the audio bars (better
1145 than nothing, just to test the implementation) */
1147 /* display picture */
1148 video_display(is);
1149 } else {
1150 schedule_refresh(is, 100);
1152 if (show_status) {
1153 static int64_t last_time;
1154 int64_t cur_time;
1155 int aqsize, vqsize, sqsize;
1156 double av_diff;
1158 cur_time = av_gettime();
1159 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1160 aqsize = 0;
1161 vqsize = 0;
1162 sqsize = 0;
1163 if (is->audio_st)
1164 aqsize = is->audioq.size;
1165 if (is->video_st)
1166 vqsize = is->videoq.size;
1167 if (is->subtitle_st)
1168 sqsize = is->subtitleq.size;
1169 av_diff = 0;
1170 if (is->audio_st && is->video_st)
1171 av_diff = get_audio_clock(is) - get_video_clock(is);
1172 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1173 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1174 fflush(stdout);
1175 last_time = cur_time;
1180 /* allocate a picture (this needs to be done in the main thread to avoid
1181 potential locking problems) */
1182 static void alloc_picture(void *opaque)
1184 VideoState *is = opaque;
1185 VideoPicture *vp;
1187 vp = &is->pictq[is->pictq_windex];
1189 if (vp->bmp)
1190 SDL_FreeYUVOverlay(vp->bmp);
1192 #if 0
1193 /* XXX: use generic function */
1194 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1195 switch(is->video_st->codec->pix_fmt) {
1196 case PIX_FMT_YUV420P:
1197 case PIX_FMT_YUV422P:
1198 case PIX_FMT_YUV444P:
1199 case PIX_FMT_YUYV422:
1200 case PIX_FMT_YUV410P:
1201 case PIX_FMT_YUV411P:
1202 is_yuv = 1;
1203 break;
1204 default:
1205 is_yuv = 0;
1206 break;
1208 #endif
1209 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1210 is->video_st->codec->height,
1211 SDL_YV12_OVERLAY,
1212 screen);
1213 vp->width = is->video_st->codec->width;
1214 vp->height = is->video_st->codec->height;
1216 SDL_LockMutex(is->pictq_mutex);
1217 vp->allocated = 1;
1218 SDL_CondSignal(is->pictq_cond);
1219 SDL_UnlockMutex(is->pictq_mutex);
1224 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1226 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1228 VideoPicture *vp;
1229 int dst_pix_fmt;
1230 AVPicture pict;
1231 static struct SwsContext *img_convert_ctx;
1233 /* wait until we have space to put a new picture */
1234 SDL_LockMutex(is->pictq_mutex);
1235 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1236 !is->videoq.abort_request) {
1237 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1239 SDL_UnlockMutex(is->pictq_mutex);
1241 if (is->videoq.abort_request)
1242 return -1;
1244 vp = &is->pictq[is->pictq_windex];
1246 /* alloc or resize hardware picture buffer */
1247 if (!vp->bmp ||
1248 vp->width != is->video_st->codec->width ||
1249 vp->height != is->video_st->codec->height) {
1250 SDL_Event event;
1252 vp->allocated = 0;
1254 /* the allocation must be done in the main thread to avoid
1255 locking problems */
1256 event.type = FF_ALLOC_EVENT;
1257 event.user.data1 = is;
1258 SDL_PushEvent(&event);
1260 /* wait until the picture is allocated */
1261 SDL_LockMutex(is->pictq_mutex);
1262 while (!vp->allocated && !is->videoq.abort_request) {
1263 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1265 SDL_UnlockMutex(is->pictq_mutex);
1267 if (is->videoq.abort_request)
1268 return -1;
1271 /* if the frame is not skipped, then display it */
1272 if (vp->bmp) {
1273 /* get a pointer on the bitmap */
1274 SDL_LockYUVOverlay (vp->bmp);
1276 dst_pix_fmt = PIX_FMT_YUV420P;
1277 pict.data[0] = vp->bmp->pixels[0];
1278 pict.data[1] = vp->bmp->pixels[2];
1279 pict.data[2] = vp->bmp->pixels[1];
1281 pict.linesize[0] = vp->bmp->pitches[0];
1282 pict.linesize[1] = vp->bmp->pitches[2];
1283 pict.linesize[2] = vp->bmp->pitches[1];
1284 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1285 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1286 is->video_st->codec->width, is->video_st->codec->height,
1287 is->video_st->codec->pix_fmt,
1288 is->video_st->codec->width, is->video_st->codec->height,
1289 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1290 if (img_convert_ctx == NULL) {
1291 fprintf(stderr, "Cannot initialize the conversion context\n");
1292 exit(1);
1294 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1295 0, is->video_st->codec->height, pict.data, pict.linesize);
1296 /* update the bitmap content */
1297 SDL_UnlockYUVOverlay(vp->bmp);
1299 vp->pts = pts;
1301 /* now we can update the picture count */
1302 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1303 is->pictq_windex = 0;
1304 SDL_LockMutex(is->pictq_mutex);
1305 is->pictq_size++;
1306 SDL_UnlockMutex(is->pictq_mutex);
1308 return 0;
1312 * compute the exact PTS for the picture if it is omitted in the stream
1313 * @param pts1 the dts of the pkt / pts of the frame
1315 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1317 double frame_delay, pts;
1319 pts = pts1;
1321 if (pts != 0) {
1322 /* update video clock with pts, if present */
1323 is->video_clock = pts;
1324 } else {
1325 pts = is->video_clock;
1327 /* update video clock for next frame */
1328 frame_delay = av_q2d(is->video_st->codec->time_base);
1329 /* for MPEG2, the frame can be repeated, so we update the
1330 clock accordingly */
1331 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1332 is->video_clock += frame_delay;
1334 #if defined(DEBUG_SYNC) && 0
1336 int ftype;
1337 if (src_frame->pict_type == FF_B_TYPE)
1338 ftype = 'B';
1339 else if (src_frame->pict_type == FF_I_TYPE)
1340 ftype = 'I';
1341 else
1342 ftype = 'P';
1343 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1344 ftype, pts, pts1);
1346 #endif
1347 return queue_picture(is, src_frame, pts);
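/* video decoder thread: reads packets from videoq, decodes them and queues the
   resulting pictures for display */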
1350 static int video_thread(void *arg)
1352 VideoState *is = arg;
1353 AVPacket pkt1, *pkt = &pkt1;
1354 int len1, got_picture;
1355 AVFrame *frame= avcodec_alloc_frame();
1356 double pts;
1358 for(;;) {
1359 while (is->paused && !is->videoq.abort_request) {
1360 SDL_Delay(10);
1362 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1363 break;
1365 if(pkt->data == flush_pkt.data){
1366 avcodec_flush_buffers(is->video_st->codec);
1367 continue;
1370 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1371 this packet, if any */
1372 is->video_st->codec->reordered_opaque= pkt->pts;
1373 len1 = avcodec_decode_video(is->video_st->codec,
1374 frame, &got_picture,
1375 pkt->data, pkt->size);
1377 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1378 && frame->reordered_opaque != AV_NOPTS_VALUE)
1379 pts= frame->reordered_opaque;
1380 else if(pkt->dts != AV_NOPTS_VALUE)
1381 pts= pkt->dts;
1382 else
1383 pts= 0;
1384 pts *= av_q2d(is->video_st->time_base);
1386 // if (len1 < 0)
1387 // break;
1388 if (got_picture) {
1389 if (output_picture2(is, frame, pts) < 0)
1390 goto the_end;
1392 av_free_packet(pkt);
1393 if (step)
1394 if (cur_stream)
1395 stream_pause(cur_stream);
1397 the_end:
1398 av_free(frame);
1399 return 0;
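/* subtitle decoder thread: decodes subtitle packets, converts their RGBA
   palettes to YUVA and queues the subpictures */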
1402 static int subtitle_thread(void *arg)
1404 VideoState *is = arg;
1405 SubPicture *sp;
1406 AVPacket pkt1, *pkt = &pkt1;
1407 int len1, got_subtitle;
1408 double pts;
1409 int i, j;
1410 int r, g, b, y, u, v, a;
1412 for(;;) {
1413 while (is->paused && !is->subtitleq.abort_request) {
1414 SDL_Delay(10);
1416 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1417 break;
1419 if(pkt->data == flush_pkt.data){
1420 avcodec_flush_buffers(is->subtitle_st->codec);
1421 continue;
1423 SDL_LockMutex(is->subpq_mutex);
1424 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1425 !is->subtitleq.abort_request) {
1426 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1428 SDL_UnlockMutex(is->subpq_mutex);
1430 if (is->subtitleq.abort_request)
1431 goto the_end;
1433 sp = &is->subpq[is->subpq_windex];
1435 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1436 this packet, if any */
1437 pts = 0;
1438 if (pkt->pts != AV_NOPTS_VALUE)
1439 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1441 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1442 &sp->sub, &got_subtitle,
1443 pkt->data, pkt->size);
1444 // if (len1 < 0)
1445 // break;
1446 if (got_subtitle && sp->sub.format == 0) {
1447 sp->pts = pts;
1449 for (i = 0; i < sp->sub.num_rects; i++)
1451 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1453 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1454 y = RGB_TO_Y_CCIR(r, g, b);
1455 u = RGB_TO_U_CCIR(r, g, b, 0);
1456 v = RGB_TO_V_CCIR(r, g, b, 0);
1457 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1461 /* now we can update the picture count */
1462 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1463 is->subpq_windex = 0;
1464 SDL_LockMutex(is->subpq_mutex);
1465 is->subpq_size++;
1466 SDL_UnlockMutex(is->subpq_mutex);
1468 av_free_packet(pkt);
1469 // if (step)
1470 // if (cur_stream)
1471 // stream_pause(cur_stream);
1473 the_end:
1474 return 0;
1477 /* copy samples for viewing in editor window */
1478 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1480 int size, len, channels;
1482 channels = is->audio_st->codec->channels;
1484 size = samples_size / sizeof(short);
1485 while (size > 0) {
1486 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1487 if (len > size)
1488 len = size;
1489 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1490 samples += len;
1491 is->sample_array_index += len;
1492 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1493 is->sample_array_index = 0;
1494 size -= len;
1498 /* return the new audio buffer size (samples can be added or deleted
1499 to get better sync when video or the external clock is the master) */
1500 static int synchronize_audio(VideoState *is, short *samples,
1501 int samples_size1, double pts)
1503 int n, samples_size;
1504 double ref_clock;
1506 n = 2 * is->audio_st->codec->channels;
1507 samples_size = samples_size1;
1509 /* if not master, then we try to remove or add samples to correct the clock */
1510 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1511 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1512 double diff, avg_diff;
1513 int wanted_size, min_size, max_size, nb_samples;
1515 ref_clock = get_master_clock(is);
1516 diff = get_audio_clock(is) - ref_clock;
1518 if (diff < AV_NOSYNC_THRESHOLD) {
1519 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1520 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1521 /* not enough measures to have a correct estimate */
1522 is->audio_diff_avg_count++;
1523 } else {
1524 /* estimate the A-V difference */
1525 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1527 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1528 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1529 nb_samples = samples_size / n;
1531 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1532 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1533 if (wanted_size < min_size)
1534 wanted_size = min_size;
1535 else if (wanted_size > max_size)
1536 wanted_size = max_size;
1538 /* add or remove samples to correct the sync */
1539 if (wanted_size < samples_size) {
1540 /* remove samples */
1541 samples_size = wanted_size;
1542 } else if (wanted_size > samples_size) {
1543 uint8_t *samples_end, *q;
1544 int nb;
1546 /* add samples */
1547 nb = (wanted_size - samples_size);
1548 samples_end = (uint8_t *)samples + samples_size - n;
1549 q = samples_end + n;
1550 while (nb > 0) {
1551 memcpy(q, samples_end, n);
1552 q += n;
1553 nb -= n;
1555 samples_size = wanted_size;
1558 #if 0
1559 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1560 diff, avg_diff, samples_size - samples_size1,
1561 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1562 #endif
1564 } else {
1565 /* difference is too big: it may be caused by initial PTS errors, so
1566 reset the A-V filter */
1567 is->audio_diff_avg_count = 0;
1568 is->audio_diff_cum = 0;
1572 return samples_size;
1575 /* decode one audio frame and return its uncompressed size */
1576 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1578 AVPacket *pkt = &is->audio_pkt;
1579 AVCodecContext *dec= is->audio_st->codec;
1580 int n, len1, data_size;
1581 double pts;
1583 for(;;) {
1584 /* NOTE: the audio packet can contain several frames */
1585 while (is->audio_pkt_size > 0) {
1586 data_size = sizeof(is->audio_buf1);
1587 len1 = avcodec_decode_audio2(dec,
1588 (int16_t *)is->audio_buf1, &data_size,
1589 is->audio_pkt_data, is->audio_pkt_size);
1590 if (len1 < 0) {
1591 /* if error, we skip the frame */
1592 is->audio_pkt_size = 0;
1593 break;
1596 is->audio_pkt_data += len1;
1597 is->audio_pkt_size -= len1;
1598 if (data_size <= 0)
1599 continue;
1601 if (dec->sample_fmt != is->audio_src_fmt) {
1602 if (is->reformat_ctx)
1603 av_audio_convert_free(is->reformat_ctx);
1604 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1605 dec->sample_fmt, 1, NULL, 0);
1606 if (!is->reformat_ctx) {
1607 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1608 avcodec_get_sample_fmt_name(dec->sample_fmt),
1609 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1610 break;
1612 is->audio_src_fmt= dec->sample_fmt;
1615 if (is->reformat_ctx) {
1616 const void *ibuf[6]= {is->audio_buf1};
1617 void *obuf[6]= {is->audio_buf2};
1618 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1619 int ostride[6]= {2};
1620 int len= data_size/istride[0];
1621 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1622 printf("av_audio_convert() failed\n");
1623 break;
1625 is->audio_buf= is->audio_buf2;
1626 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
1627 remove this legacy cruft */
1628 data_size= len*2;
1629 }else{
1630 is->audio_buf= is->audio_buf1;
1633 /* if no pts, then compute it */
1634 pts = is->audio_clock;
1635 *pts_ptr = pts;
1636 n = 2 * dec->channels;
1637 is->audio_clock += (double)data_size /
1638 (double)(n * dec->sample_rate);
1639 #if defined(DEBUG_SYNC)
1641 static double last_clock;
1642 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1643 is->audio_clock - last_clock,
1644 is->audio_clock, pts);
1645 last_clock = is->audio_clock;
1647 #endif
1648 return data_size;
1651 /* free the current packet */
1652 if (pkt->data)
1653 av_free_packet(pkt);
1655 if (is->paused || is->audioq.abort_request) {
1656 return -1;
1659 /* read next packet */
1660 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1661 return -1;
1662 if(pkt->data == flush_pkt.data){
1663 avcodec_flush_buffers(dec);
1664 continue;
1667 is->audio_pkt_data = pkt->data;
1668 is->audio_pkt_size = pkt->size;
1670 /* update the audio clock with the packet pts, if available */
1671 if (pkt->pts != AV_NOPTS_VALUE) {
1672 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1677 /* get the current audio output buffer size, in bytes. With SDL, we
1678 cannot have precise information */
1679 static int audio_write_get_buf_size(VideoState *is)
1681 return is->audio_buf_size - is->audio_buf_index;
1685 /* prepare a new audio buffer */
1686 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1688 VideoState *is = opaque;
1689 int audio_size, len1;
1690 double pts;
1692 audio_callback_time = av_gettime();
1694 while (len > 0) {
1695 if (is->audio_buf_index >= is->audio_buf_size) {
1696 audio_size = audio_decode_frame(is, &pts);
1697 if (audio_size < 0) {
1698 /* if error, just output silence */
1699 is->audio_buf = is->audio_buf1;
1700 is->audio_buf_size = 1024;
1701 memset(is->audio_buf, 0, is->audio_buf_size);
1702 } else {
1703 if (is->show_audio)
1704 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1705 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1706 pts);
1707 is->audio_buf_size = audio_size;
1709 is->audio_buf_index = 0;
1711 len1 = is->audio_buf_size - is->audio_buf_index;
1712 if (len1 > len)
1713 len1 = len;
1714 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1715 len -= len1;
1716 stream += len1;
1717 is->audio_buf_index += len1;
1721 /* open a given stream. Return 0 if OK */
1722 static int stream_component_open(VideoState *is, int stream_index)
1724 AVFormatContext *ic = is->ic;
1725 AVCodecContext *enc;
1726 AVCodec *codec;
1727 SDL_AudioSpec wanted_spec, spec;
1729 if (stream_index < 0 || stream_index >= ic->nb_streams)
1730 return -1;
1731 enc = ic->streams[stream_index]->codec;
1733 /* prepare audio output */
1734 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1735 if (enc->channels > 0) {
1736 enc->request_channels = FFMIN(2, enc->channels);
1737 } else {
1738 enc->request_channels = 2;
1742 codec = avcodec_find_decoder(enc->codec_id);
1743 enc->debug_mv = debug_mv;
1744 enc->debug = debug;
1745 enc->workaround_bugs = workaround_bugs;
1746 enc->lowres = lowres;
1747 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1748 enc->idct_algo= idct;
1749 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1750 enc->skip_frame= skip_frame;
1751 enc->skip_idct= skip_idct;
1752 enc->skip_loop_filter= skip_loop_filter;
1753 enc->error_recognition= error_recognition;
1754 enc->error_concealment= error_concealment;
1756 set_context_opts(enc, avcodec_opts[enc->codec_type], 0);
1758 if (!codec ||
1759 avcodec_open(enc, codec) < 0)
1760 return -1;
1762 /* prepare audio output */
1763 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1764 wanted_spec.freq = enc->sample_rate;
1765 wanted_spec.format = AUDIO_S16SYS;
1766 wanted_spec.channels = enc->channels;
1767 wanted_spec.silence = 0;
1768 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1769 wanted_spec.callback = sdl_audio_callback;
1770 wanted_spec.userdata = is;
1771 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1772 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1773 return -1;
1775 is->audio_hw_buf_size = spec.size;
1776 is->audio_src_fmt= SAMPLE_FMT_S16;
1779 if(thread_count>1)
1780 avcodec_thread_init(enc, thread_count);
1781 enc->thread_count= thread_count;
1782 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1783 switch(enc->codec_type) {
1784 case CODEC_TYPE_AUDIO:
1785 is->audio_stream = stream_index;
1786 is->audio_st = ic->streams[stream_index];
1787 is->audio_buf_size = 0;
1788 is->audio_buf_index = 0;
1790 /* init averaging filter */
1791 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1792 is->audio_diff_avg_count = 0;
1793 /* since we do not have a precise enough audio FIFO fullness,
1794 we correct audio sync only if the error is larger than this threshold */
1795 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1797 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1798 packet_queue_init(&is->audioq);
1799 SDL_PauseAudio(0);
1800 break;
1801 case CODEC_TYPE_VIDEO:
1802 is->video_stream = stream_index;
1803 is->video_st = ic->streams[stream_index];
1805 is->frame_last_delay = 40e-3;
1806 is->frame_timer = (double)av_gettime() / 1000000.0;
1807 is->video_current_pts_time = av_gettime();
1809 packet_queue_init(&is->videoq);
1810 is->video_tid = SDL_CreateThread(video_thread, is);
1811 break;
1812 case CODEC_TYPE_SUBTITLE:
1813 is->subtitle_stream = stream_index;
1814 is->subtitle_st = ic->streams[stream_index];
1815 packet_queue_init(&is->subtitleq);
1817 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1818 break;
1819 default:
1820 break;
1822 return 0;
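/* stop the decoder of one stream and free its queue, thread and SDL resources */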
1825 static void stream_component_close(VideoState *is, int stream_index)
1827 AVFormatContext *ic = is->ic;
1828 AVCodecContext *enc;
1830 if (stream_index < 0 || stream_index >= ic->nb_streams)
1831 return;
1832 enc = ic->streams[stream_index]->codec;
1834 switch(enc->codec_type) {
1835 case CODEC_TYPE_AUDIO:
1836 packet_queue_abort(&is->audioq);
1838 SDL_CloseAudio();
1840 packet_queue_end(&is->audioq);
1841 if (is->reformat_ctx)
1842 av_audio_convert_free(is->reformat_ctx);
1843 break;
1844 case CODEC_TYPE_VIDEO:
1845 packet_queue_abort(&is->videoq);
1847 /* note: we also signal this mutex to make sure we unblock the
1848 video thread in all cases */
1849 SDL_LockMutex(is->pictq_mutex);
1850 SDL_CondSignal(is->pictq_cond);
1851 SDL_UnlockMutex(is->pictq_mutex);
1853 SDL_WaitThread(is->video_tid, NULL);
1855 packet_queue_end(&is->videoq);
1856 break;
1857 case CODEC_TYPE_SUBTITLE:
1858 packet_queue_abort(&is->subtitleq);
1860 /* note: we also signal this mutex to make sure we unblock the
1861 subtitle thread in all cases */
1862 SDL_LockMutex(is->subpq_mutex);
1863 is->subtitle_stream_changed = 1;
1865 SDL_CondSignal(is->subpq_cond);
1866 SDL_UnlockMutex(is->subpq_mutex);
1868 SDL_WaitThread(is->subtitle_tid, NULL);
1870 packet_queue_end(&is->subtitleq);
1871 break;
1872 default:
1873 break;
1876 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1877 avcodec_close(enc);
1878 switch(enc->codec_type) {
1879 case CODEC_TYPE_AUDIO:
1880 is->audio_st = NULL;
1881 is->audio_stream = -1;
1882 break;
1883 case CODEC_TYPE_VIDEO:
1884 is->video_st = NULL;
1885 is->video_stream = -1;
1886 break;
1887 case CODEC_TYPE_SUBTITLE:
1888 is->subtitle_st = NULL;
1889 is->subtitle_stream = -1;
1890 break;
1891 default:
1892 break;
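/* print the container metadata tags to stderr */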
1896 static void dump_stream_info(const AVFormatContext *s)
1898 AVMetadataTag *tag = NULL;
1899 while ((tag=av_metadata_get(s->metadata,"",tag,AV_METADATA_IGNORE_SUFFIX)))
1900 fprintf(stderr, "%s: %s\n", tag->key, tag->value);
1903 /* since we have only one decoding thread, we can use a global
1904 variable instead of a thread local variable */
1905 static VideoState *global_video_state;
1907 static int decode_interrupt_cb(void)
1909 return (global_video_state && global_video_state->abort_request);
1912 /* this thread gets the stream from the disk or the network */
1913 static int decode_thread(void *arg)
1915 VideoState *is = arg;
1916 AVFormatContext *ic;
1917 int err, i, ret, video_index, audio_index, subtitle_index;
1918 AVPacket pkt1, *pkt = &pkt1;
1919 AVFormatParameters params, *ap = &params;
1921 video_index = -1;
1922 audio_index = -1;
1923 subtitle_index = -1;
1924 is->video_stream = -1;
1925 is->audio_stream = -1;
1926 is->subtitle_stream = -1;
1928 global_video_state = is;
1929 url_set_interrupt_cb(decode_interrupt_cb);
1931 memset(ap, 0, sizeof(*ap));
1933 ap->width = frame_width;
1934 ap->height= frame_height;
1935 ap->time_base= (AVRational){1, 25};
1936 ap->pix_fmt = frame_pix_fmt;
1938 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1939 if (err < 0) {
1940 print_error(is->filename, err);
1941 ret = -1;
1942 goto fail;
1944 is->ic = ic;
1946 if(genpts)
1947 ic->flags |= AVFMT_FLAG_GENPTS;
1949 err = av_find_stream_info(ic);
1950 if (err < 0) {
1951 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1952 ret = -1;
1953 goto fail;
1955 if(ic->pb)
1956 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1958 /* if seeking requested, we execute it */
1959 if (start_time != AV_NOPTS_VALUE) {
1960 int64_t timestamp;
1962 timestamp = start_time;
1963 /* add the stream start time */
1964 if (ic->start_time != AV_NOPTS_VALUE)
1965 timestamp += ic->start_time;
1966 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
1967 if (ret < 0) {
1968 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1969 is->filename, (double)timestamp / AV_TIME_BASE);
1973 for(i = 0; i < ic->nb_streams; i++) {
1974 AVCodecContext *enc = ic->streams[i]->codec;
1975 ic->streams[i]->discard = AVDISCARD_ALL;
1976 switch(enc->codec_type) {
1977 case CODEC_TYPE_AUDIO:
1978 if (wanted_audio_stream-- >= 0 && !audio_disable)
1979 audio_index = i;
1980 break;
1981 case CODEC_TYPE_VIDEO:
1982 if (wanted_video_stream-- >= 0 && !video_disable)
1983 video_index = i;
1984 break;
1985 case CODEC_TYPE_SUBTITLE:
1986 if (wanted_subtitle_stream-- >= 0 && !video_disable)
1987 subtitle_index = i;
1988 break;
1989 default:
1990 break;
1993 if (show_status) {
1994 dump_format(ic, 0, is->filename, 0);
1995 dump_stream_info(ic);
1998 /* open the streams */
1999 if (audio_index >= 0) {
2000 stream_component_open(is, audio_index);
2003 if (video_index >= 0) {
2004 stream_component_open(is, video_index);
2005 } else {
2006 if (!display_disable)
2007 is->show_audio = 1;
2010 if (subtitle_index >= 0) {
2011 stream_component_open(is, subtitle_index);
2014 if (is->video_stream < 0 && is->audio_stream < 0) {
2015 fprintf(stderr, "%s: could not open codecs\n", is->filename);
2016 ret = -1;
2017 goto fail;
2020 for(;;) {
2021 if (is->abort_request)
2022 break;
2023 if (is->paused != is->last_paused) {
2024 is->last_paused = is->paused;
2025 if (is->paused)
2026 av_read_pause(ic);
2027 else
2028 av_read_play(ic);
2030 #if CONFIG_RTSP_DEMUXER
2031 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2032 /* wait 10 ms to avoid trying to get another packet */
2033 /* XXX: horrible */
2034 SDL_Delay(10);
2035 continue;
2037 #endif
2038 if (is->seek_req) {
2039 int64_t seek_target= is->seek_pos;
2040 int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2041 int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2042 //FIXME the +-2 is due to rounding not being done in the correct direction in the generation
2043 // of the seek_pos/seek_rel variables
2045 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2046 if (ret < 0) {
2047 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2048 }else{
2049 if (is->audio_stream >= 0) {
2050 packet_queue_flush(&is->audioq);
2051 packet_queue_put(&is->audioq, &flush_pkt);
2053 if (is->subtitle_stream >= 0) {
2054 packet_queue_flush(&is->subtitleq);
2055 packet_queue_put(&is->subtitleq, &flush_pkt);
2057 if (is->video_stream >= 0) {
2058 packet_queue_flush(&is->videoq);
2059 packet_queue_put(&is->videoq, &flush_pkt);
2062 is->seek_req = 0;
2065 /* if the queues are full, no need to read more */
2066 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2067 is->videoq.size > MAX_VIDEOQ_SIZE ||
2068 is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
2069 /* wait 10 ms */
2070 SDL_Delay(10);
2071 continue;
2073 if(url_feof(ic->pb)) {
2074 av_init_packet(pkt);
2075 pkt->data=NULL;
2076 pkt->size=0;
2077 pkt->stream_index= is->video_stream;
2078 packet_queue_put(&is->videoq, pkt);
2079 continue;
2081 ret = av_read_frame(ic, pkt);
2082 if (ret < 0) {
2083 if (ret != AVERROR_EOF && url_ferror(ic->pb) == 0) {
2084 SDL_Delay(100); /* wait for user event */
2085 continue;
2086 } else
2087 break;
2089 if (pkt->stream_index == is->audio_stream) {
2090 packet_queue_put(&is->audioq, pkt);
2091 } else if (pkt->stream_index == is->video_stream) {
2092 packet_queue_put(&is->videoq, pkt);
2093 } else if (pkt->stream_index == is->subtitle_stream) {
2094 packet_queue_put(&is->subtitleq, pkt);
2095 } else {
2096 av_free_packet(pkt);
2099 /* wait until the end */
2100 while (!is->abort_request) {
2101 SDL_Delay(100);
2104 ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
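
/* allocate and initialize a VideoState, schedule the first video refresh
   and start the decode (parse) thread; returns NULL on allocation or
   thread creation failure */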
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}
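
/* signal the decode thread to abort, wait for it to finish, then free the
   queued pictures and the SDL mutexes/condition variables */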
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
}
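
/* switch to the next stream of the given codec type; the subtitle search
   may wrap to "no stream" (index -1), and audio candidates must have a
   valid sample rate and channel count before being selected */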
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
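
/* small UI helpers triggered from the SDL event loop */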
static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE));
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
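
/* command line option callbacks, referenced from the options[] table below */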
static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
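
/* option table handed to cmdutils' parse_options() in main() */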
static const OptionDef options[] = {
    { "h", OPT_EXIT, {(void*)show_help}, "show help" },
    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
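
/* print usage, the available options and the interactive key bindings */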
static void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* Called from the main */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        fprintf(stderr, "An input file must be specified\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}