ffplay.c
/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
30 #include "libavcodec/opt.h"
32 #include "cmdutils.h"
34 #include <SDL.h>
35 #include <SDL_thread.h>
37 #ifdef __MINGW32__
38 #undef main /* We don't want SDL to override our main() */
39 #endif
41 #undef exit
43 const char program_name[] = "FFplay";
44 const int program_birth_year = 2003;
46 //#define DEBUG_SYNC
48 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
49 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
50 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
52 /* SDL audio buffer size, in samples. Should be small to have precise
53 A/V sync as SDL does not have hardware buffer fullness info. */
54 #define SDL_AUDIO_BUFFER_SIZE 1024
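/* e.g. at 44.1 kHz this is 1024 / 44100 ~= 23 ms of audio per callback, which
   bounds how often sdl_audio_callback() can refill the device and therefore
   the resolution of the audio clock (illustrative figure) */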
56 /* no AV sync correction is done if below the AV sync threshold */
57 #define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if the error is too big */
59 #define AV_NOSYNC_THRESHOLD 10.0
61 /* maximum audio speed change to get correct sync */
62 #define SAMPLE_CORRECTION_PERCENT_MAX 10
64 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
65 #define AUDIO_DIFF_AVG_NB 20
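/* the averaging coefficient derived from this in stream_component_open() is
   exp(log(0.01) / AUDIO_DIFF_AVG_NB) ~= 0.794, so the most recent 20 A-V
   difference samples carry 99% of the weight of the running average */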
/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
68 #define SAMPLE_ARRAY_SIZE (2*65536)
70 static int sws_flags = SWS_BICUBIC;
72 typedef struct PacketQueue {
73 AVPacketList *first_pkt, *last_pkt;
74 int nb_packets;
75 int size;
76 int abort_request;
77 SDL_mutex *mutex;
78 SDL_cond *cond;
79 } PacketQueue;
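/* thread-safe FIFO of demuxed packets: the decode thread is the producer
   (packet_queue_put) and the audio/video/subtitle decoders are the consumers
   (packet_queue_get, optionally blocking); setting abort_request wakes up and
   releases any consumer waiting on the condition variable */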
81 #define VIDEO_PICTURE_QUEUE_SIZE 1
82 #define SUBPICTURE_QUEUE_SIZE 4
84 typedef struct VideoPicture {
85 double pts; ///<presentation time stamp for this picture
86 SDL_Overlay *bmp;
87 int width, height; /* source height & width */
88 int allocated;
89 } VideoPicture;
91 typedef struct SubPicture {
92 double pts; /* presentation time stamp for this picture */
93 AVSubtitle sub;
94 } SubPicture;
96 enum {
97 AV_SYNC_AUDIO_MASTER, /* default choice */
98 AV_SYNC_VIDEO_MASTER,
99 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
102 typedef struct VideoState {
103 SDL_Thread *parse_tid;
104 SDL_Thread *video_tid;
105 AVInputFormat *iformat;
106 int no_background;
107 int abort_request;
108 int paused;
109 int last_paused;
110 int seek_req;
111 int seek_flags;
112 int64_t seek_pos;
113 int64_t seek_rel;
114 AVFormatContext *ic;
115 int dtg_active_format;
117 int audio_stream;
119 int av_sync_type;
120 double external_clock; /* external clock base */
121 int64_t external_clock_time;
123 double audio_clock;
124 double audio_diff_cum; /* used for AV difference average computation */
125 double audio_diff_avg_coef;
126 double audio_diff_threshold;
127 int audio_diff_avg_count;
128 AVStream *audio_st;
129 PacketQueue audioq;
130 int audio_hw_buf_size;
131 /* samples output by the codec. we reserve more space for avsync
132 compensation */
133 DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
134 DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
135 uint8_t *audio_buf;
136 unsigned int audio_buf_size; /* in bytes */
137 int audio_buf_index; /* in bytes */
138 AVPacket audio_pkt_temp;
139 AVPacket audio_pkt;
140 enum SampleFormat audio_src_fmt;
141 AVAudioConvert *reformat_ctx;
143 int show_audio; /* if true, display audio samples */
144 int16_t sample_array[SAMPLE_ARRAY_SIZE];
145 int sample_array_index;
146 int last_i_start;
148 SDL_Thread *subtitle_tid;
149 int subtitle_stream;
150 int subtitle_stream_changed;
151 AVStream *subtitle_st;
152 PacketQueue subtitleq;
153 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
154 int subpq_size, subpq_rindex, subpq_windex;
155 SDL_mutex *subpq_mutex;
156 SDL_cond *subpq_cond;
158 double frame_timer;
159 double frame_last_pts;
160 double frame_last_delay;
161 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
162 int video_stream;
163 AVStream *video_st;
164 PacketQueue videoq;
165 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
166 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
167 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
168 int pictq_size, pictq_rindex, pictq_windex;
169 SDL_mutex *pictq_mutex;
170 SDL_cond *pictq_cond;
172 // QETimer *video_timer;
173 char filename[1024];
174 int width, height, xleft, ytop;
175 } VideoState;
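/* a VideoState describes one playback session: decode_thread demuxes the file
   and feeds the three packet queues, video_thread and subtitle_thread decode
   into pictq/subpq, the SDL audio callback drains audioq, and the main SDL
   thread owns the screen surface and services the FF_ALLOC/FF_REFRESH/FF_QUIT
   events */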
177 static void show_help(void);
178 static int audio_write_get_buf_size(VideoState *is);
180 /* options specified by the user */
181 static AVInputFormat *file_iformat;
182 static const char *input_filename;
183 static int fs_screen_width;
184 static int fs_screen_height;
185 static int screen_width = 0;
186 static int screen_height = 0;
187 static int frame_width = 0;
188 static int frame_height = 0;
189 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
190 static int audio_disable;
191 static int video_disable;
192 static int wanted_audio_stream= 0;
193 static int wanted_video_stream= 0;
194 static int wanted_subtitle_stream= -1;
195 static int seek_by_bytes;
196 static int display_disable;
197 static int show_status;
198 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
199 static int64_t start_time = AV_NOPTS_VALUE;
200 static int debug = 0;
201 static int debug_mv = 0;
202 static int step = 0;
203 static int thread_count = 1;
204 static int workaround_bugs = 1;
205 static int fast = 0;
206 static int genpts = 0;
207 static int lowres = 0;
208 static int idct = FF_IDCT_AUTO;
209 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
210 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
211 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
212 static int error_recognition = FF_ER_CAREFUL;
213 static int error_concealment = 3;
214 static int decoder_reorder_pts= 0;
216 /* current context */
217 static int is_full_screen;
218 static VideoState *cur_stream;
219 static int64_t audio_callback_time;
221 static AVPacket flush_pkt;
223 #define FF_ALLOC_EVENT (SDL_USEREVENT)
224 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
225 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
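/* custom SDL user events: FF_ALLOC_EVENT asks the main thread to (re)allocate
   the YUV overlay, FF_REFRESH_EVENT drives video_refresh_timer(), and
   FF_QUIT_EVENT reports a fatal error from the decode thread */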
227 static SDL_Surface *screen;
229 /* packet queue handling */
230 static void packet_queue_init(PacketQueue *q)
232 memset(q, 0, sizeof(PacketQueue));
233 q->mutex = SDL_CreateMutex();
234 q->cond = SDL_CreateCond();
237 static void packet_queue_flush(PacketQueue *q)
239 AVPacketList *pkt, *pkt1;
241 SDL_LockMutex(q->mutex);
242 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
243 pkt1 = pkt->next;
244 av_free_packet(&pkt->pkt);
245 av_freep(&pkt);
247 q->last_pkt = NULL;
248 q->first_pkt = NULL;
249 q->nb_packets = 0;
250 q->size = 0;
251 SDL_UnlockMutex(q->mutex);
254 static void packet_queue_end(PacketQueue *q)
256 packet_queue_flush(q);
257 SDL_DestroyMutex(q->mutex);
258 SDL_DestroyCond(q->cond);
261 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
263 AVPacketList *pkt1;
265 /* duplicate the packet */
266 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
267 return -1;
269 pkt1 = av_malloc(sizeof(AVPacketList));
270 if (!pkt1)
271 return -1;
272 pkt1->pkt = *pkt;
273 pkt1->next = NULL;
276 SDL_LockMutex(q->mutex);
278 if (!q->last_pkt)
280 q->first_pkt = pkt1;
281 else
282 q->last_pkt->next = pkt1;
283 q->last_pkt = pkt1;
284 q->nb_packets++;
285 q->size += pkt1->pkt.size + sizeof(*pkt1);
286 /* XXX: should duplicate packet data in DV case */
287 SDL_CondSignal(q->cond);
289 SDL_UnlockMutex(q->mutex);
290 return 0;
293 static void packet_queue_abort(PacketQueue *q)
295 SDL_LockMutex(q->mutex);
297 q->abort_request = 1;
299 SDL_CondSignal(q->cond);
301 SDL_UnlockMutex(q->mutex);
304 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
305 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
307 AVPacketList *pkt1;
308 int ret;
310 SDL_LockMutex(q->mutex);
312 for(;;) {
313 if (q->abort_request) {
314 ret = -1;
315 break;
318 pkt1 = q->first_pkt;
319 if (pkt1) {
320 q->first_pkt = pkt1->next;
321 if (!q->first_pkt)
322 q->last_pkt = NULL;
323 q->nb_packets--;
324 q->size -= pkt1->pkt.size + sizeof(*pkt1);
325 *pkt = pkt1->pkt;
326 av_free(pkt1);
327 ret = 1;
328 break;
329 } else if (!block) {
330 ret = 0;
331 break;
332 } else {
333 SDL_CondWait(q->cond, q->mutex);
336 SDL_UnlockMutex(q->mutex);
337 return ret;
340 static inline void fill_rectangle(SDL_Surface *screen,
341 int x, int y, int w, int h, int color)
343 SDL_Rect rect;
344 rect.x = x;
345 rect.y = y;
346 rect.w = w;
347 rect.h = h;
348 SDL_FillRect(screen, &rect, color);
351 #if 0
352 /* draw only the border of a rectangle */
353 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
355 int w1, w2, h1, h2;
357 /* fill the background */
358 w1 = x;
359 if (w1 < 0)
360 w1 = 0;
361 w2 = s->width - (x + w);
362 if (w2 < 0)
363 w2 = 0;
364 h1 = y;
365 if (h1 < 0)
366 h1 = 0;
367 h2 = s->height - (y + h);
368 if (h2 < 0)
369 h2 = 0;
370 fill_rectangle(screen,
371 s->xleft, s->ytop,
372 w1, s->height,
373 color);
374 fill_rectangle(screen,
375 s->xleft + s->width - w2, s->ytop,
376 w2, s->height,
377 color);
378 fill_rectangle(screen,
379 s->xleft + w1, s->ytop,
380 s->width - w1 - w2, h1,
381 color);
382 fill_rectangle(screen,
383 s->xleft + w1, s->ytop + s->height - h2,
384 s->width - w1 - w2, h2,
385 color);
387 #endif
391 #define SCALEBITS 10
392 #define ONE_HALF (1 << (SCALEBITS - 1))
393 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
395 #define RGB_TO_Y_CCIR(r, g, b) \
396 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
397 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
399 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
400 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
401 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
403 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
404 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
405 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
407 #define ALPHA_BLEND(a, oldp, newp, s)\
408 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
410 #define RGBA_IN(r, g, b, a, s)\
412 unsigned int v = ((const uint32_t *)(s))[0];\
413 a = (v >> 24) & 0xff;\
414 r = (v >> 16) & 0xff;\
415 g = (v >> 8) & 0xff;\
416 b = v & 0xff;\
419 #define YUVA_IN(y, u, v, a, s, pal)\
421 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
422 a = (val >> 24) & 0xff;\
423 y = (val >> 16) & 0xff;\
424 u = (val >> 8) & 0xff;\
425 v = val & 0xff;\
428 #define YUVA_OUT(d, y, u, v, a)\
430 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
434 #define BPP 1
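/* blend an 8 bpp palettised subtitle rectangle (palette already converted to
   AYCbCr, see subtitle_thread) onto a YUV420P destination; odd x, y, width
   and height are handled separately because chroma is subsampled 2x2 */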
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
438 int wrap, wrap3, width2, skip2;
439 int y, u, v, a, u1, v1, a1, w, h;
440 uint8_t *lum, *cb, *cr;
441 const uint8_t *p;
442 const uint32_t *pal;
443 int dstx, dsty, dstw, dsth;
445 dstw = av_clip(rect->w, 0, imgw);
446 dsth = av_clip(rect->h, 0, imgh);
447 dstx = av_clip(rect->x, 0, imgw - dstw);
448 dsty = av_clip(rect->y, 0, imgh - dsth);
449 lum = dst->data[0] + dsty * dst->linesize[0];
450 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
453 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454 skip2 = dstx >> 1;
455 wrap = dst->linesize[0];
456 wrap3 = rect->pict.linesize[0];
457 p = rect->pict.data[0];
458 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
460 if (dsty & 1) {
461 lum += dstx;
462 cb += skip2;
463 cr += skip2;
465 if (dstx & 1) {
466 YUVA_IN(y, u, v, a, p, pal);
467 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470 cb++;
471 cr++;
472 lum++;
473 p += BPP;
475 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
476 YUVA_IN(y, u, v, a, p, pal);
477 u1 = u;
478 v1 = v;
479 a1 = a;
480 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
482 YUVA_IN(y, u, v, a, p + BPP, pal);
483 u1 += u;
484 v1 += v;
485 a1 += a;
486 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489 cb++;
490 cr++;
491 p += 2 * BPP;
492 lum += 2;
494 if (w) {
495 YUVA_IN(y, u, v, a, p, pal);
496 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499 p++;
500 lum++;
502 p += wrap3 - dstw * BPP;
503 lum += wrap - dstw - dstx;
504 cb += dst->linesize[1] - width2 - skip2;
505 cr += dst->linesize[2] - width2 - skip2;
507 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
508 lum += dstx;
509 cb += skip2;
510 cr += skip2;
512 if (dstx & 1) {
513 YUVA_IN(y, u, v, a, p, pal);
514 u1 = u;
515 v1 = v;
516 a1 = a;
517 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518 p += wrap3;
519 lum += wrap;
520 YUVA_IN(y, u, v, a, p, pal);
521 u1 += u;
522 v1 += v;
523 a1 += a;
524 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527 cb++;
528 cr++;
529 p += -wrap3 + BPP;
530 lum += -wrap + 1;
532 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
533 YUVA_IN(y, u, v, a, p, pal);
534 u1 = u;
535 v1 = v;
536 a1 = a;
537 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539 YUVA_IN(y, u, v, a, p + BPP, pal);
540 u1 += u;
541 v1 += v;
542 a1 += a;
543 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544 p += wrap3;
545 lum += wrap;
547 YUVA_IN(y, u, v, a, p, pal);
548 u1 += u;
549 v1 += v;
550 a1 += a;
551 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
553 YUVA_IN(y, u, v, a, p + BPP, pal);
554 u1 += u;
555 v1 += v;
556 a1 += a;
557 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
559 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
562 cb++;
563 cr++;
564 p += -wrap3 + 2 * BPP;
565 lum += -wrap + 2;
567 if (w) {
568 YUVA_IN(y, u, v, a, p, pal);
569 u1 = u;
570 v1 = v;
571 a1 = a;
572 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573 p += wrap3;
574 lum += wrap;
575 YUVA_IN(y, u, v, a, p, pal);
576 u1 += u;
577 v1 += v;
578 a1 += a;
579 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582 cb++;
583 cr++;
584 p += -wrap3 + BPP;
585 lum += -wrap + 1;
587 p += wrap3 + (wrap3 - dstw * BPP);
588 lum += wrap + (wrap - dstw - dstx);
589 cb += dst->linesize[1] - width2 - skip2;
590 cr += dst->linesize[2] - width2 - skip2;
592 /* handle odd height */
593 if (h) {
594 lum += dstx;
595 cb += skip2;
596 cr += skip2;
598 if (dstx & 1) {
599 YUVA_IN(y, u, v, a, p, pal);
600 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603 cb++;
604 cr++;
605 lum++;
606 p += BPP;
608 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
609 YUVA_IN(y, u, v, a, p, pal);
610 u1 = u;
611 v1 = v;
612 a1 = a;
613 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615 YUVA_IN(y, u, v, a, p + BPP, pal);
616 u1 += u;
617 v1 += v;
618 a1 += a;
619 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
621 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
622 cb++;
623 cr++;
624 p += 2 * BPP;
625 lum += 2;
627 if (w) {
628 YUVA_IN(y, u, v, a, p, pal);
629 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
636 static void free_subpicture(SubPicture *sp)
638 int i;
640 for (i = 0; i < sp->sub.num_rects; i++)
642 av_freep(&sp->sub.rects[i]->pict.data[0]);
643 av_freep(&sp->sub.rects[i]->pict.data[1]);
644 av_freep(&sp->sub.rects[i]);
647 av_free(sp->sub.rects);
649 memset(&sp->sub, 0, sizeof(AVSubtitle));
652 static void video_image_display(VideoState *is)
654 VideoPicture *vp;
655 SubPicture *sp;
656 AVPicture pict;
657 float aspect_ratio;
658 int width, height, x, y;
659 SDL_Rect rect;
660 int i;
662 vp = &is->pictq[is->pictq_rindex];
663 if (vp->bmp) {
664 /* XXX: use variable in the frame */
665 if (is->video_st->sample_aspect_ratio.num)
666 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
667 else if (is->video_st->codec->sample_aspect_ratio.num)
668 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
669 else
670 aspect_ratio = 0;
671 if (aspect_ratio <= 0.0)
672 aspect_ratio = 1.0;
673 aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
674 /* if an active format is indicated, then it overrides the
675 mpeg format */
676 #if 0
677 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
678 is->dtg_active_format = is->video_st->codec->dtg_active_format;
679 printf("dtg_active_format=%d\n", is->dtg_active_format);
681 #endif
682 #if 0
683 switch(is->video_st->codec->dtg_active_format) {
684 case FF_DTG_AFD_SAME:
685 default:
686 /* nothing to do */
687 break;
688 case FF_DTG_AFD_4_3:
689 aspect_ratio = 4.0 / 3.0;
690 break;
691 case FF_DTG_AFD_16_9:
692 aspect_ratio = 16.0 / 9.0;
693 break;
694 case FF_DTG_AFD_14_9:
695 aspect_ratio = 14.0 / 9.0;
696 break;
697 case FF_DTG_AFD_4_3_SP_14_9:
698 aspect_ratio = 14.0 / 9.0;
699 break;
700 case FF_DTG_AFD_16_9_SP_14_9:
701 aspect_ratio = 14.0 / 9.0;
702 break;
703 case FF_DTG_AFD_SP_4_3:
704 aspect_ratio = 4.0 / 3.0;
705 break;
707 #endif
709 if (is->subtitle_st)
711 if (is->subpq_size > 0)
713 sp = &is->subpq[is->subpq_rindex];
715 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
717 SDL_LockYUVOverlay (vp->bmp);
719 pict.data[0] = vp->bmp->pixels[0];
720 pict.data[1] = vp->bmp->pixels[2];
721 pict.data[2] = vp->bmp->pixels[1];
723 pict.linesize[0] = vp->bmp->pitches[0];
724 pict.linesize[1] = vp->bmp->pitches[2];
725 pict.linesize[2] = vp->bmp->pitches[1];
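                /* note: planes 1 and 2 are swapped because the SDL overlay is
                   YV12 (Y, V, U plane order) while AVPicture here is YUV420P
                   (Y, U, V) */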
727 for (i = 0; i < sp->sub.num_rects; i++)
728 blend_subrect(&pict, sp->sub.rects[i],
729 vp->bmp->w, vp->bmp->h);
731 SDL_UnlockYUVOverlay (vp->bmp);
737 /* XXX: we suppose the screen has a 1.0 pixel ratio */
738 height = is->height;
739 width = ((int)rint(height * aspect_ratio)) & ~1;
740 if (width > is->width) {
741 width = is->width;
742 height = ((int)rint(width / aspect_ratio)) & ~1;
744 x = (is->width - width) / 2;
745 y = (is->height - height) / 2;
746 if (!is->no_background) {
747 /* fill the background */
748 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
749 } else {
750 is->no_background = 0;
752 rect.x = is->xleft + x;
753 rect.y = is->ytop + y;
754 rect.w = width;
755 rect.h = height;
756 SDL_DisplayYUVOverlay(vp->bmp, &rect);
757 } else {
758 #if 0
759 fill_rectangle(screen,
760 is->xleft, is->ytop, is->width, is->height,
761 QERGB(0x00, 0x00, 0x00));
762 #endif
766 static inline int compute_mod(int a, int b)
768 a = a % b;
769 if (a >= 0)
770 return a;
771 else
772 return a + b;
775 static void video_audio_display(VideoState *s)
777 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
778 int ch, channels, h, h2, bgcolor, fgcolor;
779 int16_t time_diff;
781 /* compute display index : center on currently output samples */
782 channels = s->audio_st->codec->channels;
783 nb_display_channels = channels;
784 if (!s->paused) {
785 n = 2 * channels;
786 delay = audio_write_get_buf_size(s);
787 delay /= n;
789 /* to be more precise, we take into account the time spent since
790 the last buffer computation */
791 if (audio_callback_time) {
792 time_diff = av_gettime() - audio_callback_time;
793 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
796 delay -= s->width / 2;
797 if (delay < s->width)
798 delay = s->width;
800 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
802 h= INT_MIN;
803 for(i=0; i<1000; i+=channels){
804 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
805 int a= s->sample_array[idx];
806 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
807 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
808 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
809 int score= a-d;
810 if(h<score && (b^c)<0){
811 h= score;
812 i_start= idx;
816 s->last_i_start = i_start;
817 } else {
818 i_start = s->last_i_start;
821 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
822 fill_rectangle(screen,
823 s->xleft, s->ytop, s->width, s->height,
824 bgcolor);
826 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
828 /* total height for one channel */
829 h = s->height / nb_display_channels;
830 /* graph height / 2 */
831 h2 = (h * 9) / 20;
832 for(ch = 0;ch < nb_display_channels; ch++) {
833 i = i_start + ch;
834 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
835 for(x = 0; x < s->width; x++) {
836 y = (s->sample_array[i] * h2) >> 15;
837 if (y < 0) {
838 y = -y;
839 ys = y1 - y;
840 } else {
841 ys = y1;
843 fill_rectangle(screen,
844 s->xleft + x, ys, 1, y,
845 fgcolor);
846 i += channels;
847 if (i >= SAMPLE_ARRAY_SIZE)
848 i -= SAMPLE_ARRAY_SIZE;
852 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
854 for(ch = 1;ch < nb_display_channels; ch++) {
855 y = s->ytop + ch * h;
856 fill_rectangle(screen,
857 s->xleft, y, s->width, 1,
858 fgcolor);
860 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
863 static int video_open(VideoState *is){
864 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
865 int w,h;
867 if(is_full_screen) flags |= SDL_FULLSCREEN;
868 else flags |= SDL_RESIZABLE;
870 if (is_full_screen && fs_screen_width) {
871 w = fs_screen_width;
872 h = fs_screen_height;
873 } else if(!is_full_screen && screen_width){
874 w = screen_width;
875 h = screen_height;
876 }else if (is->video_st && is->video_st->codec->width){
877 w = is->video_st->codec->width;
878 h = is->video_st->codec->height;
879 } else {
880 w = 640;
881 h = 480;
883 #ifndef __APPLE__
884 screen = SDL_SetVideoMode(w, h, 0, flags);
885 #else
886 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
887 screen = SDL_SetVideoMode(w, h, 24, flags);
888 #endif
889 if (!screen) {
890 fprintf(stderr, "SDL: could not set video mode - exiting\n");
891 return -1;
893 SDL_WM_SetCaption("FFplay", "FFplay");
895 is->width = screen->w;
896 is->height = screen->h;
898 return 0;
901 /* display the current picture, if any */
902 static void video_display(VideoState *is)
904 if(!screen)
905 video_open(cur_stream);
906 if (is->audio_st && is->show_audio)
907 video_audio_display(is);
908 else if (is->video_st)
909 video_image_display(is);
912 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
914 SDL_Event event;
915 event.type = FF_REFRESH_EVENT;
916 event.user.data1 = opaque;
917 SDL_PushEvent(&event);
918 return 0; /* 0 means stop timer */
921 /* schedule a video refresh in 'delay' ms */
922 static void schedule_refresh(VideoState *is, int delay)
924 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
925 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
928 /* get the current audio clock value */
929 static double get_audio_clock(VideoState *is)
931 double pts;
932 int hw_buf_size, bytes_per_sec;
933 pts = is->audio_clock;
934 hw_buf_size = audio_write_get_buf_size(is);
935 bytes_per_sec = 0;
936 if (is->audio_st) {
937 bytes_per_sec = is->audio_st->codec->sample_rate *
938 2 * is->audio_st->codec->channels;
940 if (bytes_per_sec)
941 pts -= (double)hw_buf_size / bytes_per_sec;
942 return pts;
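/* e.g. if decoding has produced audio up to pts = 10.000 s but 8192 bytes of
   16-bit stereo 44.1 kHz audio (~46 ms) are still queued for output, the
   clock reported here is ~9.954 s, the time of the sample actually being
   heard (illustrative numbers) */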
945 /* get the current video clock value */
946 static double get_video_clock(VideoState *is)
948 double delta;
949 if (is->paused) {
950 delta = 0;
951 } else {
952 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
954 return is->video_current_pts + delta;
957 /* get the current external clock value */
958 static double get_external_clock(VideoState *is)
960 int64_t ti;
961 ti = av_gettime();
962 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
965 /* get the current master clock value */
966 static double get_master_clock(VideoState *is)
968 double val;
970 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
971 if (is->video_st)
972 val = get_video_clock(is);
973 else
974 val = get_audio_clock(is);
975 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
976 if (is->audio_st)
977 val = get_audio_clock(is);
978 else
979 val = get_video_clock(is);
980 } else {
981 val = get_external_clock(is);
983 return val;
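/* the master clock drives synchronisation: with the default audio master,
   video frames are scheduled against it in compute_frame_delay(), while
   audio sample insertion/deletion in synchronize_audio() only happens when
   audio is not the master */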
986 /* seek in the stream */
987 static void stream_seek(VideoState *is, int64_t pos, int64_t rel)
989 if (!is->seek_req) {
990 is->seek_pos = pos;
991 is->seek_rel = rel;
992 if (seek_by_bytes)
993 is->seek_flags |= AVSEEK_FLAG_BYTE;
994 is->seek_req = 1;
998 /* pause or resume the video */
999 static void stream_pause(VideoState *is)
1001 is->paused = !is->paused;
1002 if (!is->paused) {
1003 is->video_current_pts = get_video_clock(is);
1004 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
1008 static double compute_frame_delay(double frame_current_pts, VideoState *is)
1010 double actual_delay, delay, sync_threshold, ref_clock, diff;
1012 /* compute nominal delay */
1013 delay = frame_current_pts - is->frame_last_pts;
1014 if (delay <= 0 || delay >= 10.0) {
1015 /* if incorrect delay, use previous one */
1016 delay = is->frame_last_delay;
1017 } else {
1018 is->frame_last_delay = delay;
1020 is->frame_last_pts = frame_current_pts;
1022 /* update delay to follow master synchronisation source */
1023 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1024 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1025 /* if video is slave, we try to correct big delays by
1026 duplicating or deleting a frame */
1027 ref_clock = get_master_clock(is);
1028 diff = frame_current_pts - ref_clock;
1030 /* skip or repeat frame. We take into account the
1031 delay to compute the threshold. I still don't know
1032 if it is the best guess */
1033 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1034 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1035 if (diff <= -sync_threshold)
1036 delay = 0;
1037 else if (diff >= sync_threshold)
1038 delay = 2 * delay;
1042 is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long term errors) */
1045 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1046 if (actual_delay < 0.010) {
1047 /* XXX: should skip picture */
1048 actual_delay = 0.010;
1051 #if defined(DEBUG_SYNC)
1052 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1053 delay, actual_delay, frame_current_pts, -diff);
1054 #endif
1056 return actual_delay;
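/* net effect of the logic above: when video is a slave and runs more than
   sync_threshold behind the master clock the nominal delay is dropped to 0
   (show the frame as soon as possible), when it runs ahead the delay is
   doubled (frame effectively repeated); within the threshold the nominal
   frame duration is kept */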
1059 /* called to display each frame */
1060 static void video_refresh_timer(void *opaque)
1062 VideoState *is = opaque;
1063 VideoPicture *vp;
1065 SubPicture *sp, *sp2;
1067 if (is->video_st) {
1068 if (is->pictq_size == 0) {
1069 /* if no picture, need to wait */
1070 schedule_refresh(is, 1);
1071 } else {
1072 /* dequeue the picture */
1073 vp = &is->pictq[is->pictq_rindex];
1075 /* update current video pts */
1076 is->video_current_pts = vp->pts;
1077 is->video_current_pts_time = av_gettime();
1079 /* launch timer for next picture */
1080 schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
1082 if(is->subtitle_st) {
1083 if (is->subtitle_stream_changed) {
1084 SDL_LockMutex(is->subpq_mutex);
1086 while (is->subpq_size) {
1087 free_subpicture(&is->subpq[is->subpq_rindex]);
1089 /* update queue size and signal for next picture */
1090 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1091 is->subpq_rindex = 0;
1093 is->subpq_size--;
1095 is->subtitle_stream_changed = 0;
1097 SDL_CondSignal(is->subpq_cond);
1098 SDL_UnlockMutex(is->subpq_mutex);
1099 } else {
1100 if (is->subpq_size > 0) {
1101 sp = &is->subpq[is->subpq_rindex];
1103 if (is->subpq_size > 1)
1104 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1105 else
1106 sp2 = NULL;
1108 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1109 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1111 free_subpicture(sp);
1113 /* update queue size and signal for next picture */
1114 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1115 is->subpq_rindex = 0;
1117 SDL_LockMutex(is->subpq_mutex);
1118 is->subpq_size--;
1119 SDL_CondSignal(is->subpq_cond);
1120 SDL_UnlockMutex(is->subpq_mutex);
1126 /* display picture */
1127 video_display(is);
1129 /* update queue size and signal for next picture */
1130 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1131 is->pictq_rindex = 0;
1133 SDL_LockMutex(is->pictq_mutex);
1134 is->pictq_size--;
1135 SDL_CondSignal(is->pictq_cond);
1136 SDL_UnlockMutex(is->pictq_mutex);
1138 } else if (is->audio_st) {
1139 /* draw the next audio frame */
1141 schedule_refresh(is, 40);
        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */
1146 /* display picture */
1147 video_display(is);
1148 } else {
1149 schedule_refresh(is, 100);
1151 if (show_status) {
1152 static int64_t last_time;
1153 int64_t cur_time;
1154 int aqsize, vqsize, sqsize;
1155 double av_diff;
1157 cur_time = av_gettime();
1158 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1159 aqsize = 0;
1160 vqsize = 0;
1161 sqsize = 0;
1162 if (is->audio_st)
1163 aqsize = is->audioq.size;
1164 if (is->video_st)
1165 vqsize = is->videoq.size;
1166 if (is->subtitle_st)
1167 sqsize = is->subtitleq.size;
1168 av_diff = 0;
1169 if (is->audio_st && is->video_st)
1170 av_diff = get_audio_clock(is) - get_video_clock(is);
1171 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1172 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1173 fflush(stdout);
1174 last_time = cur_time;
/* allocate a picture (this needs to be done in the main thread to avoid
   potential locking problems) */
1181 static void alloc_picture(void *opaque)
1183 VideoState *is = opaque;
1184 VideoPicture *vp;
1186 vp = &is->pictq[is->pictq_windex];
1188 if (vp->bmp)
1189 SDL_FreeYUVOverlay(vp->bmp);
1191 #if 0
1192 /* XXX: use generic function */
1193 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1194 switch(is->video_st->codec->pix_fmt) {
1195 case PIX_FMT_YUV420P:
1196 case PIX_FMT_YUV422P:
1197 case PIX_FMT_YUV444P:
1198 case PIX_FMT_YUYV422:
1199 case PIX_FMT_YUV410P:
1200 case PIX_FMT_YUV411P:
1201 is_yuv = 1;
1202 break;
1203 default:
1204 is_yuv = 0;
1205 break;
1207 #endif
1208 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1209 is->video_st->codec->height,
1210 SDL_YV12_OVERLAY,
1211 screen);
1212 vp->width = is->video_st->codec->width;
1213 vp->height = is->video_st->codec->height;
1215 SDL_LockMutex(is->pictq_mutex);
1216 vp->allocated = 1;
1217 SDL_CondSignal(is->pictq_cond);
1218 SDL_UnlockMutex(is->pictq_mutex);
1223 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1225 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1227 VideoPicture *vp;
1228 int dst_pix_fmt;
1229 static struct SwsContext *img_convert_ctx;
1231 /* wait until we have space to put a new picture */
1232 SDL_LockMutex(is->pictq_mutex);
1233 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1234 !is->videoq.abort_request) {
1235 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1237 SDL_UnlockMutex(is->pictq_mutex);
1239 if (is->videoq.abort_request)
1240 return -1;
1242 vp = &is->pictq[is->pictq_windex];
1244 /* alloc or resize hardware picture buffer */
1245 if (!vp->bmp ||
1246 vp->width != is->video_st->codec->width ||
1247 vp->height != is->video_st->codec->height) {
1248 SDL_Event event;
1250 vp->allocated = 0;
1252 /* the allocation must be done in the main thread to avoid
1253 locking problems */
1254 event.type = FF_ALLOC_EVENT;
1255 event.user.data1 = is;
1256 SDL_PushEvent(&event);
1258 /* wait until the picture is allocated */
1259 SDL_LockMutex(is->pictq_mutex);
1260 while (!vp->allocated && !is->videoq.abort_request) {
1261 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1263 SDL_UnlockMutex(is->pictq_mutex);
1265 if (is->videoq.abort_request)
1266 return -1;
1269 /* if the frame is not skipped, then display it */
1270 if (vp->bmp) {
1271 AVPicture pict;
1273 /* get a pointer on the bitmap */
1274 SDL_LockYUVOverlay (vp->bmp);
1276 dst_pix_fmt = PIX_FMT_YUV420P;
1277 memset(&pict,0,sizeof(AVPicture));
1278 pict.data[0] = vp->bmp->pixels[0];
1279 pict.data[1] = vp->bmp->pixels[2];
1280 pict.data[2] = vp->bmp->pixels[1];
1282 pict.linesize[0] = vp->bmp->pitches[0];
1283 pict.linesize[1] = vp->bmp->pitches[2];
1284 pict.linesize[2] = vp->bmp->pitches[1];
1285 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1286 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1287 is->video_st->codec->width, is->video_st->codec->height,
1288 is->video_st->codec->pix_fmt,
1289 is->video_st->codec->width, is->video_st->codec->height,
1290 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1291 if (img_convert_ctx == NULL) {
1292 fprintf(stderr, "Cannot initialize the conversion context\n");
1293 exit(1);
1295 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1296 0, is->video_st->codec->height, pict.data, pict.linesize);
1297 /* update the bitmap content */
1298 SDL_UnlockYUVOverlay(vp->bmp);
1300 vp->pts = pts;
1302 /* now we can update the picture count */
1303 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1304 is->pictq_windex = 0;
1305 SDL_LockMutex(is->pictq_mutex);
1306 is->pictq_size++;
1307 SDL_UnlockMutex(is->pictq_mutex);
1309 return 0;
1313 * compute the exact PTS for the picture if it is omitted in the stream
1314 * @param pts1 the dts of the pkt / pts of the frame
1316 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1318 double frame_delay, pts;
1320 pts = pts1;
1322 if (pts != 0) {
1323 /* update video clock with pts, if present */
1324 is->video_clock = pts;
1325 } else {
1326 pts = is->video_clock;
1328 /* update video clock for next frame */
1329 frame_delay = av_q2d(is->video_st->codec->time_base);
1330 /* for MPEG2, the frame can be repeated, so we update the
1331 clock accordingly */
1332 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
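    /* repeat_pict counts extra half-frame periods: e.g. MPEG-2 frames with
       repeat_first_field set report repeat_pict = 1 and are therefore held
       for 1.5 nominal frame durations */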
1333 is->video_clock += frame_delay;
1335 #if defined(DEBUG_SYNC) && 0
1337 int ftype;
1338 if (src_frame->pict_type == FF_B_TYPE)
1339 ftype = 'B';
1340 else if (src_frame->pict_type == FF_I_TYPE)
1341 ftype = 'I';
1342 else
1343 ftype = 'P';
1344 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1345 ftype, pts, pts1);
1347 #endif
1348 return queue_picture(is, src_frame, pts);
1351 static int video_thread(void *arg)
1353 VideoState *is = arg;
1354 AVPacket pkt1, *pkt = &pkt1;
1355 int len1, got_picture;
1356 AVFrame *frame= avcodec_alloc_frame();
1357 double pts;
1359 for(;;) {
1360 while (is->paused && !is->videoq.abort_request) {
1361 SDL_Delay(10);
1363 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1364 break;
1366 if(pkt->data == flush_pkt.data){
1367 avcodec_flush_buffers(is->video_st->codec);
1368 continue;
1371 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1372 this packet, if any */
1373 is->video_st->codec->reordered_opaque= pkt->pts;
1374 len1 = avcodec_decode_video2(is->video_st->codec,
1375 frame, &got_picture,
1376 pkt);
1378 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1379 && frame->reordered_opaque != AV_NOPTS_VALUE)
1380 pts= frame->reordered_opaque;
1381 else if(pkt->dts != AV_NOPTS_VALUE)
1382 pts= pkt->dts;
1383 else
1384 pts= 0;
1385 pts *= av_q2d(is->video_st->time_base);
1387 // if (len1 < 0)
1388 // break;
1389 if (got_picture) {
1390 if (output_picture2(is, frame, pts) < 0)
1391 goto the_end;
1393 av_free_packet(pkt);
1394 if (step)
1395 if (cur_stream)
1396 stream_pause(cur_stream);
1398 the_end:
1399 av_free(frame);
1400 return 0;
1403 static int subtitle_thread(void *arg)
1405 VideoState *is = arg;
1406 SubPicture *sp;
1407 AVPacket pkt1, *pkt = &pkt1;
1408 int len1, got_subtitle;
1409 double pts;
1410 int i, j;
1411 int r, g, b, y, u, v, a;
1413 for(;;) {
1414 while (is->paused && !is->subtitleq.abort_request) {
1415 SDL_Delay(10);
1417 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1418 break;
1420 if(pkt->data == flush_pkt.data){
1421 avcodec_flush_buffers(is->subtitle_st->codec);
1422 continue;
1424 SDL_LockMutex(is->subpq_mutex);
1425 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1426 !is->subtitleq.abort_request) {
1427 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1429 SDL_UnlockMutex(is->subpq_mutex);
1431 if (is->subtitleq.abort_request)
1432 goto the_end;
1434 sp = &is->subpq[is->subpq_windex];
1436 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1437 this packet, if any */
1438 pts = 0;
1439 if (pkt->pts != AV_NOPTS_VALUE)
1440 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1442 len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1443 &sp->sub, &got_subtitle,
1444 pkt);
1445 // if (len1 < 0)
1446 // break;
1447 if (got_subtitle && sp->sub.format == 0) {
1448 sp->pts = pts;
1450 for (i = 0; i < sp->sub.num_rects; i++)
1452 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1454 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1455 y = RGB_TO_Y_CCIR(r, g, b);
1456 u = RGB_TO_U_CCIR(r, g, b, 0);
1457 v = RGB_TO_V_CCIR(r, g, b, 0);
1458 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1462 /* now we can update the picture count */
1463 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1464 is->subpq_windex = 0;
1465 SDL_LockMutex(is->subpq_mutex);
1466 is->subpq_size++;
1467 SDL_UnlockMutex(is->subpq_mutex);
1469 av_free_packet(pkt);
1470 // if (step)
1471 // if (cur_stream)
1472 // stream_pause(cur_stream);
1474 the_end:
1475 return 0;
1478 /* copy samples for viewing in editor window */
1479 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1481 int size, len, channels;
1483 channels = is->audio_st->codec->channels;
1485 size = samples_size / sizeof(short);
1486 while (size > 0) {
1487 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1488 if (len > size)
1489 len = size;
1490 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1491 samples += len;
1492 is->sample_array_index += len;
1493 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1494 is->sample_array_index = 0;
1495 size -= len;
/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or the external clock is the master) */
1501 static int synchronize_audio(VideoState *is, short *samples,
1502 int samples_size1, double pts)
1504 int n, samples_size;
1505 double ref_clock;
1507 n = 2 * is->audio_st->codec->channels;
1508 samples_size = samples_size1;
1510 /* if not master, then we try to remove or add samples to correct the clock */
1511 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1512 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1513 double diff, avg_diff;
1514 int wanted_size, min_size, max_size, nb_samples;
1516 ref_clock = get_master_clock(is);
1517 diff = get_audio_clock(is) - ref_clock;
1519 if (diff < AV_NOSYNC_THRESHOLD) {
1520 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1521 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1522 /* not enough measures to have a correct estimate */
1523 is->audio_diff_avg_count++;
1524 } else {
1525 /* estimate the A-V difference */
1526 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1528 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1529 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1530 nb_samples = samples_size / n;
1532 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1533 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1534 if (wanted_size < min_size)
1535 wanted_size = min_size;
1536 else if (wanted_size > max_size)
1537 wanted_size = max_size;
                    /* add or remove samples to correct the sync */
1540 if (wanted_size < samples_size) {
1541 /* remove samples */
1542 samples_size = wanted_size;
1543 } else if (wanted_size > samples_size) {
1544 uint8_t *samples_end, *q;
1545 int nb;
1547 /* add samples */
1548 nb = (samples_size - wanted_size);
1549 samples_end = (uint8_t *)samples + samples_size - n;
1550 q = samples_end + n;
1551 while (nb > 0) {
1552 memcpy(q, samples_end, n);
1553 q += n;
1554 nb -= n;
1556 samples_size = wanted_size;
1559 #if 0
1560 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1561 diff, avg_diff, samples_size - samples_size1,
1562 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1563 #endif
1565 } else {
            /* the difference is too big: it may be due to initial PTS
               errors, so reset the A-V filter */
1568 is->audio_diff_avg_count = 0;
1569 is->audio_diff_cum = 0;
1573 return samples_size;
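/* e.g. if the audio clock lags the master by 50 ms at 44.1 kHz stereo S16
   (n = 4 bytes per sample frame), the ideal correction is 0.050 * 44100 * 4
   ~= 8820 bytes less in this buffer, but the change is clamped to
   +/- SAMPLE_CORRECTION_PERCENT_MAX (10%) of the buffer so the speed change
   stays small (illustrative numbers) */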
1576 /* decode one audio frame and returns its uncompressed size */
1577 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1579 AVPacket *pkt_temp = &is->audio_pkt_temp;
1580 AVPacket *pkt = &is->audio_pkt;
1581 AVCodecContext *dec= is->audio_st->codec;
1582 int n, len1, data_size;
1583 double pts;
1585 for(;;) {
1586 /* NOTE: the audio packet can contain several frames */
1587 while (pkt_temp->size > 0) {
1588 data_size = sizeof(is->audio_buf1);
1589 len1 = avcodec_decode_audio3(dec,
1590 (int16_t *)is->audio_buf1, &data_size,
1591 pkt_temp);
1592 if (len1 < 0) {
1593 /* if error, we skip the frame */
1594 pkt_temp->size = 0;
1595 break;
1598 pkt_temp->data += len1;
1599 pkt_temp->size -= len1;
1600 if (data_size <= 0)
1601 continue;
1603 if (dec->sample_fmt != is->audio_src_fmt) {
1604 if (is->reformat_ctx)
1605 av_audio_convert_free(is->reformat_ctx);
1606 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1607 dec->sample_fmt, 1, NULL, 0);
1608 if (!is->reformat_ctx) {
1609 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1610 avcodec_get_sample_fmt_name(dec->sample_fmt),
1611 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1612 break;
1614 is->audio_src_fmt= dec->sample_fmt;
1617 if (is->reformat_ctx) {
1618 const void *ibuf[6]= {is->audio_buf1};
1619 void *obuf[6]= {is->audio_buf2};
1620 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1621 int ostride[6]= {2};
1622 int len= data_size/istride[0];
1623 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1624 printf("av_audio_convert() failed\n");
1625 break;
1627 is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals
                   framesize*channels*2; remove this legacy cruft */
1630 data_size= len*2;
1631 }else{
1632 is->audio_buf= is->audio_buf1;
1635 /* if no pts, then compute it */
1636 pts = is->audio_clock;
1637 *pts_ptr = pts;
1638 n = 2 * dec->channels;
1639 is->audio_clock += (double)data_size /
1640 (double)(n * dec->sample_rate);
1641 #if defined(DEBUG_SYNC)
1643 static double last_clock;
1644 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1645 is->audio_clock - last_clock,
1646 is->audio_clock, pts);
1647 last_clock = is->audio_clock;
1649 #endif
1650 return data_size;
1653 /* free the current packet */
1654 if (pkt->data)
1655 av_free_packet(pkt);
1657 if (is->paused || is->audioq.abort_request) {
1658 return -1;
1661 /* read next packet */
1662 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1663 return -1;
1664 if(pkt->data == flush_pkt.data){
1665 avcodec_flush_buffers(dec);
1666 continue;
1669 pkt_temp->data = pkt->data;
1670 pkt_temp->size = pkt->size;
        /* update the audio clock with the packet pts, if available */
1673 if (pkt->pts != AV_NOPTS_VALUE) {
1674 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
/* get the current audio output buffer size, in bytes. With SDL, we
   cannot have precise buffer fullness information */
1681 static int audio_write_get_buf_size(VideoState *is)
1683 return is->audio_buf_size - is->audio_buf_index;
1687 /* prepare a new audio buffer */
1688 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1690 VideoState *is = opaque;
1691 int audio_size, len1;
1692 double pts;
1694 audio_callback_time = av_gettime();
1696 while (len > 0) {
1697 if (is->audio_buf_index >= is->audio_buf_size) {
1698 audio_size = audio_decode_frame(is, &pts);
1699 if (audio_size < 0) {
1700 /* if error, just output silence */
1701 is->audio_buf = is->audio_buf1;
1702 is->audio_buf_size = 1024;
1703 memset(is->audio_buf, 0, is->audio_buf_size);
1704 } else {
1705 if (is->show_audio)
1706 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1707 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1708 pts);
1709 is->audio_buf_size = audio_size;
1711 is->audio_buf_index = 0;
1713 len1 = is->audio_buf_size - is->audio_buf_index;
1714 if (len1 > len)
1715 len1 = len;
1716 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1717 len -= len1;
1718 stream += len1;
1719 is->audio_buf_index += len1;
1723 /* open a given stream. Return 0 if OK */
1724 static int stream_component_open(VideoState *is, int stream_index)
1726 AVFormatContext *ic = is->ic;
1727 AVCodecContext *enc;
1728 AVCodec *codec;
1729 SDL_AudioSpec wanted_spec, spec;
1731 if (stream_index < 0 || stream_index >= ic->nb_streams)
1732 return -1;
1733 enc = ic->streams[stream_index]->codec;
1735 /* prepare audio output */
1736 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1737 if (enc->channels > 0) {
1738 enc->request_channels = FFMIN(2, enc->channels);
1739 } else {
1740 enc->request_channels = 2;
1744 codec = avcodec_find_decoder(enc->codec_id);
1745 enc->debug_mv = debug_mv;
1746 enc->debug = debug;
1747 enc->workaround_bugs = workaround_bugs;
1748 enc->lowres = lowres;
1749 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1750 enc->idct_algo= idct;
1751 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1752 enc->skip_frame= skip_frame;
1753 enc->skip_idct= skip_idct;
1754 enc->skip_loop_filter= skip_loop_filter;
1755 enc->error_recognition= error_recognition;
1756 enc->error_concealment= error_concealment;
1758 set_context_opts(enc, avcodec_opts[enc->codec_type], 0);
1760 if (!codec ||
1761 avcodec_open(enc, codec) < 0)
1762 return -1;
1764 /* prepare audio output */
1765 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1766 wanted_spec.freq = enc->sample_rate;
1767 wanted_spec.format = AUDIO_S16SYS;
1768 wanted_spec.channels = enc->channels;
1769 wanted_spec.silence = 0;
1770 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1771 wanted_spec.callback = sdl_audio_callback;
1772 wanted_spec.userdata = is;
1773 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1774 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1775 return -1;
1777 is->audio_hw_buf_size = spec.size;
1778 is->audio_src_fmt= SAMPLE_FMT_S16;
1781 if(thread_count>1)
1782 avcodec_thread_init(enc, thread_count);
1783 enc->thread_count= thread_count;
1784 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1785 switch(enc->codec_type) {
1786 case CODEC_TYPE_AUDIO:
1787 is->audio_stream = stream_index;
1788 is->audio_st = ic->streams[stream_index];
1789 is->audio_buf_size = 0;
1790 is->audio_buf_index = 0;
1792 /* init averaging filter */
1793 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1794 is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio FIFO fullness measure,
           we correct audio sync only if the error is larger than this threshold */
1797 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
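        /* e.g. ~46 ms for a 44.1 kHz stream (2 * 1024 / 44100): sync errors
           smaller than about two SDL audio buffers are left uncorrected */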
1799 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1800 packet_queue_init(&is->audioq);
1801 SDL_PauseAudio(0);
1802 break;
1803 case CODEC_TYPE_VIDEO:
1804 is->video_stream = stream_index;
1805 is->video_st = ic->streams[stream_index];
1807 is->frame_last_delay = 40e-3;
1808 is->frame_timer = (double)av_gettime() / 1000000.0;
1809 is->video_current_pts_time = av_gettime();
1811 packet_queue_init(&is->videoq);
1812 is->video_tid = SDL_CreateThread(video_thread, is);
1813 break;
1814 case CODEC_TYPE_SUBTITLE:
1815 is->subtitle_stream = stream_index;
1816 is->subtitle_st = ic->streams[stream_index];
1817 packet_queue_init(&is->subtitleq);
1819 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1820 break;
1821 default:
1822 break;
1824 return 0;
1827 static void stream_component_close(VideoState *is, int stream_index)
1829 AVFormatContext *ic = is->ic;
1830 AVCodecContext *enc;
1832 if (stream_index < 0 || stream_index >= ic->nb_streams)
1833 return;
1834 enc = ic->streams[stream_index]->codec;
1836 switch(enc->codec_type) {
1837 case CODEC_TYPE_AUDIO:
1838 packet_queue_abort(&is->audioq);
1840 SDL_CloseAudio();
1842 packet_queue_end(&is->audioq);
1843 if (is->reformat_ctx)
1844 av_audio_convert_free(is->reformat_ctx);
1845 break;
1846 case CODEC_TYPE_VIDEO:
1847 packet_queue_abort(&is->videoq);
1849 /* note: we also signal this mutex to make sure we deblock the
1850 video thread in all cases */
1851 SDL_LockMutex(is->pictq_mutex);
1852 SDL_CondSignal(is->pictq_cond);
1853 SDL_UnlockMutex(is->pictq_mutex);
1855 SDL_WaitThread(is->video_tid, NULL);
1857 packet_queue_end(&is->videoq);
1858 break;
1859 case CODEC_TYPE_SUBTITLE:
1860 packet_queue_abort(&is->subtitleq);
1862 /* note: we also signal this mutex to make sure we deblock the
1863 video thread in all cases */
1864 SDL_LockMutex(is->subpq_mutex);
1865 is->subtitle_stream_changed = 1;
1867 SDL_CondSignal(is->subpq_cond);
1868 SDL_UnlockMutex(is->subpq_mutex);
1870 SDL_WaitThread(is->subtitle_tid, NULL);
1872 packet_queue_end(&is->subtitleq);
1873 break;
1874 default:
1875 break;
1878 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1879 avcodec_close(enc);
1880 switch(enc->codec_type) {
1881 case CODEC_TYPE_AUDIO:
1882 is->audio_st = NULL;
1883 is->audio_stream = -1;
1884 break;
1885 case CODEC_TYPE_VIDEO:
1886 is->video_st = NULL;
1887 is->video_stream = -1;
1888 break;
1889 case CODEC_TYPE_SUBTITLE:
1890 is->subtitle_st = NULL;
1891 is->subtitle_stream = -1;
1892 break;
1893 default:
1894 break;
1898 static void dump_stream_info(const AVFormatContext *s)
1900 AVMetadataTag *tag = NULL;
1901 while ((tag=av_metadata_get(s->metadata,"",tag,AV_METADATA_IGNORE_SUFFIX)))
1902 fprintf(stderr, "%s: %s\n", tag->key, tag->value);
1905 /* since we have only one decoding thread, we can use a global
1906 variable instead of a thread local variable */
1907 static VideoState *global_video_state;
1909 static int decode_interrupt_cb(void)
1911 return (global_video_state && global_video_state->abort_request);
1914 /* this thread gets the stream from the disk or the network */
1915 static int decode_thread(void *arg)
1917 VideoState *is = arg;
1918 AVFormatContext *ic;
1919 int err, i, ret, video_index, audio_index, subtitle_index;
1920 AVPacket pkt1, *pkt = &pkt1;
1921 AVFormatParameters params, *ap = &params;
1922 int eof=0;
1924 video_index = -1;
1925 audio_index = -1;
1926 subtitle_index = -1;
1927 is->video_stream = -1;
1928 is->audio_stream = -1;
1929 is->subtitle_stream = -1;
1931 global_video_state = is;
1932 url_set_interrupt_cb(decode_interrupt_cb);
1934 memset(ap, 0, sizeof(*ap));
1936 ap->width = frame_width;
1937 ap->height= frame_height;
1938 ap->time_base= (AVRational){1, 25};
1939 ap->pix_fmt = frame_pix_fmt;
1941 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1942 if (err < 0) {
1943 print_error(is->filename, err);
1944 ret = -1;
1945 goto fail;
1947 is->ic = ic;
1949 if(genpts)
1950 ic->flags |= AVFMT_FLAG_GENPTS;
1952 err = av_find_stream_info(ic);
1953 if (err < 0) {
1954 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1955 ret = -1;
1956 goto fail;
1958 if(ic->pb)
1959 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1961 /* if seeking requested, we execute it */
1962 if (start_time != AV_NOPTS_VALUE) {
1963 int64_t timestamp;
1965 timestamp = start_time;
1966 /* add the stream start time */
1967 if (ic->start_time != AV_NOPTS_VALUE)
1968 timestamp += ic->start_time;
1969 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
1970 if (ret < 0) {
1971 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1972 is->filename, (double)timestamp / AV_TIME_BASE);
1976 for(i = 0; i < ic->nb_streams; i++) {
1977 AVCodecContext *enc = ic->streams[i]->codec;
1978 ic->streams[i]->discard = AVDISCARD_ALL;
1979 switch(enc->codec_type) {
1980 case CODEC_TYPE_AUDIO:
1981 if (wanted_audio_stream-- >= 0 && !audio_disable)
1982 audio_index = i;
1983 break;
1984 case CODEC_TYPE_VIDEO:
1985 if (wanted_video_stream-- >= 0 && !video_disable)
1986 video_index = i;
1987 break;
1988 case CODEC_TYPE_SUBTITLE:
1989 if (wanted_subtitle_stream-- >= 0 && !video_disable)
1990 subtitle_index = i;
1991 break;
1992 default:
1993 break;
1996 if (show_status) {
1997 dump_format(ic, 0, is->filename, 0);
1998 dump_stream_info(ic);
2001 /* open the streams */
2002 if (audio_index >= 0) {
2003 stream_component_open(is, audio_index);
2006 if (video_index >= 0) {
2007 stream_component_open(is, video_index);
2008 } else {
2009 if (!display_disable)
2010 is->show_audio = 1;
2013 if (subtitle_index >= 0) {
2014 stream_component_open(is, subtitle_index);
2017 if (is->video_stream < 0 && is->audio_stream < 0) {
2018 fprintf(stderr, "%s: could not open codecs\n", is->filename);
2019 ret = -1;
2020 goto fail;
2023 for(;;) {
2024 if (is->abort_request)
2025 break;
2026 if (is->paused != is->last_paused) {
2027 is->last_paused = is->paused;
2028 if (is->paused)
2029 av_read_pause(ic);
2030 else
2031 av_read_play(ic);
2033 #if CONFIG_RTSP_DEMUXER
2034 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2035 /* wait 10 ms to avoid trying to get another packet */
2036 /* XXX: horrible */
2037 SDL_Delay(10);
2038 continue;
2040 #endif
2041 if (is->seek_req) {
2042 int64_t seek_target= is->seek_pos;
2043 int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2044 int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2045 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2046 // of the seek_pos/seek_rel variables
2048 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2049 if (ret < 0) {
2050 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2051 }else{
2052 if (is->audio_stream >= 0) {
2053 packet_queue_flush(&is->audioq);
2054 packet_queue_put(&is->audioq, &flush_pkt);
2056 if (is->subtitle_stream >= 0) {
2057 packet_queue_flush(&is->subtitleq);
2058 packet_queue_put(&is->subtitleq, &flush_pkt);
2060 if (is->video_stream >= 0) {
2061 packet_queue_flush(&is->videoq);
2062 packet_queue_put(&is->videoq, &flush_pkt);
2065 is->seek_req = 0;
2066 eof= 0;
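        /* the flush_pkt queued above is recognised by each decoder loop
           (pkt->data == flush_pkt.data) and triggers avcodec_flush_buffers(),
           discarding anything decoded from before the seek point */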
        /* if the queues are full, no need to read more */
2070 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2071 is->videoq.size > MAX_VIDEOQ_SIZE ||
2072 is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
2073 /* wait 10 ms */
2074 SDL_Delay(10);
2075 continue;
2077 if(url_feof(ic->pb) || eof) {
2078 if(is->video_stream >= 0){
2079 av_init_packet(pkt);
2080 pkt->data=NULL;
2081 pkt->size=0;
2082 pkt->stream_index= is->video_stream;
2083 packet_queue_put(&is->videoq, pkt);
2085 SDL_Delay(10);
2086 continue;
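            /* the empty packet queued above at EOF makes the video decoder
               return its remaining buffered (delayed) frames */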
2088 ret = av_read_frame(ic, pkt);
2089 if (ret < 0) {
2090 if (ret == AVERROR_EOF)
2091 eof=1;
2092 if (url_ferror(ic->pb))
2093 break;
2094 SDL_Delay(100); /* wait for user event */
2095 continue;
2097 if (pkt->stream_index == is->audio_stream) {
2098 packet_queue_put(&is->audioq, pkt);
2099 } else if (pkt->stream_index == is->video_stream) {
2100 packet_queue_put(&is->videoq, pkt);
2101 } else if (pkt->stream_index == is->subtitle_stream) {
2102 packet_queue_put(&is->subtitleq, pkt);
2103 } else {
2104 av_free_packet(pkt);
2107 /* wait until the end */
2108 while (!is->abort_request) {
2109 SDL_Delay(100);
2112 ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        /* notify the main thread that the decode thread failed */
        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
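
/* open a media file: allocate the VideoState, start the display refresh
   timer and spawn decode_thread(), which demuxes the input and feeds the
   packet queues */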
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}
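
/* tear down a VideoState: stop the decode thread, free the queued pictures
   and destroy the mutexes/conditions created in stream_open() */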
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
}
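
/* switch to the next stream of the given codec type; wraps around, and for
   subtitles may select "no stream" (index -1) */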
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams) {
            if (codec_type == CODEC_TYPE_SUBTITLE) {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
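
/* interactive controls triggered from the event loop */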
static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}
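
/* the event loop runs on the main thread; demuxing and decoding happen in
   the threads created by stream_open() */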
/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: // S: step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE));
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height = cur_stream->height = event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
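
/* command line option handlers: they mostly store the parsed value in a
   global that is picked up when the streams are opened */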
static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
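
/* table mapping command line options to the globals and handlers above */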
static const OptionDef options[] = {
    { "h", OPT_EXIT, {(void*)show_help}, "show help" },
    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", "" },
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
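
/* print the usage line, the option list and the interactive key bindings */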
static void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
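
/* non-option argument: the input file name ("-" means standard input, read
   through the pipe: protocol) */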
static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* entry point */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for (i = 0; i < CODEC_TYPE_NB; i++) {
        avcodec_opts[i] = avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        fprintf(stderr, "An input file must be specified\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init(flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* the flush packet is used as a marker to reset the decoders after a seek */
    av_init_packet(&flush_pkt);
    flush_pkt.data = "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}