ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
29 #include "libavcodec/audioconvert.h"
30 #include "libavcodec/opt.h"
32 #include "cmdutils.h"
34 #include <SDL.h>
35 #include <SDL_thread.h>
37 #ifdef __MINGW32__
38 #undef main /* We don't want SDL to override our main() */
39 #endif
41 #undef exit
43 const char program_name[] = "FFplay";
44 const int program_birth_year = 2003;
46 //#define DEBUG_SYNC
48 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
49 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
50 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
52 /* SDL audio buffer size, in samples. Should be small to have precise
53 A/V sync as SDL does not have hardware buffer fullness info. */
54 #define SDL_AUDIO_BUFFER_SIZE 1024
56 /* no AV sync correction is done if below the AV sync threshold */
57 #define AV_SYNC_THRESHOLD 0.01
58 /* no AV correction is done if the error is too big */
59 #define AV_NOSYNC_THRESHOLD 10.0
61 /* maximum audio speed change to get correct sync */
62 #define SAMPLE_CORRECTION_PERCENT_MAX 10
64 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
65 #define AUDIO_DIFF_AVG_NB 20
67 /* NOTE: the size must be big enough to compensate the hardware audio buffer size */
68 #define SAMPLE_ARRAY_SIZE (2*65536)
70 static int sws_flags = SWS_BICUBIC;
72 typedef struct PacketQueue {
73 AVPacketList *first_pkt, *last_pkt;
74 int nb_packets;
75 int size;
76 int abort_request;
77 SDL_mutex *mutex;
78 SDL_cond *cond;
79 } PacketQueue;
81 #define VIDEO_PICTURE_QUEUE_SIZE 1
82 #define SUBPICTURE_QUEUE_SIZE 4
84 typedef struct VideoPicture {
85 double pts; ///<presentation time stamp for this picture
86 SDL_Overlay *bmp;
87 int width, height; /* source height & width */
88 int allocated;
89 } VideoPicture;
91 typedef struct SubPicture {
92 double pts; /* presentation time stamp for this picture */
93 AVSubtitle sub;
94 } SubPicture;
96 enum {
97 AV_SYNC_AUDIO_MASTER, /* default choice */
98 AV_SYNC_VIDEO_MASTER,
99 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
102 typedef struct VideoState {
103 SDL_Thread *parse_tid;
104 SDL_Thread *video_tid;
105 AVInputFormat *iformat;
106 int no_background;
107 int abort_request;
108 int paused;
109 int last_paused;
110 int seek_req;
111 int seek_flags;
112 int64_t seek_pos;
113 AVFormatContext *ic;
114 int dtg_active_format;
116 int audio_stream;
118 int av_sync_type;
119 double external_clock; /* external clock base */
120 int64_t external_clock_time;
122 double audio_clock;
123 double audio_diff_cum; /* used for AV difference average computation */
124 double audio_diff_avg_coef;
125 double audio_diff_threshold;
126 int audio_diff_avg_count;
127 AVStream *audio_st;
128 PacketQueue audioq;
129 int audio_hw_buf_size;
130 /* samples output by the codec. we reserve more space for avsync
131 compensation */
132 DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
133 DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
134 uint8_t *audio_buf;
135 unsigned int audio_buf_size; /* in bytes */
136 int audio_buf_index; /* in bytes */
137 AVPacket audio_pkt;
138 uint8_t *audio_pkt_data;
139 int audio_pkt_size;
140 enum SampleFormat audio_src_fmt;
141 AVAudioConvert *reformat_ctx;
143 int show_audio; /* if true, display audio samples */
144 int16_t sample_array[SAMPLE_ARRAY_SIZE];
145 int sample_array_index;
146 int last_i_start;
148 SDL_Thread *subtitle_tid;
149 int subtitle_stream;
150 int subtitle_stream_changed;
151 AVStream *subtitle_st;
152 PacketQueue subtitleq;
153 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
154 int subpq_size, subpq_rindex, subpq_windex;
155 SDL_mutex *subpq_mutex;
156 SDL_cond *subpq_cond;
158 double frame_timer;
159 double frame_last_pts;
160 double frame_last_delay;
161 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
162 int video_stream;
163 AVStream *video_st;
164 PacketQueue videoq;
165 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
166 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
167 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
168 int pictq_size, pictq_rindex, pictq_windex;
169 SDL_mutex *pictq_mutex;
170 SDL_cond *pictq_cond;
172 // QETimer *video_timer;
173 char filename[1024];
174 int width, height, xleft, ytop;
175 } VideoState;
177 static void show_help(void);
178 static int audio_write_get_buf_size(VideoState *is);
180 /* options specified by the user */
181 static AVInputFormat *file_iformat;
182 static const char *input_filename;
183 static int fs_screen_width;
184 static int fs_screen_height;
185 static int screen_width = 0;
186 static int screen_height = 0;
187 static int frame_width = 0;
188 static int frame_height = 0;
189 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
190 static int audio_disable;
191 static int video_disable;
192 static int wanted_audio_stream= 0;
193 static int wanted_video_stream= 0;
194 static int seek_by_bytes;
195 static int display_disable;
196 static int show_status;
197 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
198 static int64_t start_time = AV_NOPTS_VALUE;
199 static int debug = 0;
200 static int debug_mv = 0;
201 static int step = 0;
202 static int thread_count = 1;
203 static int workaround_bugs = 1;
204 static int fast = 0;
205 static int genpts = 0;
206 static int lowres = 0;
207 static int idct = FF_IDCT_AUTO;
208 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
209 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
210 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
211 static int error_recognition = FF_ER_CAREFUL;
212 static int error_concealment = 3;
213 static int decoder_reorder_pts= 0;
215 /* current context */
216 static int is_full_screen;
217 static VideoState *cur_stream;
218 static int64_t audio_callback_time;
220 static AVPacket flush_pkt;
222 #define FF_ALLOC_EVENT (SDL_USEREVENT)
223 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
224 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
226 static SDL_Surface *screen;
228 /* packet queue handling */
229 static void packet_queue_init(PacketQueue *q)
231 memset(q, 0, sizeof(PacketQueue));
232 q->mutex = SDL_CreateMutex();
233 q->cond = SDL_CreateCond();
236 static void packet_queue_flush(PacketQueue *q)
238 AVPacketList *pkt, *pkt1;
240 SDL_LockMutex(q->mutex);
241 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
242 pkt1 = pkt->next;
243 av_free_packet(&pkt->pkt);
244 av_freep(&pkt);
246 q->last_pkt = NULL;
247 q->first_pkt = NULL;
248 q->nb_packets = 0;
249 q->size = 0;
250 SDL_UnlockMutex(q->mutex);
253 static void packet_queue_end(PacketQueue *q)
255 packet_queue_flush(q);
256 SDL_DestroyMutex(q->mutex);
257 SDL_DestroyCond(q->cond);
260 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
262 AVPacketList *pkt1;
264 /* duplicate the packet */
265 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
266 return -1;
268 pkt1 = av_malloc(sizeof(AVPacketList));
269 if (!pkt1)
270 return -1;
271 pkt1->pkt = *pkt;
272 pkt1->next = NULL;
275 SDL_LockMutex(q->mutex);
277 if (!q->last_pkt)
279 q->first_pkt = pkt1;
280 else
281 q->last_pkt->next = pkt1;
282 q->last_pkt = pkt1;
283 q->nb_packets++;
284 q->size += pkt1->pkt.size;
285 /* XXX: should duplicate packet data in DV case */
286 SDL_CondSignal(q->cond);
288 SDL_UnlockMutex(q->mutex);
289 return 0;
292 static void packet_queue_abort(PacketQueue *q)
294 SDL_LockMutex(q->mutex);
296 q->abort_request = 1;
298 SDL_CondSignal(q->cond);
300 SDL_UnlockMutex(q->mutex);
303 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
304 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
306 AVPacketList *pkt1;
307 int ret;
309 SDL_LockMutex(q->mutex);
311 for(;;) {
312 if (q->abort_request) {
313 ret = -1;
314 break;
317 pkt1 = q->first_pkt;
318 if (pkt1) {
319 q->first_pkt = pkt1->next;
320 if (!q->first_pkt)
321 q->last_pkt = NULL;
322 q->nb_packets--;
323 q->size -= pkt1->pkt.size;
324 *pkt = pkt1->pkt;
325 av_free(pkt1);
326 ret = 1;
327 break;
328 } else if (!block) {
329 ret = 0;
330 break;
331 } else {
332 SDL_CondWait(q->cond, q->mutex);
335 SDL_UnlockMutex(q->mutex);
336 return ret;
339 static inline void fill_rectangle(SDL_Surface *screen,
340 int x, int y, int w, int h, int color)
342 SDL_Rect rect;
343 rect.x = x;
344 rect.y = y;
345 rect.w = w;
346 rect.h = h;
347 SDL_FillRect(screen, &rect, color);
350 #if 0
351 /* draw only the border of a rectangle */
352 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
354 int w1, w2, h1, h2;
356 /* fill the background */
357 w1 = x;
358 if (w1 < 0)
359 w1 = 0;
360 w2 = s->width - (x + w);
361 if (w2 < 0)
362 w2 = 0;
363 h1 = y;
364 if (h1 < 0)
365 h1 = 0;
366 h2 = s->height - (y + h);
367 if (h2 < 0)
368 h2 = 0;
369 fill_rectangle(screen,
370 s->xleft, s->ytop,
371 w1, s->height,
372 color);
373 fill_rectangle(screen,
374 s->xleft + s->width - w2, s->ytop,
375 w2, s->height,
376 color);
377 fill_rectangle(screen,
378 s->xleft + w1, s->ytop,
379 s->width - w1 - w2, h1,
380 color);
381 fill_rectangle(screen,
382 s->xleft + w1, s->ytop + s->height - h2,
383 s->width - w1 - w2, h2,
384 color);
386 #endif
390 #define SCALEBITS 10
391 #define ONE_HALF (1 << (SCALEBITS - 1))
392 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
394 #define RGB_TO_Y_CCIR(r, g, b) \
395 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
396 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
398 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
399 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
400 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
402 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
403 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
404 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
406 #define ALPHA_BLEND(a, oldp, newp, s)\
407 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
409 #define RGBA_IN(r, g, b, a, s)\
411 unsigned int v = ((const uint32_t *)(s))[0];\
412 a = (v >> 24) & 0xff;\
413 r = (v >> 16) & 0xff;\
414 g = (v >> 8) & 0xff;\
415 b = v & 0xff;\
418 #define YUVA_IN(y, u, v, a, s, pal)\
420 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
421 a = (val >> 24) & 0xff;\
422 y = (val >> 16) & 0xff;\
423 u = (val >> 8) & 0xff;\
424 v = val & 0xff;\
427 #define YUVA_OUT(d, y, u, v, a)\
429 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
433 #define BPP 1
435 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
437 int wrap, wrap3, width2, skip2;
438 int y, u, v, a, u1, v1, a1, w, h;
439 uint8_t *lum, *cb, *cr;
440 const uint8_t *p;
441 const uint32_t *pal;
442 int dstx, dsty, dstw, dsth;
444 dstw = av_clip(rect->w, 0, imgw);
445 dsth = av_clip(rect->h, 0, imgh);
446 dstx = av_clip(rect->x, 0, imgw - dstw);
447 dsty = av_clip(rect->y, 0, imgh - dsth);
448 lum = dst->data[0] + dsty * dst->linesize[0];
449 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
450 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
452 width2 = (dstw + 1) >> 1;
453 skip2 = dstx >> 1;
454 wrap = dst->linesize[0];
455 wrap3 = rect->pict.linesize[0];
456 p = rect->pict.data[0];
457 pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
459 if (dsty & 1) {
460 lum += dstx;
461 cb += skip2;
462 cr += skip2;
464 if (dstx & 1) {
465 YUVA_IN(y, u, v, a, p, pal);
466 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
467 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
468 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
469 cb++;
470 cr++;
471 lum++;
472 p += BPP;
474 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
475 YUVA_IN(y, u, v, a, p, pal);
476 u1 = u;
477 v1 = v;
478 a1 = a;
479 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481 YUVA_IN(y, u, v, a, p + BPP, pal);
482 u1 += u;
483 v1 += v;
484 a1 += a;
485 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
486 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
487 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
488 cb++;
489 cr++;
490 p += 2 * BPP;
491 lum += 2;
493 if (w) {
494 YUVA_IN(y, u, v, a, p, pal);
495 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
496 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
497 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499 p += wrap3 + (wrap3 - dstw * BPP);
500 lum += wrap + (wrap - dstw - dstx);
501 cb += dst->linesize[1] - width2 - skip2;
502 cr += dst->linesize[2] - width2 - skip2;
504 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
505 lum += dstx;
506 cb += skip2;
507 cr += skip2;
509 if (dstx & 1) {
510 YUVA_IN(y, u, v, a, p, pal);
511 u1 = u;
512 v1 = v;
513 a1 = a;
514 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
515 p += wrap3;
516 lum += wrap;
517 YUVA_IN(y, u, v, a, p, pal);
518 u1 += u;
519 v1 += v;
520 a1 += a;
521 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
523 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
524 cb++;
525 cr++;
526 p += -wrap3 + BPP;
527 lum += -wrap + 1;
529 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
530 YUVA_IN(y, u, v, a, p, pal);
531 u1 = u;
532 v1 = v;
533 a1 = a;
534 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536 YUVA_IN(y, u, v, a, p, pal);
537 u1 += u;
538 v1 += v;
539 a1 += a;
540 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
541 p += wrap3;
542 lum += wrap;
544 YUVA_IN(y, u, v, a, p, pal);
545 u1 += u;
546 v1 += v;
547 a1 += a;
548 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
550 YUVA_IN(y, u, v, a, p, pal);
551 u1 += u;
552 v1 += v;
553 a1 += a;
554 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
556 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
557 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
559 cb++;
560 cr++;
561 p += -wrap3 + 2 * BPP;
562 lum += -wrap + 2;
564 if (w) {
565 YUVA_IN(y, u, v, a, p, pal);
566 u1 = u;
567 v1 = v;
568 a1 = a;
569 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
570 p += wrap3;
571 lum += wrap;
572 YUVA_IN(y, u, v, a, p, pal);
573 u1 += u;
574 v1 += v;
575 a1 += a;
576 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
578 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
579 cb++;
580 cr++;
581 p += -wrap3 + BPP;
582 lum += -wrap + 1;
584 p += wrap3 + (wrap3 - dstw * BPP);
585 lum += wrap + (wrap - dstw - dstx);
586 cb += dst->linesize[1] - width2 - skip2;
587 cr += dst->linesize[2] - width2 - skip2;
589 /* handle odd height */
590 if (h) {
591 lum += dstx;
592 cb += skip2;
593 cr += skip2;
595 if (dstx & 1) {
596 YUVA_IN(y, u, v, a, p, pal);
597 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
598 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
599 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
600 cb++;
601 cr++;
602 lum++;
603 p += BPP;
605 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
606 YUVA_IN(y, u, v, a, p, pal);
607 u1 = u;
608 v1 = v;
609 a1 = a;
610 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612 YUVA_IN(y, u, v, a, p + BPP, pal);
613 u1 += u;
614 v1 += v;
615 a1 += a;
616 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
617 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
618 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
619 cb++;
620 cr++;
621 p += 2 * BPP;
622 lum += 2;
624 if (w) {
625 YUVA_IN(y, u, v, a, p, pal);
626 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
627 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
628 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
633 static void free_subpicture(SubPicture *sp)
635 int i;
637 for (i = 0; i < sp->sub.num_rects; i++)
639 av_freep(&sp->sub.rects[i]->pict.data[0]);
640 av_freep(&sp->sub.rects[i]->pict.data[1]);
641 av_freep(&sp->sub.rects[i]);
644 av_free(sp->sub.rects);
646 memset(&sp->sub, 0, sizeof(AVSubtitle));
649 static void video_image_display(VideoState *is)
651 VideoPicture *vp;
652 SubPicture *sp;
653 AVPicture pict;
654 float aspect_ratio;
655 int width, height, x, y;
656 SDL_Rect rect;
657 int i;
659 vp = &is->pictq[is->pictq_rindex];
660 if (vp->bmp) {
661 /* XXX: use variable in the frame */
662 if (is->video_st->sample_aspect_ratio.num)
663 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
664 else if (is->video_st->codec->sample_aspect_ratio.num)
665 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
666 else
667 aspect_ratio = 0;
668 if (aspect_ratio <= 0.0)
669 aspect_ratio = 1.0;
670 aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
671 /* if an active format is indicated, then it overrides the
672 mpeg format */
673 #if 0
674 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
675 is->dtg_active_format = is->video_st->codec->dtg_active_format;
676 printf("dtg_active_format=%d\n", is->dtg_active_format);
678 #endif
679 #if 0
680 switch(is->video_st->codec->dtg_active_format) {
681 case FF_DTG_AFD_SAME:
682 default:
683 /* nothing to do */
684 break;
685 case FF_DTG_AFD_4_3:
686 aspect_ratio = 4.0 / 3.0;
687 break;
688 case FF_DTG_AFD_16_9:
689 aspect_ratio = 16.0 / 9.0;
690 break;
691 case FF_DTG_AFD_14_9:
692 aspect_ratio = 14.0 / 9.0;
693 break;
694 case FF_DTG_AFD_4_3_SP_14_9:
695 aspect_ratio = 14.0 / 9.0;
696 break;
697 case FF_DTG_AFD_16_9_SP_14_9:
698 aspect_ratio = 14.0 / 9.0;
699 break;
700 case FF_DTG_AFD_SP_4_3:
701 aspect_ratio = 4.0 / 3.0;
702 break;
704 #endif
706 if (is->subtitle_st)
708 if (is->subpq_size > 0)
710 sp = &is->subpq[is->subpq_rindex];
712 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
714 SDL_LockYUVOverlay (vp->bmp);
716 pict.data[0] = vp->bmp->pixels[0];
717 pict.data[1] = vp->bmp->pixels[2];
718 pict.data[2] = vp->bmp->pixels[1];
720 pict.linesize[0] = vp->bmp->pitches[0];
721 pict.linesize[1] = vp->bmp->pitches[2];
722 pict.linesize[2] = vp->bmp->pitches[1];
724 for (i = 0; i < sp->sub.num_rects; i++)
725 blend_subrect(&pict, sp->sub.rects[i],
726 vp->bmp->w, vp->bmp->h);
728 SDL_UnlockYUVOverlay (vp->bmp);
734 /* XXX: we suppose the screen has a 1.0 pixel ratio */
735 height = is->height;
736 width = ((int)rint(height * aspect_ratio)) & ~1;
737 if (width > is->width) {
738 width = is->width;
739 height = ((int)rint(width / aspect_ratio)) & ~1;
741 x = (is->width - width) / 2;
742 y = (is->height - height) / 2;
743 if (!is->no_background) {
744 /* fill the background */
745 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
746 } else {
747 is->no_background = 0;
749 rect.x = is->xleft + x;
750 rect.y = is->ytop + y;
751 rect.w = width;
752 rect.h = height;
753 SDL_DisplayYUVOverlay(vp->bmp, &rect);
754 } else {
755 #if 0
756 fill_rectangle(screen,
757 is->xleft, is->ytop, is->width, is->height,
758 QERGB(0x00, 0x00, 0x00));
759 #endif
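/* wrap-around modulo: unlike C's '%' operator, the result is always in
   [0, b), even for negative a; this is relied on for circular indexing
   into sample_array in video_audio_display() below */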
763 static inline int compute_mod(int a, int b)
765 a = a % b;
766 if (a >= 0)
767 return a;
768 else
769 return a + b;
772 static void video_audio_display(VideoState *s)
774 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
775 int ch, channels, h, h2, bgcolor, fgcolor;
776 int64_t time_diff; /* microseconds since the last audio callback */
778 /* compute display index : center on currently output samples */
779 channels = s->audio_st->codec->channels;
780 nb_display_channels = channels;
781 if (!s->paused) {
782 n = 2 * channels;
783 delay = audio_write_get_buf_size(s);
784 delay /= n;
786 /* to be more precise, we take into account the time spent since
787 the last buffer computation */
788 if (audio_callback_time) {
789 time_diff = av_gettime() - audio_callback_time;
790 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
793 delay -= s->width / 2;
794 if (delay < s->width)
795 delay = s->width;
797 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
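    /* oscilloscope-style trigger: scan the last 1000 samples and pick the
       point with the largest drop over the next 9 samples (score = a - d)
       that also straddles a sign change 4-5 samples ahead ((b ^ c) < 0),
       so the displayed waveform stays reasonably stable between refreshes */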
799 h= INT_MIN;
800 for(i=0; i<1000; i+=channels){
801 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
802 int a= s->sample_array[idx];
803 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
804 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
805 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
806 int score= a-d;
807 if(h<score && (b^c)<0){
808 h= score;
809 i_start= idx;
813 s->last_i_start = i_start;
814 } else {
815 i_start = s->last_i_start;
818 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
819 fill_rectangle(screen,
820 s->xleft, s->ytop, s->width, s->height,
821 bgcolor);
823 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
825 /* total height for one channel */
826 h = s->height / nb_display_channels;
827 /* graph height / 2 */
828 h2 = (h * 9) / 20;
829 for(ch = 0;ch < nb_display_channels; ch++) {
830 i = i_start + ch;
831 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
832 for(x = 0; x < s->width; x++) {
833 y = (s->sample_array[i] * h2) >> 15;
834 if (y < 0) {
835 y = -y;
836 ys = y1 - y;
837 } else {
838 ys = y1;
840 fill_rectangle(screen,
841 s->xleft + x, ys, 1, y,
842 fgcolor);
843 i += channels;
844 if (i >= SAMPLE_ARRAY_SIZE)
845 i -= SAMPLE_ARRAY_SIZE;
849 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
851 for(ch = 1;ch < nb_display_channels; ch++) {
852 y = s->ytop + ch * h;
853 fill_rectangle(screen,
854 s->xleft, y, s->width, 1,
855 fgcolor);
857 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
860 static int video_open(VideoState *is){
861 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
862 int w,h;
864 if(is_full_screen) flags |= SDL_FULLSCREEN;
865 else flags |= SDL_RESIZABLE;
867 if (is_full_screen && fs_screen_width) {
868 w = fs_screen_width;
869 h = fs_screen_height;
870 } else if(!is_full_screen && screen_width){
871 w = screen_width;
872 h = screen_height;
873 }else if (is->video_st && is->video_st->codec->width){
874 w = is->video_st->codec->width;
875 h = is->video_st->codec->height;
876 } else {
877 w = 640;
878 h = 480;
880 #ifndef __APPLE__
881 screen = SDL_SetVideoMode(w, h, 0, flags);
882 #else
883 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
884 screen = SDL_SetVideoMode(w, h, 24, flags);
885 #endif
886 if (!screen) {
887 fprintf(stderr, "SDL: could not set video mode - exiting\n");
888 return -1;
890 SDL_WM_SetCaption("FFplay", "FFplay");
892 is->width = screen->w;
893 is->height = screen->h;
895 return 0;
898 /* display the current picture, if any */
899 static void video_display(VideoState *is)
901 if(!screen)
902 video_open(cur_stream);
903 if (is->audio_st && is->show_audio)
904 video_audio_display(is);
905 else if (is->video_st)
906 video_image_display(is);
909 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
911 SDL_Event event;
912 event.type = FF_REFRESH_EVENT;
913 event.user.data1 = opaque;
914 SDL_PushEvent(&event);
915 return 0; /* 0 means stop timer */
918 /* schedule a video refresh in 'delay' ms */
919 static void schedule_refresh(VideoState *is, int delay)
921 if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
922 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
925 /* get the current audio clock value */
926 static double get_audio_clock(VideoState *is)
928 double pts;
929 int hw_buf_size, bytes_per_sec;
930 pts = is->audio_clock;
931 hw_buf_size = audio_write_get_buf_size(is);
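    /* is->audio_clock refers to the end of the data we last decoded;
       subtracting the playout time of the bytes still waiting in our output
       buffer gives a better estimate of what is audible right now (SDL's own
       hardware buffer is not accounted for, see audio_write_get_buf_size) */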
932 bytes_per_sec = 0;
933 if (is->audio_st) {
934 bytes_per_sec = is->audio_st->codec->sample_rate *
935 2 * is->audio_st->codec->channels;
937 if (bytes_per_sec)
938 pts -= (double)hw_buf_size / bytes_per_sec;
939 return pts;
942 /* get the current video clock value */
943 static double get_video_clock(VideoState *is)
945 double delta;
946 if (is->paused) {
947 delta = 0;
948 } else {
949 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
951 return is->video_current_pts + delta;
954 /* get the current external clock value */
955 static double get_external_clock(VideoState *is)
957 int64_t ti;
958 ti = av_gettime();
959 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
962 /* get the current master clock value */
963 static double get_master_clock(VideoState *is)
965 double val;
967 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
968 if (is->video_st)
969 val = get_video_clock(is);
970 else
971 val = get_audio_clock(is);
972 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
973 if (is->audio_st)
974 val = get_audio_clock(is);
975 else
976 val = get_video_clock(is);
977 } else {
978 val = get_external_clock(is);
980 return val;
983 /* seek in the stream */
984 static void stream_seek(VideoState *is, int64_t pos, int rel)
986 if (!is->seek_req) {
987 is->seek_pos = pos;
988 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
989 if (seek_by_bytes)
990 is->seek_flags |= AVSEEK_FLAG_BYTE;
991 is->seek_req = 1;
995 /* pause or resume the video */
996 static void stream_pause(VideoState *is)
998 is->paused = !is->paused;
999 if (!is->paused) {
1000 is->video_current_pts = get_video_clock(is);
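        /* assumption: advance frame_timer by (roughly) the time spent paused,
           so the refresh logic does not try to "catch up" after unpausing */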
1001 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
1005 /* called to display each frame */
1006 static void video_refresh_timer(void *opaque)
1008 VideoState *is = opaque;
1009 VideoPicture *vp;
1010 double actual_delay, delay, sync_threshold, ref_clock, diff;
1012 SubPicture *sp, *sp2;
1014 if (is->video_st) {
1015 if (is->pictq_size == 0) {
1016 /* if no picture, need to wait */
1017 schedule_refresh(is, 1);
1018 } else {
1019 /* dequeue the picture */
1020 vp = &is->pictq[is->pictq_rindex];
1022 /* update current video pts */
1023 is->video_current_pts = vp->pts;
1024 is->video_current_pts_time = av_gettime();
1026 /* compute nominal delay */
1027 delay = vp->pts - is->frame_last_pts;
1028 if (delay <= 0 || delay >= 10.0) {
1029 /* if incorrect delay, use previous one */
1030 delay = is->frame_last_delay;
1032 is->frame_last_delay = delay;
1033 is->frame_last_pts = vp->pts;
1035 /* update delay to follow master synchronisation source */
1036 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1037 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1038 /* if video is slave, we try to correct big delays by
1039 duplicating or deleting a frame */
1040 ref_clock = get_master_clock(is);
1041 diff = vp->pts - ref_clock;
1043 /* skip or repeat frame. We take into account the
1044 delay to compute the threshold. I still don't know
1045 if it is the best guess */
1046 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1047 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1048 if (diff <= -sync_threshold)
1049 delay = 0;
1050 else if (diff >= sync_threshold)
1051 delay = 2 * delay;
1055 is->frame_timer += delay;
1056 /* compute the REAL delay (we need to do that to avoid
1057 long term errors) */
1058 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1059 if (actual_delay < 0.010) {
1060 /* XXX: should skip picture */
1061 actual_delay = 0.010;
1063 /* launch timer for next picture */
1064 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1066 #if defined(DEBUG_SYNC)
1067 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1068 delay, actual_delay, vp->pts, -diff);
1069 #endif
1071 if(is->subtitle_st) {
1072 if (is->subtitle_stream_changed) {
1073 SDL_LockMutex(is->subpq_mutex);
1075 while (is->subpq_size) {
1076 free_subpicture(&is->subpq[is->subpq_rindex]);
1078 /* update queue size and signal for next picture */
1079 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1080 is->subpq_rindex = 0;
1082 is->subpq_size--;
1084 is->subtitle_stream_changed = 0;
1086 SDL_CondSignal(is->subpq_cond);
1087 SDL_UnlockMutex(is->subpq_mutex);
1088 } else {
1089 if (is->subpq_size > 0) {
1090 sp = &is->subpq[is->subpq_rindex];
1092 if (is->subpq_size > 1)
1093 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1094 else
1095 sp2 = NULL;
1097 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1098 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1100 free_subpicture(sp);
1102 /* update queue size and signal for next picture */
1103 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1104 is->subpq_rindex = 0;
1106 SDL_LockMutex(is->subpq_mutex);
1107 is->subpq_size--;
1108 SDL_CondSignal(is->subpq_cond);
1109 SDL_UnlockMutex(is->subpq_mutex);
1115 /* display picture */
1116 video_display(is);
1118 /* update queue size and signal for next picture */
1119 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1120 is->pictq_rindex = 0;
1122 SDL_LockMutex(is->pictq_mutex);
1123 is->pictq_size--;
1124 SDL_CondSignal(is->pictq_cond);
1125 SDL_UnlockMutex(is->pictq_mutex);
1127 } else if (is->audio_st) {
1128 /* draw the next audio frame */
1130 schedule_refresh(is, 40);
1132 /* if only audio stream, then display the audio bars (better
1133 than nothing, just to test the implementation) */
1135 /* display picture */
1136 video_display(is);
1137 } else {
1138 schedule_refresh(is, 100);
1140 if (show_status) {
1141 static int64_t last_time;
1142 int64_t cur_time;
1143 int aqsize, vqsize, sqsize;
1144 double av_diff;
1146 cur_time = av_gettime();
1147 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1148 aqsize = 0;
1149 vqsize = 0;
1150 sqsize = 0;
1151 if (is->audio_st)
1152 aqsize = is->audioq.size;
1153 if (is->video_st)
1154 vqsize = is->videoq.size;
1155 if (is->subtitle_st)
1156 sqsize = is->subtitleq.size;
1157 av_diff = 0;
1158 if (is->audio_st && is->video_st)
1159 av_diff = get_audio_clock(is) - get_video_clock(is);
1160 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1161 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1162 fflush(stdout);
1163 last_time = cur_time;
1168 /* allocate a picture (needs to do that in main thread to avoid
1169 potential locking problems) */
1170 static void alloc_picture(void *opaque)
1172 VideoState *is = opaque;
1173 VideoPicture *vp;
1175 vp = &is->pictq[is->pictq_windex];
1177 if (vp->bmp)
1178 SDL_FreeYUVOverlay(vp->bmp);
1180 #if 0
1181 /* XXX: use generic function */
1182 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1183 switch(is->video_st->codec->pix_fmt) {
1184 case PIX_FMT_YUV420P:
1185 case PIX_FMT_YUV422P:
1186 case PIX_FMT_YUV444P:
1187 case PIX_FMT_YUYV422:
1188 case PIX_FMT_YUV410P:
1189 case PIX_FMT_YUV411P:
1190 is_yuv = 1;
1191 break;
1192 default:
1193 is_yuv = 0;
1194 break;
1196 #endif
1197 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1198 is->video_st->codec->height,
1199 SDL_YV12_OVERLAY,
1200 screen);
1201 vp->width = is->video_st->codec->width;
1202 vp->height = is->video_st->codec->height;
1204 SDL_LockMutex(is->pictq_mutex);
1205 vp->allocated = 1;
1206 SDL_CondSignal(is->pictq_cond);
1207 SDL_UnlockMutex(is->pictq_mutex);
1212 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1214 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1216 VideoPicture *vp;
1217 int dst_pix_fmt;
1218 AVPicture pict;
1219 static struct SwsContext *img_convert_ctx;
1221 /* wait until we have space to put a new picture */
1222 SDL_LockMutex(is->pictq_mutex);
1223 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1224 !is->videoq.abort_request) {
1225 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1227 SDL_UnlockMutex(is->pictq_mutex);
1229 if (is->videoq.abort_request)
1230 return -1;
1232 vp = &is->pictq[is->pictq_windex];
1234 /* alloc or resize hardware picture buffer */
1235 if (!vp->bmp ||
1236 vp->width != is->video_st->codec->width ||
1237 vp->height != is->video_st->codec->height) {
1238 SDL_Event event;
1240 vp->allocated = 0;
1242 /* the allocation must be done in the main thread to avoid
1243 locking problems */
1244 event.type = FF_ALLOC_EVENT;
1245 event.user.data1 = is;
1246 SDL_PushEvent(&event);
1248 /* wait until the picture is allocated */
1249 SDL_LockMutex(is->pictq_mutex);
1250 while (!vp->allocated && !is->videoq.abort_request) {
1251 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1253 SDL_UnlockMutex(is->pictq_mutex);
1255 if (is->videoq.abort_request)
1256 return -1;
1259 /* if the frame is not skipped, then display it */
1260 if (vp->bmp) {
1261 /* get a pointer on the bitmap */
1262 SDL_LockYUVOverlay (vp->bmp);
1264 dst_pix_fmt = PIX_FMT_YUV420P;
1265 pict.data[0] = vp->bmp->pixels[0];
1266 pict.data[1] = vp->bmp->pixels[2];
1267 pict.data[2] = vp->bmp->pixels[1];
1269 pict.linesize[0] = vp->bmp->pitches[0];
1270 pict.linesize[1] = vp->bmp->pitches[2];
1271 pict.linesize[2] = vp->bmp->pitches[1];
1272 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1273 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1274 is->video_st->codec->width, is->video_st->codec->height,
1275 is->video_st->codec->pix_fmt,
1276 is->video_st->codec->width, is->video_st->codec->height,
1277 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1278 if (img_convert_ctx == NULL) {
1279 fprintf(stderr, "Cannot initialize the conversion context\n");
1280 exit(1);
1282 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1283 0, is->video_st->codec->height, pict.data, pict.linesize);
1284 /* update the bitmap content */
1285 SDL_UnlockYUVOverlay(vp->bmp);
1287 vp->pts = pts;
1289 /* now we can update the picture count */
1290 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1291 is->pictq_windex = 0;
1292 SDL_LockMutex(is->pictq_mutex);
1293 is->pictq_size++;
1294 SDL_UnlockMutex(is->pictq_mutex);
1296 return 0;
1300 * compute the exact PTS for the picture if it is omitted in the stream
1301 * @param pts1 the dts of the pkt / pts of the frame
1303 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1305 double frame_delay, pts;
1307 pts = pts1;
1309 if (pts != 0) {
1310 /* update video clock with pts, if present */
1311 is->video_clock = pts;
1312 } else {
1313 pts = is->video_clock;
1315 /* update video clock for next frame */
1316 frame_delay = av_q2d(is->video_st->codec->time_base);
1317 /* for MPEG2, the frame can be repeated, so we update the
1318 clock accordingly */
1319 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1320 is->video_clock += frame_delay;
1322 #if defined(DEBUG_SYNC) && 0
1324 int ftype;
1325 if (src_frame->pict_type == FF_B_TYPE)
1326 ftype = 'B';
1327 else if (src_frame->pict_type == FF_I_TYPE)
1328 ftype = 'I';
1329 else
1330 ftype = 'P';
1331 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1332 ftype, pts, pts1);
1334 #endif
1335 return queue_picture(is, src_frame, pts);
1338 static int video_thread(void *arg)
1340 VideoState *is = arg;
1341 AVPacket pkt1, *pkt = &pkt1;
1342 int len1, got_picture;
1343 AVFrame *frame= avcodec_alloc_frame();
1344 double pts;
1346 for(;;) {
1347 while (is->paused && !is->videoq.abort_request) {
1348 SDL_Delay(10);
1350 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1351 break;
1353 if(pkt->data == flush_pkt.data){
1354 avcodec_flush_buffers(is->video_st->codec);
1355 continue;
1358 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1359 this packet, if any */
1360 is->video_st->codec->reordered_opaque= pkt->pts;
1361 len1 = avcodec_decode_video(is->video_st->codec,
1362 frame, &got_picture,
1363 pkt->data, pkt->size);
1365 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1366 && frame->reordered_opaque != AV_NOPTS_VALUE)
1367 pts= frame->reordered_opaque;
1368 else if(pkt->dts != AV_NOPTS_VALUE)
1369 pts= pkt->dts;
1370 else
1371 pts= 0;
1372 pts *= av_q2d(is->video_st->time_base);
1374 // if (len1 < 0)
1375 // break;
1376 if (got_picture) {
1377 if (output_picture2(is, frame, pts) < 0)
1378 goto the_end;
1380 av_free_packet(pkt);
1381 if (step)
1382 if (cur_stream)
1383 stream_pause(cur_stream);
1385 the_end:
1386 av_free(frame);
1387 return 0;
1390 static int subtitle_thread(void *arg)
1392 VideoState *is = arg;
1393 SubPicture *sp;
1394 AVPacket pkt1, *pkt = &pkt1;
1395 int len1, got_subtitle;
1396 double pts;
1397 int i, j;
1398 int r, g, b, y, u, v, a;
1400 for(;;) {
1401 while (is->paused && !is->subtitleq.abort_request) {
1402 SDL_Delay(10);
1404 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1405 break;
1407 if(pkt->data == flush_pkt.data){
1408 avcodec_flush_buffers(is->subtitle_st->codec);
1409 continue;
1411 SDL_LockMutex(is->subpq_mutex);
1412 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1413 !is->subtitleq.abort_request) {
1414 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1416 SDL_UnlockMutex(is->subpq_mutex);
1418 if (is->subtitleq.abort_request)
1419 goto the_end;
1421 sp = &is->subpq[is->subpq_windex];
1423 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1424 this packet, if any */
1425 pts = 0;
1426 if (pkt->pts != AV_NOPTS_VALUE)
1427 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1429 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1430 &sp->sub, &got_subtitle,
1431 pkt->data, pkt->size);
1432 // if (len1 < 0)
1433 // break;
1434 if (got_subtitle && sp->sub.format == 0) {
1435 sp->pts = pts;
1437 for (i = 0; i < sp->sub.num_rects; i++)
1439 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1441 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1442 y = RGB_TO_Y_CCIR(r, g, b);
1443 u = RGB_TO_U_CCIR(r, g, b, 0);
1444 v = RGB_TO_V_CCIR(r, g, b, 0);
1445 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1449 /* now we can update the picture count */
1450 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1451 is->subpq_windex = 0;
1452 SDL_LockMutex(is->subpq_mutex);
1453 is->subpq_size++;
1454 SDL_UnlockMutex(is->subpq_mutex);
1456 av_free_packet(pkt);
1457 // if (step)
1458 // if (cur_stream)
1459 // stream_pause(cur_stream);
1461 the_end:
1462 return 0;
1465 /* copy samples for viewing in editor window */
1466 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1468 int size, len, channels;
1470 channels = is->audio_st->codec->channels;
1472 size = samples_size / sizeof(short);
1473 while (size > 0) {
1474 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1475 if (len > size)
1476 len = size;
1477 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1478 samples += len;
1479 is->sample_array_index += len;
1480 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1481 is->sample_array_index = 0;
1482 size -= len;
1486 /* return the new audio buffer size (samples can be added or deleted
1487 to get better sync if video or external master clock) */
1488 static int synchronize_audio(VideoState *is, short *samples,
1489 int samples_size1, double pts)
1491 int n, samples_size;
1492 double ref_clock;
1494 n = 2 * is->audio_st->codec->channels;
1495 samples_size = samples_size1;
1497 /* if not master, then we try to remove or add samples to correct the clock */
1498 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1499 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1500 double diff, avg_diff;
1501 int wanted_size, min_size, max_size, nb_samples;
1503 ref_clock = get_master_clock(is);
1504 diff = get_audio_clock(is) - ref_clock;
1506 if (diff < AV_NOSYNC_THRESHOLD) {
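            /* exponentially weighted running sum of A-V differences: each new
               diff is added and the history decays by audio_diff_avg_coef;
               multiplying the sum by (1 - coef) below turns it into a weighted
               average (geometric series normalization) */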
1507 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1508 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1509 /* not enough measures to have a correct estimate */
1510 is->audio_diff_avg_count++;
1511 } else {
1512 /* estimate the A-V difference */
1513 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1515 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1516 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1517 nb_samples = samples_size / n;
1519 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1520 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1521 if (wanted_size < min_size)
1522 wanted_size = min_size;
1523 else if (wanted_size > max_size)
1524 wanted_size = max_size;
1526 /* add or remove samples to correct the sync */
1527 if (wanted_size < samples_size) {
1528 /* remove samples */
1529 samples_size = wanted_size;
1530 } else if (wanted_size > samples_size) {
1531 uint8_t *samples_end, *q;
1532 int nb;
1534 /* add samples */
1535 nb = (wanted_size - samples_size);
1536 samples_end = (uint8_t *)samples + samples_size - n;
1537 q = samples_end + n;
1538 while (nb > 0) {
1539 memcpy(q, samples_end, n);
1540 q += n;
1541 nb -= n;
1543 samples_size = wanted_size;
1546 #if 0
1547 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1548 diff, avg_diff, samples_size - samples_size1,
1549 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1550 #endif
1552 } else {
1553 /* too big difference : may be initial PTS errors, so
1554 reset A-V filter */
1555 is->audio_diff_avg_count = 0;
1556 is->audio_diff_cum = 0;
1560 return samples_size;
1563 /* decode one audio frame and return its uncompressed size */
1564 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1566 AVPacket *pkt = &is->audio_pkt;
1567 AVCodecContext *dec= is->audio_st->codec;
1568 int n, len1, data_size;
1569 double pts;
1571 for(;;) {
1572 /* NOTE: the audio packet can contain several frames */
1573 while (is->audio_pkt_size > 0) {
1574 data_size = sizeof(is->audio_buf1);
1575 len1 = avcodec_decode_audio2(dec,
1576 (int16_t *)is->audio_buf1, &data_size,
1577 is->audio_pkt_data, is->audio_pkt_size);
1578 if (len1 < 0) {
1579 /* if error, we skip the frame */
1580 is->audio_pkt_size = 0;
1581 break;
1584 is->audio_pkt_data += len1;
1585 is->audio_pkt_size -= len1;
1586 if (data_size <= 0)
1587 continue;
1589 if (dec->sample_fmt != is->audio_src_fmt) {
1590 if (is->reformat_ctx)
1591 av_audio_convert_free(is->reformat_ctx);
1592 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1593 dec->sample_fmt, 1, NULL, 0);
1594 if (!is->reformat_ctx) {
1595 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1596 avcodec_get_sample_fmt_name(dec->sample_fmt),
1597 avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1598 break;
1600 is->audio_src_fmt= dec->sample_fmt;
1603 if (is->reformat_ctx) {
1604 const void *ibuf[6]= {is->audio_buf1};
1605 void *obuf[6]= {is->audio_buf2};
1606 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1607 int ostride[6]= {2};
1608 int len= data_size/istride[0];
1609 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1610 printf("av_audio_convert() failed\n");
1611 break;
1613 is->audio_buf= is->audio_buf2;
1614 /* FIXME: existing code assume that data_size equals framesize*channels*2
1615 remove this legacy cruft */
1616 data_size= len*2;
1617 }else{
1618 is->audio_buf= is->audio_buf1;
1621 /* if no pts, then compute it */
1622 pts = is->audio_clock;
1623 *pts_ptr = pts;
1624 n = 2 * dec->channels;
1625 is->audio_clock += (double)data_size /
1626 (double)(n * dec->sample_rate);
1627 #if defined(DEBUG_SYNC)
1629 static double last_clock;
1630 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1631 is->audio_clock - last_clock,
1632 is->audio_clock, pts);
1633 last_clock = is->audio_clock;
1635 #endif
1636 return data_size;
1639 /* free the current packet */
1640 if (pkt->data)
1641 av_free_packet(pkt);
1643 if (is->paused || is->audioq.abort_request) {
1644 return -1;
1647 /* read next packet */
1648 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1649 return -1;
1650 if(pkt->data == flush_pkt.data){
1651 avcodec_flush_buffers(dec);
1652 continue;
1655 is->audio_pkt_data = pkt->data;
1656 is->audio_pkt_size = pkt->size;
1658 /* if available, update the audio clock with the pts */
1659 if (pkt->pts != AV_NOPTS_VALUE) {
1660 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1665 /* get the current audio output buffer size, in bytes. With SDL, we
1666 cannot have precise information */
1667 static int audio_write_get_buf_size(VideoState *is)
1669 return is->audio_buf_size - is->audio_buf_index;
1673 /* prepare a new audio buffer */
1674 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1676 VideoState *is = opaque;
1677 int audio_size, len1;
1678 double pts;
1680 audio_callback_time = av_gettime();
1682 while (len > 0) {
1683 if (is->audio_buf_index >= is->audio_buf_size) {
1684 audio_size = audio_decode_frame(is, &pts);
1685 if (audio_size < 0) {
1686 /* if error, just output silence */
1687 is->audio_buf = is->audio_buf1;
1688 is->audio_buf_size = 1024;
1689 memset(is->audio_buf, 0, is->audio_buf_size);
1690 } else {
1691 if (is->show_audio)
1692 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1693 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1694 pts);
1695 is->audio_buf_size = audio_size;
1697 is->audio_buf_index = 0;
1699 len1 = is->audio_buf_size - is->audio_buf_index;
1700 if (len1 > len)
1701 len1 = len;
1702 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1703 len -= len1;
1704 stream += len1;
1705 is->audio_buf_index += len1;
1709 /* open a given stream. Return 0 if OK */
1710 static int stream_component_open(VideoState *is, int stream_index)
1712 AVFormatContext *ic = is->ic;
1713 AVCodecContext *enc;
1714 AVCodec *codec;
1715 SDL_AudioSpec wanted_spec, spec;
1717 if (stream_index < 0 || stream_index >= ic->nb_streams)
1718 return -1;
1719 enc = ic->streams[stream_index]->codec;
1721 /* prepare audio output */
1722 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1723 if (enc->channels > 0) {
1724 enc->request_channels = FFMIN(2, enc->channels);
1725 } else {
1726 enc->request_channels = 2;
1730 codec = avcodec_find_decoder(enc->codec_id);
1731 enc->debug_mv = debug_mv;
1732 enc->debug = debug;
1733 enc->workaround_bugs = workaround_bugs;
1734 enc->lowres = lowres;
1735 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1736 enc->idct_algo= idct;
1737 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1738 enc->skip_frame= skip_frame;
1739 enc->skip_idct= skip_idct;
1740 enc->skip_loop_filter= skip_loop_filter;
1741 enc->error_recognition= error_recognition;
1742 enc->error_concealment= error_concealment;
1744 set_context_opts(enc, avctx_opts[enc->codec_type], 0);
1746 if (!codec ||
1747 avcodec_open(enc, codec) < 0)
1748 return -1;
1750 /* prepare audio output */
1751 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1752 wanted_spec.freq = enc->sample_rate;
1753 wanted_spec.format = AUDIO_S16SYS;
1754 wanted_spec.channels = enc->channels;
1755 wanted_spec.silence = 0;
1756 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1757 wanted_spec.callback = sdl_audio_callback;
1758 wanted_spec.userdata = is;
1759 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1760 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1761 return -1;
1763 is->audio_hw_buf_size = spec.size;
1764 is->audio_src_fmt= SAMPLE_FMT_S16;
1767 if(thread_count>1)
1768 avcodec_thread_init(enc, thread_count);
1769 enc->thread_count= thread_count;
1770 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1771 switch(enc->codec_type) {
1772 case CODEC_TYPE_AUDIO:
1773 is->audio_stream = stream_index;
1774 is->audio_st = ic->streams[stream_index];
1775 is->audio_buf_size = 0;
1776 is->audio_buf_index = 0;
1778 /* init averaging filter */
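        /* coef is chosen so that coef^AUDIO_DIFF_AVG_NB = 0.01, i.e. after
           AUDIO_DIFF_AVG_NB frames an old measurement retains only 1% of its
           weight in the running average */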
1779 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1780 is->audio_diff_avg_count = 0;
1781 /* since we do not have a precise enough audio fifo fullness,
1782 we correct audio sync only if larger than this threshold */
1783 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1785 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1786 packet_queue_init(&is->audioq);
1787 SDL_PauseAudio(0);
1788 break;
1789 case CODEC_TYPE_VIDEO:
1790 is->video_stream = stream_index;
1791 is->video_st = ic->streams[stream_index];
1793 is->frame_last_delay = 40e-3;
1794 is->frame_timer = (double)av_gettime() / 1000000.0;
1795 is->video_current_pts_time = av_gettime();
1797 packet_queue_init(&is->videoq);
1798 is->video_tid = SDL_CreateThread(video_thread, is);
1799 break;
1800 case CODEC_TYPE_SUBTITLE:
1801 is->subtitle_stream = stream_index;
1802 is->subtitle_st = ic->streams[stream_index];
1803 packet_queue_init(&is->subtitleq);
1805 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1806 break;
1807 default:
1808 break;
1810 return 0;
1813 static void stream_component_close(VideoState *is, int stream_index)
1815 AVFormatContext *ic = is->ic;
1816 AVCodecContext *enc;
1818 if (stream_index < 0 || stream_index >= ic->nb_streams)
1819 return;
1820 enc = ic->streams[stream_index]->codec;
1822 switch(enc->codec_type) {
1823 case CODEC_TYPE_AUDIO:
1824 packet_queue_abort(&is->audioq);
1826 SDL_CloseAudio();
1828 packet_queue_end(&is->audioq);
1829 if (is->reformat_ctx)
1830 av_audio_convert_free(is->reformat_ctx);
1831 break;
1832 case CODEC_TYPE_VIDEO:
1833 packet_queue_abort(&is->videoq);
1835 /* note: we also signal this mutex to make sure we deblock the
1836 video thread in all cases */
1837 SDL_LockMutex(is->pictq_mutex);
1838 SDL_CondSignal(is->pictq_cond);
1839 SDL_UnlockMutex(is->pictq_mutex);
1841 SDL_WaitThread(is->video_tid, NULL);
1843 packet_queue_end(&is->videoq);
1844 break;
1845 case CODEC_TYPE_SUBTITLE:
1846 packet_queue_abort(&is->subtitleq);
1848 /* note: we also signal this mutex to make sure we deblock the
1849 video thread in all cases */
1850 SDL_LockMutex(is->subpq_mutex);
1851 is->subtitle_stream_changed = 1;
1853 SDL_CondSignal(is->subpq_cond);
1854 SDL_UnlockMutex(is->subpq_mutex);
1856 SDL_WaitThread(is->subtitle_tid, NULL);
1858 packet_queue_end(&is->subtitleq);
1859 break;
1860 default:
1861 break;
1864 ic->streams[stream_index]->discard = AVDISCARD_ALL;
1865 avcodec_close(enc);
1866 switch(enc->codec_type) {
1867 case CODEC_TYPE_AUDIO:
1868 is->audio_st = NULL;
1869 is->audio_stream = -1;
1870 break;
1871 case CODEC_TYPE_VIDEO:
1872 is->video_st = NULL;
1873 is->video_stream = -1;
1874 break;
1875 case CODEC_TYPE_SUBTITLE:
1876 is->subtitle_st = NULL;
1877 is->subtitle_stream = -1;
1878 break;
1879 default:
1880 break;
1884 static void dump_stream_info(const AVFormatContext *s)
1886 if (s->track != 0)
1887 fprintf(stderr, "Track: %d\n", s->track);
1888 if (s->title[0] != '\0')
1889 fprintf(stderr, "Title: %s\n", s->title);
1890 if (s->author[0] != '\0')
1891 fprintf(stderr, "Author: %s\n", s->author);
1892 if (s->copyright[0] != '\0')
1893 fprintf(stderr, "Copyright: %s\n", s->copyright);
1894 if (s->comment[0] != '\0')
1895 fprintf(stderr, "Comment: %s\n", s->comment);
1896 if (s->album[0] != '\0')
1897 fprintf(stderr, "Album: %s\n", s->album);
1898 if (s->year != 0)
1899 fprintf(stderr, "Year: %d\n", s->year);
1900 if (s->genre[0] != '\0')
1901 fprintf(stderr, "Genre: %s\n", s->genre);
1904 /* since we have only one decoding thread, we can use a global
1905 variable instead of a thread local variable */
1906 static VideoState *global_video_state;
1908 static int decode_interrupt_cb(void)
1910 return (global_video_state && global_video_state->abort_request);
1913 /* this thread gets the stream from the disk or the network */
1914 static int decode_thread(void *arg)
1916 VideoState *is = arg;
1917 AVFormatContext *ic;
1918 int err, i, ret, video_index, audio_index;
1919 AVPacket pkt1, *pkt = &pkt1;
1920 AVFormatParameters params, *ap = &params;
1922 video_index = -1;
1923 audio_index = -1;
1924 is->video_stream = -1;
1925 is->audio_stream = -1;
1926 is->subtitle_stream = -1;
1928 global_video_state = is;
1929 url_set_interrupt_cb(decode_interrupt_cb);
1931 memset(ap, 0, sizeof(*ap));
1933 ap->width = frame_width;
1934 ap->height= frame_height;
1935 ap->time_base= (AVRational){1, 25};
1936 ap->pix_fmt = frame_pix_fmt;
1938 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1939 if (err < 0) {
1940 print_error(is->filename, err);
1941 ret = -1;
1942 goto fail;
1944 is->ic = ic;
1946 if(genpts)
1947 ic->flags |= AVFMT_FLAG_GENPTS;
1949 err = av_find_stream_info(ic);
1950 if (err < 0) {
1951 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1952 ret = -1;
1953 goto fail;
1955 if(ic->pb)
1956 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1958 /* if seeking requested, we execute it */
1959 if (start_time != AV_NOPTS_VALUE) {
1960 int64_t timestamp;
1962 timestamp = start_time;
1963 /* add the stream start time */
1964 if (ic->start_time != AV_NOPTS_VALUE)
1965 timestamp += ic->start_time;
1966 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1967 if (ret < 0) {
1968 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1969 is->filename, (double)timestamp / AV_TIME_BASE);
1973 for(i = 0; i < ic->nb_streams; i++) {
1974 AVCodecContext *enc = ic->streams[i]->codec;
1975 ic->streams[i]->discard = AVDISCARD_ALL;
1976 switch(enc->codec_type) {
1977 case CODEC_TYPE_AUDIO:
1978 if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1979 audio_index = i;
1980 break;
1981 case CODEC_TYPE_VIDEO:
1982 if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
1983 video_index = i;
1984 break;
1985 default:
1986 break;
1989 if (show_status) {
1990 dump_format(ic, 0, is->filename, 0);
1991 dump_stream_info(ic);
1994 /* open the streams */
1995 if (audio_index >= 0) {
1996 stream_component_open(is, audio_index);
1999 if (video_index >= 0) {
2000 stream_component_open(is, video_index);
2001 } else {
2002 if (!display_disable)
2003 is->show_audio = 1;
2006 if (is->video_stream < 0 && is->audio_stream < 0) {
2007 fprintf(stderr, "%s: could not open codecs\n", is->filename);
2008 ret = -1;
2009 goto fail;
2012 for(;;) {
2013 if (is->abort_request)
2014 break;
2015 if (is->paused != is->last_paused) {
2016 is->last_paused = is->paused;
2017 if (is->paused)
2018 av_read_pause(ic);
2019 else
2020 av_read_play(ic);
2022 #if defined(CONFIG_RTSP_DEMUXER) || defined(CONFIG_MMSH_PROTOCOL)
2023 if (is->paused &&
2024 (!strcmp(ic->iformat->name, "rtsp") ||
2025 (ic->pb && !strcmp(url_fileno(ic->pb)->prot->name, "mmsh")))) {
2026 /* wait 10 ms to avoid trying to get another packet */
2027 /* XXX: horrible */
2028 SDL_Delay(10);
2029 continue;
2031 #endif
2032 if (is->seek_req) {
2033 int stream_index= -1;
2034 int64_t seek_target= is->seek_pos;
2036 if (is-> video_stream >= 0) stream_index= is-> video_stream;
2037 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
2038 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
2040 if(stream_index>=0){
2041 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2044 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2045 if (ret < 0) {
2046 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2047 }else{
2048 if (is->audio_stream >= 0) {
2049 packet_queue_flush(&is->audioq);
2050 packet_queue_put(&is->audioq, &flush_pkt);
2052 if (is->subtitle_stream >= 0) {
2053 packet_queue_flush(&is->subtitleq);
2054 packet_queue_put(&is->subtitleq, &flush_pkt);
2056 if (is->video_stream >= 0) {
2057 packet_queue_flush(&is->videoq);
2058 packet_queue_put(&is->videoq, &flush_pkt);
2061 is->seek_req = 0;
2064 /* if the queues are full, no need to read more */
2065 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2066 is->videoq.size > MAX_VIDEOQ_SIZE ||
2067 is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
2068 /* wait 10 ms */
2069 SDL_Delay(10);
2070 continue;
2072 if(url_feof(ic->pb)) {
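            /* at end of file, queue an empty packet so the video decoder can
               flush out any frames it is still buffering (codecs with delay) */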
2073 av_init_packet(pkt);
2074 pkt->data=
2075 pkt->size=0;
2076 pkt->stream_index= is->video_stream;
2077 packet_queue_put(&is->videoq, pkt);
2078 continue;
2080 ret = av_read_frame(ic, pkt);
2081 if (ret < 0) {
2082 if (url_ferror(ic->pb) == 0) {
2083 SDL_Delay(100); /* wait for user event */
2084 continue;
2085 } else
2086 break;
2088 if (pkt->stream_index == is->audio_stream) {
2089 packet_queue_put(&is->audioq, pkt);
2090 } else if (pkt->stream_index == is->video_stream) {
2091 packet_queue_put(&is->videoq, pkt);
2092 } else if (pkt->stream_index == is->subtitle_stream) {
2093 packet_queue_put(&is->subtitleq, pkt);
2094 } else {
2095 av_free_packet(pkt);
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
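
/* allocate a VideoState for the given input, create the picture/subtitle
   queue synchronization primitives, schedule the first display refresh and
   spawn the decode (demuxing) thread */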
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}
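
/* ask the decode thread to abort and wait for it, then free the SDL
   overlays and synchronization primitives owned by the VideoState */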
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
}
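
/* switch to the next stream of the given type; for subtitles the search may
   wrap around to "no stream" (-1), while an audio candidate must report a
   valid sample rate and channel count */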
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams) {
            if (codec_type == CODEC_TYPE_SUBTITLE) {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
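
/* small UI helpers invoked from the keyboard handlers in event_loop() */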
static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}
/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)\n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
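
/* command line option handlers */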
static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}
static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
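
/* command line option table, terminated by a NULL entry; OPT_EXPERT entries
   are only listed under "Advanced options" in show_help() */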
static const OptionDef options[] = {
    { "h", OPT_EXIT, {(void*)show_help}, "show help" },
    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", "" },
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
static void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}
/* Called from the main */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avctx_opts[i] = avcodec_alloc_context2(i);
    }
    avformat_opts = av_alloc_format_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        fprintf(stderr, "An input file must be specified\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data = "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}