1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <math.h>
23 #include <limits.h>
24 #include "avformat.h"
25 #include "swscale.h"
26 #include "avstring.h"
28 #include "version.h"
29 #include "cmdutils.h"
31 #include <SDL.h>
32 #include <SDL_thread.h>
34 #ifdef __MINGW32__
35 #undef main /* We don't want SDL to override our main() */
36 #endif
38 #ifdef CONFIG_OS2
39 #define INCL_DOS
40 #include <os2.h>
41 #include <stdio.h>
43 void MorphToPM()
45 PPIB pib;
46 PTIB tib;
48 DosGetInfoBlocks(&tib, &pib);
50 // Change flag from VIO to PM:
51 if (pib->pib_ultype==2) pib->pib_ultype = 3;
53 #endif
55 #undef exit
57 //#define DEBUG_SYNC
59 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
60 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
61 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
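/* When any of these queues grows past its cap, decode_thread stops calling
   av_read_frame() and simply sleeps (see the SDL_Delay(10) in the read loop
   below), so the limits act as back-pressure on the demuxer rather than as
   hard errors. */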
63 /* SDL audio buffer size, in samples. Should be small to have precise
64 A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if the error is too big */
70 #define AV_NOSYNC_THRESHOLD 10.0
72 /* maximum audio speed change to get correct sync */
73 #define SAMPLE_CORRECTION_PERCENT_MAX 10
75 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
76 #define AUDIO_DIFF_AVG_NB 20
78 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
79 #define SAMPLE_ARRAY_SIZE (2*65536)
81 static int sws_flags = SWS_BICUBIC;
83 typedef struct PacketQueue {
84 AVPacketList *first_pkt, *last_pkt;
85 int nb_packets;
86 int size;
87 int abort_request;
88 SDL_mutex *mutex;
89 SDL_cond *cond;
90 } PacketQueue;
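/* PacketQueue is a thread-safe FIFO of demuxed AVPackets: decode_thread
   appends with packet_queue_put(), while the audio callback and the
   video/subtitle threads pop with packet_queue_get(), blocking on 'cond'
   when the queue is empty. Setting 'abort_request' wakes any waiter so the
   queue can be torn down cleanly. */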
92 #define VIDEO_PICTURE_QUEUE_SIZE 1
93 #define SUBPICTURE_QUEUE_SIZE 4
95 typedef struct VideoPicture {
96 double pts; ///<presentation time stamp for this picture
97 SDL_Overlay *bmp;
98 int width, height; /* source height & width */
99 int allocated;
100 } VideoPicture;
102 typedef struct SubPicture {
103 double pts; /* presentation time stamp for this picture */
104 AVSubtitle sub;
105 } SubPicture;
107 enum {
108 AV_SYNC_AUDIO_MASTER, /* default choice */
109 AV_SYNC_VIDEO_MASTER,
110 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
113 typedef struct VideoState {
114 SDL_Thread *parse_tid;
115 SDL_Thread *video_tid;
116 AVInputFormat *iformat;
117 int no_background;
118 int abort_request;
119 int paused;
120 int last_paused;
121 int seek_req;
122 int seek_flags;
123 int64_t seek_pos;
124 AVFormatContext *ic;
125 int dtg_active_format;
127 int audio_stream;
129 int av_sync_type;
130 double external_clock; /* external clock base */
131 int64_t external_clock_time;
133 double audio_clock;
134 double audio_diff_cum; /* used for AV difference average computation */
135 double audio_diff_avg_coef;
136 double audio_diff_threshold;
137 int audio_diff_avg_count;
138 AVStream *audio_st;
139 PacketQueue audioq;
140 int audio_hw_buf_size;
141 /* samples output by the codec. We reserve extra space for A/V sync
142 compensation */
143 DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
144 unsigned int audio_buf_size; /* in bytes */
145 int audio_buf_index; /* in bytes */
146 AVPacket audio_pkt;
147 uint8_t *audio_pkt_data;
148 int audio_pkt_size;
150 int show_audio; /* if true, display audio samples */
151 int16_t sample_array[SAMPLE_ARRAY_SIZE];
152 int sample_array_index;
153 int last_i_start;
155 SDL_Thread *subtitle_tid;
156 int subtitle_stream;
157 int subtitle_stream_changed;
158 AVStream *subtitle_st;
159 PacketQueue subtitleq;
160 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
161 int subpq_size, subpq_rindex, subpq_windex;
162 SDL_mutex *subpq_mutex;
163 SDL_cond *subpq_cond;
165 double frame_timer;
166 double frame_last_pts;
167 double frame_last_delay;
168 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
169 int video_stream;
170 AVStream *video_st;
171 PacketQueue videoq;
172 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
173 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
174 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
175 int pictq_size, pictq_rindex, pictq_windex;
176 SDL_mutex *pictq_mutex;
177 SDL_cond *pictq_cond;
179 // QETimer *video_timer;
180 char filename[1024];
181 int width, height, xleft, ytop;
182 } VideoState;
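/* Thread layout implied by this struct: parse_tid runs decode_thread()
   (demuxing and seeking), video_tid runs video_thread(), subtitle_tid runs
   subtitle_thread(), audio is decoded inside the SDL audio callback, and the
   main thread owns SDL event handling plus all rendering. pictq[] and
   subpq[] are small ring buffers handing decoded pictures and subtitles to
   the display code, each guarded by its mutex/cond pair. */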
184 void show_help(void);
185 static int audio_write_get_buf_size(VideoState *is);
187 /* options specified by the user */
188 static AVInputFormat *file_iformat;
189 static const char *input_filename;
190 static int fs_screen_width;
191 static int fs_screen_height;
192 static int screen_width = 0;
193 static int screen_height = 0;
194 static int frame_width = 0;
195 static int frame_height = 0;
196 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
197 static int audio_disable;
198 static int video_disable;
199 static int wanted_audio_stream= 0;
200 static int seek_by_bytes;
201 static int display_disable;
202 static int show_status;
203 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
204 static int64_t start_time = AV_NOPTS_VALUE;
205 static int debug = 0;
206 static int debug_mv = 0;
207 static int step = 0;
208 static int thread_count = 1;
209 static int workaround_bugs = 1;
210 static int fast = 0;
211 static int genpts = 0;
212 static int lowres = 0;
213 static int idct = FF_IDCT_AUTO;
214 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
215 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
216 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
217 static int error_resilience = FF_ER_CAREFUL;
218 static int error_concealment = 3;
219 static int decoder_reorder_pts= 0;
221 /* current context */
222 static int is_full_screen;
223 static VideoState *cur_stream;
224 static int64_t audio_callback_time;
226 AVPacket flush_pkt;
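/* flush_pkt is a sentinel: after a seek, decode_thread flushes each queue and
   pushes this packet; a decoder that pops a packet whose data pointer equals
   flush_pkt.data calls avcodec_flush_buffers() instead of decoding it. */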
228 #define FF_ALLOC_EVENT (SDL_USEREVENT)
229 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
230 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
232 SDL_Surface *screen;
234 /* packet queue handling */
235 static void packet_queue_init(PacketQueue *q)
237 memset(q, 0, sizeof(PacketQueue));
238 q->mutex = SDL_CreateMutex();
239 q->cond = SDL_CreateCond();
242 static void packet_queue_flush(PacketQueue *q)
244 AVPacketList *pkt, *pkt1;
246 SDL_LockMutex(q->mutex);
247 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
248 pkt1 = pkt->next;
249 av_free_packet(&pkt->pkt);
250 av_freep(&pkt);
252 q->last_pkt = NULL;
253 q->first_pkt = NULL;
254 q->nb_packets = 0;
255 q->size = 0;
256 SDL_UnlockMutex(q->mutex);
259 static void packet_queue_end(PacketQueue *q)
261 packet_queue_flush(q);
262 SDL_DestroyMutex(q->mutex);
263 SDL_DestroyCond(q->cond);
266 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
268 AVPacketList *pkt1;
270 /* duplicate the packet */
271 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
272 return -1;
274 pkt1 = av_malloc(sizeof(AVPacketList));
275 if (!pkt1)
276 return -1;
277 pkt1->pkt = *pkt;
278 pkt1->next = NULL;
281 SDL_LockMutex(q->mutex);
283 if (!q->last_pkt)
285 q->first_pkt = pkt1;
286 else
287 q->last_pkt->next = pkt1;
288 q->last_pkt = pkt1;
289 q->nb_packets++;
290 q->size += pkt1->pkt.size;
291 /* XXX: should duplicate packet data in DV case */
292 SDL_CondSignal(q->cond);
294 SDL_UnlockMutex(q->mutex);
295 return 0;
298 static void packet_queue_abort(PacketQueue *q)
300 SDL_LockMutex(q->mutex);
302 q->abort_request = 1;
304 SDL_CondSignal(q->cond);
306 SDL_UnlockMutex(q->mutex);
309 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
310 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
312 AVPacketList *pkt1;
313 int ret;
315 SDL_LockMutex(q->mutex);
317 for(;;) {
318 if (q->abort_request) {
319 ret = -1;
320 break;
323 pkt1 = q->first_pkt;
324 if (pkt1) {
325 q->first_pkt = pkt1->next;
326 if (!q->first_pkt)
327 q->last_pkt = NULL;
328 q->nb_packets--;
329 q->size -= pkt1->pkt.size;
330 *pkt = pkt1->pkt;
331 av_free(pkt1);
332 ret = 1;
333 break;
334 } else if (!block) {
335 ret = 0;
336 break;
337 } else {
338 SDL_CondWait(q->cond, q->mutex);
341 SDL_UnlockMutex(q->mutex);
342 return ret;
345 static inline void fill_rectangle(SDL_Surface *screen,
346 int x, int y, int w, int h, int color)
348 SDL_Rect rect;
349 rect.x = x;
350 rect.y = y;
351 rect.w = w;
352 rect.h = h;
353 SDL_FillRect(screen, &rect, color);
356 #if 0
357 /* draw only the border of a rectangle */
358 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
360 int w1, w2, h1, h2;
362 /* fill the background */
363 w1 = x;
364 if (w1 < 0)
365 w1 = 0;
366 w2 = s->width - (x + w);
367 if (w2 < 0)
368 w2 = 0;
369 h1 = y;
370 if (h1 < 0)
371 h1 = 0;
372 h2 = s->height - (y + h);
373 if (h2 < 0)
374 h2 = 0;
375 fill_rectangle(screen,
376 s->xleft, s->ytop,
377 w1, s->height,
378 color);
379 fill_rectangle(screen,
380 s->xleft + s->width - w2, s->ytop,
381 w2, s->height,
382 color);
383 fill_rectangle(screen,
384 s->xleft + w1, s->ytop,
385 s->width - w1 - w2, h1,
386 color);
387 fill_rectangle(screen,
388 s->xleft + w1, s->ytop + s->height - h2,
389 s->width - w1 - w2, h2,
390 color);
392 #endif
396 #define SCALEBITS 10
397 #define ONE_HALF (1 << (SCALEBITS - 1))
398 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
400 #define RGB_TO_Y_CCIR(r, g, b) \
401 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
402 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
404 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
405 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
406 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
408 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
409 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
410 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
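/* Fixed-point RGB -> YCbCr conversion following ITU-R BT.601 (CCIR 601)
   limited range: luma is scaled by 219/255 and offset by +16, chroma by
   224/255 with a +128 bias, using SCALEBITS fractional bits and ONE_HALF for
   rounding. These are used below to convert subtitle palettes before
   blending. */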
412 #define ALPHA_BLEND(a, oldp, newp, s)\
413 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
415 #define RGBA_IN(r, g, b, a, s)\
417 unsigned int v = ((const uint32_t *)(s))[0];\
418 a = (v >> 24) & 0xff;\
419 r = (v >> 16) & 0xff;\
420 g = (v >> 8) & 0xff;\
421 b = v & 0xff;\
424 #define YUVA_IN(y, u, v, a, s, pal)\
426 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
427 a = (val >> 24) & 0xff;\
428 y = (val >> 16) & 0xff;\
429 u = (val >> 8) & 0xff;\
430 v = val & 0xff;\
433 #define YUVA_OUT(d, y, u, v, a)\
435 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
439 #define BPP 1
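/* blend_subrect() blends one palettized subtitle rectangle (8 bits per
   pixel, palette already converted to YCbCr by subtitle_thread) onto a
   YUV420P picture. Because chroma is subsampled 2x2, pixels are processed in
   2x2 groups with u1/v1/a1 accumulating the group sums; the branches handle
   the odd leading row/column and odd trailing width/height separately. */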
441 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
443 int wrap, wrap3, width2, skip2;
444 int y, u, v, a, u1, v1, a1, w, h;
445 uint8_t *lum, *cb, *cr;
446 const uint8_t *p;
447 const uint32_t *pal;
449 lum = dst->data[0] + rect->y * dst->linesize[0];
450 cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
451 cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];
453 width2 = (rect->w + 1) >> 1;
454 skip2 = rect->x >> 1;
455 wrap = dst->linesize[0];
456 wrap3 = rect->linesize;
457 p = rect->bitmap;
458 pal = rect->rgba_palette; /* Now in YCrCb! */
460 if (rect->y & 1) {
461 lum += rect->x;
462 cb += skip2;
463 cr += skip2;
465 if (rect->x & 1) {
466 YUVA_IN(y, u, v, a, p, pal);
467 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470 cb++;
471 cr++;
472 lum++;
473 p += BPP;
475 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
476 YUVA_IN(y, u, v, a, p, pal);
477 u1 = u;
478 v1 = v;
479 a1 = a;
480 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
482 YUVA_IN(y, u, v, a, p + BPP, pal);
483 u1 += u;
484 v1 += v;
485 a1 += a;
486 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489 cb++;
490 cr++;
491 p += 2 * BPP;
492 lum += 2;
494 if (w) {
495 YUVA_IN(y, u, v, a, p, pal);
496 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
500 p += wrap3 + (wrap3 - rect->w * BPP);
501 lum += wrap + (wrap - rect->w - rect->x);
502 cb += dst->linesize[1] - width2 - skip2;
503 cr += dst->linesize[2] - width2 - skip2;
505 for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
506 lum += rect->x;
507 cb += skip2;
508 cr += skip2;
510 if (rect->x & 1) {
511 YUVA_IN(y, u, v, a, p, pal);
512 u1 = u;
513 v1 = v;
514 a1 = a;
515 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
516 p += wrap3;
517 lum += wrap;
518 YUVA_IN(y, u, v, a, p, pal);
519 u1 += u;
520 v1 += v;
521 a1 += a;
522 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
524 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
525 cb++;
526 cr++;
527 p += -wrap3 + BPP;
528 lum += -wrap + 1;
530 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
531 YUVA_IN(y, u, v, a, p, pal);
532 u1 = u;
533 v1 = v;
534 a1 = a;
535 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537 YUVA_IN(y, u, v, a, p, pal);
538 u1 += u;
539 v1 += v;
540 a1 += a;
541 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
542 p += wrap3;
543 lum += wrap;
545 YUVA_IN(y, u, v, a, p, pal);
546 u1 += u;
547 v1 += v;
548 a1 += a;
549 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
551 YUVA_IN(y, u, v, a, p, pal);
552 u1 += u;
553 v1 += v;
554 a1 += a;
555 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
557 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
558 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
560 cb++;
561 cr++;
562 p += -wrap3 + 2 * BPP;
563 lum += -wrap + 2;
565 if (w) {
566 YUVA_IN(y, u, v, a, p, pal);
567 u1 = u;
568 v1 = v;
569 a1 = a;
570 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
571 p += wrap3;
572 lum += wrap;
573 YUVA_IN(y, u, v, a, p, pal);
574 u1 += u;
575 v1 += v;
576 a1 += a;
577 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
579 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
580 cb++;
581 cr++;
582 p += -wrap3 + BPP;
583 lum += -wrap + 1;
585 p += wrap3 + (wrap3 - rect->w * BPP);
586 lum += wrap + (wrap - rect->w - rect->x);
587 cb += dst->linesize[1] - width2 - skip2;
588 cr += dst->linesize[2] - width2 - skip2;
590 /* handle odd height */
591 if (h) {
592 lum += rect->x;
593 cb += skip2;
594 cr += skip2;
596 if (rect->x & 1) {
597 YUVA_IN(y, u, v, a, p, pal);
598 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
599 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
600 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
601 cb++;
602 cr++;
603 lum++;
604 p += BPP;
606 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
607 YUVA_IN(y, u, v, a, p, pal);
608 u1 = u;
609 v1 = v;
610 a1 = a;
611 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613 YUVA_IN(y, u, v, a, p + BPP, pal);
614 u1 += u;
615 v1 += v;
616 a1 += a;
617 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
618 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
619 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
620 cb++;
621 cr++;
622 p += 2 * BPP;
623 lum += 2;
625 if (w) {
626 YUVA_IN(y, u, v, a, p, pal);
627 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
629 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
634 static void free_subpicture(SubPicture *sp)
636 int i;
638 for (i = 0; i < sp->sub.num_rects; i++)
640 av_free(sp->sub.rects[i].bitmap);
641 av_free(sp->sub.rects[i].rgba_palette);
644 av_free(sp->sub.rects);
646 memset(&sp->sub, 0, sizeof(AVSubtitle));
649 static void video_image_display(VideoState *is)
651 VideoPicture *vp;
652 SubPicture *sp;
653 AVPicture pict;
654 float aspect_ratio;
655 int width, height, x, y;
656 SDL_Rect rect;
657 int i;
659 vp = &is->pictq[is->pictq_rindex];
660 if (vp->bmp) {
661 /* XXX: use variable in the frame */
662 if (is->video_st->codec->sample_aspect_ratio.num == 0)
663 aspect_ratio = 0;
664 else
665 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
666 * is->video_st->codec->width / is->video_st->codec->height;
667 if (aspect_ratio <= 0.0)
668 aspect_ratio = (float)is->video_st->codec->width /
669 (float)is->video_st->codec->height;
670 /* if an active format is indicated, then it overrides the
671 mpeg format */
672 #if 0
673 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
674 is->dtg_active_format = is->video_st->codec->dtg_active_format;
675 printf("dtg_active_format=%d\n", is->dtg_active_format);
677 #endif
678 #if 0
679 switch(is->video_st->codec->dtg_active_format) {
680 case FF_DTG_AFD_SAME:
681 default:
682 /* nothing to do */
683 break;
684 case FF_DTG_AFD_4_3:
685 aspect_ratio = 4.0 / 3.0;
686 break;
687 case FF_DTG_AFD_16_9:
688 aspect_ratio = 16.0 / 9.0;
689 break;
690 case FF_DTG_AFD_14_9:
691 aspect_ratio = 14.0 / 9.0;
692 break;
693 case FF_DTG_AFD_4_3_SP_14_9:
694 aspect_ratio = 14.0 / 9.0;
695 break;
696 case FF_DTG_AFD_16_9_SP_14_9:
697 aspect_ratio = 14.0 / 9.0;
698 break;
699 case FF_DTG_AFD_SP_4_3:
700 aspect_ratio = 4.0 / 3.0;
701 break;
703 #endif
705 if (is->subtitle_st)
707 if (is->subpq_size > 0)
709 sp = &is->subpq[is->subpq_rindex];
711 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
713 SDL_LockYUVOverlay (vp->bmp);
715 pict.data[0] = vp->bmp->pixels[0];
716 pict.data[1] = vp->bmp->pixels[2];
717 pict.data[2] = vp->bmp->pixels[1];
719 pict.linesize[0] = vp->bmp->pitches[0];
720 pict.linesize[1] = vp->bmp->pitches[2];
721 pict.linesize[2] = vp->bmp->pitches[1];
723 for (i = 0; i < sp->sub.num_rects; i++)
724 blend_subrect(&pict, &sp->sub.rects[i]);
726 SDL_UnlockYUVOverlay (vp->bmp);
732 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
733 height = is->height;
734 width = ((int)rint(height * aspect_ratio)) & -3;
735 if (width > is->width) {
736 width = is->width;
737 height = ((int)rint(width / aspect_ratio)) & -3;
739 x = (is->width - width) / 2;
740 y = (is->height - height) / 2;
741 if (!is->no_background) {
742 /* fill the background */
743 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
744 } else {
745 is->no_background = 0;
747 rect.x = is->xleft + x;
748 rect.y = is->ytop + y;
749 rect.w = width;
750 rect.h = height;
751 SDL_DisplayYUVOverlay(vp->bmp, &rect);
752 } else {
753 #if 0
754 fill_rectangle(screen,
755 is->xleft, is->ytop, is->width, is->height,
756 QERGB(0x00, 0x00, 0x00));
757 #endif
761 static inline int compute_mod(int a, int b)
763 a = a % b;
764 if (a >= 0)
765 return a;
766 else
767 return a + b;
770 static void video_audio_display(VideoState *s)
772 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
773 int ch, channels, h, h2, bgcolor, fgcolor;
774 int64_t time_diff;
776 /* compute display index : center on currently output samples */
777 channels = s->audio_st->codec->channels;
778 nb_display_channels = channels;
779 if (!s->paused) {
780 n = 2 * channels;
781 delay = audio_write_get_buf_size(s);
782 delay /= n;
784 /* to be more precise, we take into account the time spent since
785 the last buffer computation */
786 if (audio_callback_time) {
787 time_diff = av_gettime() - audio_callback_time;
788 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
791 delay -= s->width / 2;
792 if (delay < s->width)
793 delay = s->width;
795 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
797 h= INT_MIN;
798 for(i=0; i<1000; i+=channels){
799 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
800 int a= s->sample_array[idx];
801 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
802 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
803 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
804 int score= a-d;
805 if(h<score && (b^c)<0){
806 h= score;
807 i_start= idx;
811 s->last_i_start = i_start;
812 } else {
813 i_start = s->last_i_start;
816 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
817 fill_rectangle(screen,
818 s->xleft, s->ytop, s->width, s->height,
819 bgcolor);
821 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
823 /* total height for one channel */
824 h = s->height / nb_display_channels;
825 /* graph height / 2 */
826 h2 = (h * 9) / 20;
827 for(ch = 0;ch < nb_display_channels; ch++) {
828 i = i_start + ch;
829 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
830 for(x = 0; x < s->width; x++) {
831 y = (s->sample_array[i] * h2) >> 15;
832 if (y < 0) {
833 y = -y;
834 ys = y1 - y;
835 } else {
836 ys = y1;
838 fill_rectangle(screen,
839 s->xleft + x, ys, 1, y,
840 fgcolor);
841 i += channels;
842 if (i >= SAMPLE_ARRAY_SIZE)
843 i -= SAMPLE_ARRAY_SIZE;
847 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
849 for(ch = 1;ch < nb_display_channels; ch++) {
850 y = s->ytop + ch * h;
851 fill_rectangle(screen,
852 s->xleft, y, s->width, 1,
853 fgcolor);
855 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
858 static int video_open(VideoState *is){
859 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
860 int w,h;
862 if(is_full_screen) flags |= SDL_FULLSCREEN;
863 else flags |= SDL_RESIZABLE;
865 if (is_full_screen && fs_screen_width) {
866 w = fs_screen_width;
867 h = fs_screen_height;
868 } else if(!is_full_screen && screen_width){
869 w = screen_width;
870 h = screen_height;
871 }else if (is->video_st && is->video_st->codec->width){
872 w = is->video_st->codec->width;
873 h = is->video_st->codec->height;
874 } else {
875 w = 640;
876 h = 480;
878 #ifndef CONFIG_DARWIN
879 screen = SDL_SetVideoMode(w, h, 0, flags);
880 #else
881 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
882 screen = SDL_SetVideoMode(w, h, 24, flags);
883 #endif
884 if (!screen) {
885 fprintf(stderr, "SDL: could not set video mode - exiting\n");
886 return -1;
888 SDL_WM_SetCaption("FFplay", "FFplay");
890 is->width = screen->w;
891 is->height = screen->h;
893 return 0;
896 /* display the current picture, if any */
897 static void video_display(VideoState *is)
899 if(!screen)
900 video_open(cur_stream);
901 if (is->audio_st && is->show_audio)
902 video_audio_display(is);
903 else if (is->video_st)
904 video_image_display(is);
907 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
909 SDL_Event event;
910 event.type = FF_REFRESH_EVENT;
911 event.user.data1 = opaque;
912 SDL_PushEvent(&event);
913 return 0; /* 0 means stop timer */
916 /* schedule a video refresh in 'delay' ms */
917 static void schedule_refresh(VideoState *is, int delay)
919 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
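/* SDL_AddTimer fires sdl_refresh_timer_cb() from SDL's timer thread after
   'delay' ms; the callback only pushes FF_REFRESH_EVENT, which the event
   loop further down in this file is expected to turn into a
   video_refresh_timer() call, so the actual drawing stays on the main
   thread. */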
922 /* get the current audio clock value */
923 static double get_audio_clock(VideoState *is)
925 double pts;
926 int hw_buf_size, bytes_per_sec;
927 pts = is->audio_clock;
928 hw_buf_size = audio_write_get_buf_size(is);
929 bytes_per_sec = 0;
930 if (is->audio_st) {
931 bytes_per_sec = is->audio_st->codec->sample_rate *
932 2 * is->audio_st->codec->channels;
934 if (bytes_per_sec)
935 pts -= (double)hw_buf_size / bytes_per_sec;
936 return pts;
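/* i.e. audio clock = PTS of the most recently decoded audio data minus the
   playing time of what still sits in our buffer:
   buffered_bytes / (sample_rate * 2 * channels), assuming 16-bit samples
   (the output format is forced to AUDIO_S16SYS in stream_component_open). */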
939 /* get the current video clock value */
940 static double get_video_clock(VideoState *is)
942 double delta;
943 if (is->paused) {
944 delta = 0;
945 } else {
946 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
948 return is->video_current_pts + delta;
951 /* get the current external clock value */
952 static double get_external_clock(VideoState *is)
954 int64_t ti;
955 ti = av_gettime();
956 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
959 /* get the current master clock value */
960 static double get_master_clock(VideoState *is)
962 double val;
964 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
965 if (is->video_st)
966 val = get_video_clock(is);
967 else
968 val = get_audio_clock(is);
969 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
970 if (is->audio_st)
971 val = get_audio_clock(is);
972 else
973 val = get_video_clock(is);
974 } else {
975 val = get_external_clock(is);
977 return val;
980 /* seek in the stream */
981 static void stream_seek(VideoState *is, int64_t pos, int rel)
983 if (!is->seek_req) {
984 is->seek_pos = pos;
985 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
986 if (seek_by_bytes)
987 is->seek_flags |= AVSEEK_FLAG_BYTE;
988 is->seek_req = 1;
992 /* pause or resume the video */
993 static void stream_pause(VideoState *is)
995 is->paused = !is->paused;
996 if (!is->paused) {
997 is->video_current_pts = get_video_clock(is);
998 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
1002 /* called to display each frame */
1003 static void video_refresh_timer(void *opaque)
1005 VideoState *is = opaque;
1006 VideoPicture *vp;
1007 double actual_delay, delay, sync_threshold, ref_clock, diff;
1009 SubPicture *sp, *sp2;
1011 if (is->video_st) {
1012 if (is->pictq_size == 0) {
1013 /* if no picture, need to wait */
1014 schedule_refresh(is, 1);
1015 } else {
1016 /* dequeue the picture */
1017 vp = &is->pictq[is->pictq_rindex];
1019 /* update current video pts */
1020 is->video_current_pts = vp->pts;
1021 is->video_current_pts_time = av_gettime();
1023 /* compute nominal delay */
1024 delay = vp->pts - is->frame_last_pts;
1025 if (delay <= 0 || delay >= 1.0) {
1026 /* if incorrect delay, use previous one */
1027 delay = is->frame_last_delay;
1029 is->frame_last_delay = delay;
1030 is->frame_last_pts = vp->pts;
1032 /* update delay to follow master synchronisation source */
1033 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1034 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1035 /* if video is slave, we try to correct big delays by
1036 duplicating or deleting a frame */
1037 ref_clock = get_master_clock(is);
1038 diff = vp->pts - ref_clock;
1040 /* skip or repeat frame. We take into account the
1041 delay to compute the threshold. I still don't know
1042 if it is the best guess */
1043 sync_threshold = AV_SYNC_THRESHOLD;
1044 if (delay > sync_threshold)
1045 sync_threshold = delay;
1046 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1047 if (diff <= -sync_threshold)
1048 delay = 0;
1049 else if (diff >= sync_threshold)
1050 delay = 2 * delay;
1054 is->frame_timer += delay;
1055 /* compute the REAL delay (we need to do that to avoid
1056 long term errors) */
1057 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1058 if (actual_delay < 0.010) {
1059 /* XXX: should skip picture */
1060 actual_delay = 0.010;
1062 /* launch timer for next picture */
1063 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1065 #if defined(DEBUG_SYNC)
1066 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1067 delay, actual_delay, vp->pts, -diff);
1068 #endif
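/* Recap of the scheduling above: 'delay' starts as the PTS difference to the
   previous frame; when audio or the external clock is master and the video
   is more than sync_threshold late, delay is forced to 0 (show the frame
   immediately), and when it is early the delay is doubled (effectively
   repeating it). frame_timer accumulates the ideal display times, and
   actual_delay is clamped to a 10 ms minimum (the XXX above notes the
   picture should really be skipped instead). */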
1070 if(is->subtitle_st) {
1071 if (is->subtitle_stream_changed) {
1072 SDL_LockMutex(is->subpq_mutex);
1074 while (is->subpq_size) {
1075 free_subpicture(&is->subpq[is->subpq_rindex]);
1077 /* update queue size and signal for next picture */
1078 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1079 is->subpq_rindex = 0;
1081 is->subpq_size--;
1083 is->subtitle_stream_changed = 0;
1085 SDL_CondSignal(is->subpq_cond);
1086 SDL_UnlockMutex(is->subpq_mutex);
1087 } else {
1088 if (is->subpq_size > 0) {
1089 sp = &is->subpq[is->subpq_rindex];
1091 if (is->subpq_size > 1)
1092 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1093 else
1094 sp2 = NULL;
1096 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1097 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1099 free_subpicture(sp);
1101 /* update queue size and signal for next picture */
1102 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1103 is->subpq_rindex = 0;
1105 SDL_LockMutex(is->subpq_mutex);
1106 is->subpq_size--;
1107 SDL_CondSignal(is->subpq_cond);
1108 SDL_UnlockMutex(is->subpq_mutex);
1114 /* display picture */
1115 video_display(is);
1117 /* update queue size and signal for next picture */
1118 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1119 is->pictq_rindex = 0;
1121 SDL_LockMutex(is->pictq_mutex);
1122 is->pictq_size--;
1123 SDL_CondSignal(is->pictq_cond);
1124 SDL_UnlockMutex(is->pictq_mutex);
1126 } else if (is->audio_st) {
1127 /* draw the next audio frame */
1129 schedule_refresh(is, 40);
1131 /* if only audio stream, then display the audio bars (better
1132 than nothing, just to test the implementation) */
1134 /* display picture */
1135 video_display(is);
1136 } else {
1137 schedule_refresh(is, 100);
1139 if (show_status) {
1140 static int64_t last_time;
1141 int64_t cur_time;
1142 int aqsize, vqsize, sqsize;
1143 double av_diff;
1145 cur_time = av_gettime();
1146 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1147 aqsize = 0;
1148 vqsize = 0;
1149 sqsize = 0;
1150 if (is->audio_st)
1151 aqsize = is->audioq.size;
1152 if (is->video_st)
1153 vqsize = is->videoq.size;
1154 if (is->subtitle_st)
1155 sqsize = is->subtitleq.size;
1156 av_diff = 0;
1157 if (is->audio_st && is->video_st)
1158 av_diff = get_audio_clock(is) - get_video_clock(is);
1159 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1160 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1161 fflush(stdout);
1162 last_time = cur_time;
1167 /* allocate a picture (this must be done in the main thread to avoid
1168 potential locking problems) */
1169 static void alloc_picture(void *opaque)
1171 VideoState *is = opaque;
1172 VideoPicture *vp;
1174 vp = &is->pictq[is->pictq_windex];
1176 if (vp->bmp)
1177 SDL_FreeYUVOverlay(vp->bmp);
1179 #if 0
1180 /* XXX: use generic function */
1181 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1182 switch(is->video_st->codec->pix_fmt) {
1183 case PIX_FMT_YUV420P:
1184 case PIX_FMT_YUV422P:
1185 case PIX_FMT_YUV444P:
1186 case PIX_FMT_YUYV422:
1187 case PIX_FMT_YUV410P:
1188 case PIX_FMT_YUV411P:
1189 is_yuv = 1;
1190 break;
1191 default:
1192 is_yuv = 0;
1193 break;
1195 #endif
1196 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1197 is->video_st->codec->height,
1198 SDL_YV12_OVERLAY,
1199 screen);
1200 vp->width = is->video_st->codec->width;
1201 vp->height = is->video_st->codec->height;
1203 SDL_LockMutex(is->pictq_mutex);
1204 vp->allocated = 1;
1205 SDL_CondSignal(is->pictq_cond);
1206 SDL_UnlockMutex(is->pictq_mutex);
1211 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1213 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1215 VideoPicture *vp;
1216 int dst_pix_fmt;
1217 AVPicture pict;
1218 static struct SwsContext *img_convert_ctx;
1220 /* wait until we have space to put a new picture */
1221 SDL_LockMutex(is->pictq_mutex);
1222 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1223 !is->videoq.abort_request) {
1224 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1226 SDL_UnlockMutex(is->pictq_mutex);
1228 if (is->videoq.abort_request)
1229 return -1;
1231 vp = &is->pictq[is->pictq_windex];
1233 /* alloc or resize hardware picture buffer */
1234 if (!vp->bmp ||
1235 vp->width != is->video_st->codec->width ||
1236 vp->height != is->video_st->codec->height) {
1237 SDL_Event event;
1239 vp->allocated = 0;
1241 /* the allocation must be done in the main thread to avoid
1242 locking problems */
1243 event.type = FF_ALLOC_EVENT;
1244 event.user.data1 = is;
1245 SDL_PushEvent(&event);
1247 /* wait until the picture is allocated */
1248 SDL_LockMutex(is->pictq_mutex);
1249 while (!vp->allocated && !is->videoq.abort_request) {
1250 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1252 SDL_UnlockMutex(is->pictq_mutex);
1254 if (is->videoq.abort_request)
1255 return -1;
1258 /* if the frame is not skipped, then display it */
1259 if (vp->bmp) {
1260 /* get a pointer on the bitmap */
1261 SDL_LockYUVOverlay (vp->bmp);
1263 dst_pix_fmt = PIX_FMT_YUV420P;
1264 pict.data[0] = vp->bmp->pixels[0];
1265 pict.data[1] = vp->bmp->pixels[2];
1266 pict.data[2] = vp->bmp->pixels[1];
1268 pict.linesize[0] = vp->bmp->pitches[0];
1269 pict.linesize[1] = vp->bmp->pitches[2];
1270 pict.linesize[2] = vp->bmp->pitches[1];
1271 if (img_convert_ctx == NULL) {
1272 img_convert_ctx = sws_getContext(is->video_st->codec->width,
1273 is->video_st->codec->height, is->video_st->codec->pix_fmt,
1274 is->video_st->codec->width, is->video_st->codec->height,
1275 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1276 if (img_convert_ctx == NULL) {
1277 fprintf(stderr, "Cannot initialize the conversion context\n");
1278 exit(1);
1281 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1282 0, is->video_st->codec->height, pict.data, pict.linesize);
1283 /* update the bitmap content */
1284 SDL_UnlockYUVOverlay(vp->bmp);
1286 vp->pts = pts;
1288 /* now we can update the picture count */
1289 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1290 is->pictq_windex = 0;
1291 SDL_LockMutex(is->pictq_mutex);
1292 is->pictq_size++;
1293 SDL_UnlockMutex(is->pictq_mutex);
1295 return 0;
1299 * compute the exact PTS for the picture if it is omitted in the stream
1300 * @param pts1 the dts of the pkt / pts of the frame
1302 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1304 double frame_delay, pts;
1306 pts = pts1;
1308 if (pts != 0) {
1309 /* update video clock with pts, if present */
1310 is->video_clock = pts;
1311 } else {
1312 pts = is->video_clock;
1314 /* update video clock for next frame */
1315 frame_delay = av_q2d(is->video_st->codec->time_base);
1316 /* for MPEG2, the frame can be repeated, so we update the
1317 clock accordingly */
1318 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1319 is->video_clock += frame_delay;
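/* Example: with a 1/25 time_base, frame_delay is 0.04 s; a frame with
   repeat_pict == 1 (one repeated field, as used for 3:2 pulldown) therefore
   advances video_clock by 0.04 + 0.02 = 0.06 s. */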
1321 #if defined(DEBUG_SYNC) && 0
1323 int ftype;
1324 if (src_frame->pict_type == FF_B_TYPE)
1325 ftype = 'B';
1326 else if (src_frame->pict_type == FF_I_TYPE)
1327 ftype = 'I';
1328 else
1329 ftype = 'P';
1330 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1331 ftype, pts, pts1);
1333 #endif
1334 return queue_picture(is, src_frame, pts);
1337 static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
1339 static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1340 int ret= avcodec_default_get_buffer(c, pic);
1341 uint64_t *pts= av_malloc(sizeof(uint64_t));
1342 *pts= global_video_pkt_pts;
1343 pic->opaque= pts;
1344 return ret;
1347 static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1348 if(pic) av_freep(&pic->opaque);
1349 avcodec_default_release_buffer(c, pic);
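/* PTS bookkeeping trick: video_thread stores pkt->pts in the global
   global_video_pkt_pts just before calling avcodec_decode_video(); the
   overridden get_buffer() copies it into frame->opaque when the decoder
   allocates the frame, so once the (possibly reordered/delayed) frame is
   finally returned, frame->opaque still holds the PTS of the packet that
   started it. This only works because a single thread decodes video packets
   one at a time. */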
1352 static int video_thread(void *arg)
1354 VideoState *is = arg;
1355 AVPacket pkt1, *pkt = &pkt1;
1356 int len1, got_picture;
1357 AVFrame *frame= avcodec_alloc_frame();
1358 double pts;
1360 for(;;) {
1361 while (is->paused && !is->videoq.abort_request) {
1362 SDL_Delay(10);
1364 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1365 break;
1367 if(pkt->data == flush_pkt.data){
1368 avcodec_flush_buffers(is->video_st->codec);
1369 continue;
1372 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1373 this packet, if any */
1374 global_video_pkt_pts= pkt->pts;
1375 len1 = avcodec_decode_video(is->video_st->codec,
1376 frame, &got_picture,
1377 pkt->data, pkt->size);
1379 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1380 && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
1381 pts= *(uint64_t*)frame->opaque;
1382 else if(pkt->dts != AV_NOPTS_VALUE)
1383 pts= pkt->dts;
1384 else
1385 pts= 0;
1386 pts *= av_q2d(is->video_st->time_base);
1388 // if (len1 < 0)
1389 // break;
1390 if (got_picture) {
1391 if (output_picture2(is, frame, pts) < 0)
1392 goto the_end;
1394 av_free_packet(pkt);
1395 if (step)
1396 if (cur_stream)
1397 stream_pause(cur_stream);
1399 the_end:
1400 av_free(frame);
1401 return 0;
1404 static int subtitle_thread(void *arg)
1406 VideoState *is = arg;
1407 SubPicture *sp;
1408 AVPacket pkt1, *pkt = &pkt1;
1409 int len1, got_subtitle;
1410 double pts;
1411 int i, j;
1412 int r, g, b, y, u, v, a;
1414 for(;;) {
1415 while (is->paused && !is->subtitleq.abort_request) {
1416 SDL_Delay(10);
1418 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1419 break;
1421 if(pkt->data == flush_pkt.data){
1422 avcodec_flush_buffers(is->subtitle_st->codec);
1423 continue;
1425 SDL_LockMutex(is->subpq_mutex);
1426 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1427 !is->subtitleq.abort_request) {
1428 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1430 SDL_UnlockMutex(is->subpq_mutex);
1432 if (is->subtitleq.abort_request)
1433 goto the_end;
1435 sp = &is->subpq[is->subpq_windex];
1437 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1438 this packet, if any */
1439 pts = 0;
1440 if (pkt->pts != AV_NOPTS_VALUE)
1441 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1443 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1444 &sp->sub, &got_subtitle,
1445 pkt->data, pkt->size);
1446 // if (len1 < 0)
1447 // break;
1448 if (got_subtitle && sp->sub.format == 0) {
1449 sp->pts = pts;
1451 for (i = 0; i < sp->sub.num_rects; i++)
1453 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1455 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1456 y = RGB_TO_Y_CCIR(r, g, b);
1457 u = RGB_TO_U_CCIR(r, g, b, 0);
1458 v = RGB_TO_V_CCIR(r, g, b, 0);
1459 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1463 /* now we can update the picture count */
1464 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1465 is->subpq_windex = 0;
1466 SDL_LockMutex(is->subpq_mutex);
1467 is->subpq_size++;
1468 SDL_UnlockMutex(is->subpq_mutex);
1470 av_free_packet(pkt);
1471 // if (step)
1472 // if (cur_stream)
1473 // stream_pause(cur_stream);
1475 the_end:
1476 return 0;
1479 /* copy samples for the audio waveform display */
1480 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1482 int size, len, channels;
1484 channels = is->audio_st->codec->channels;
1486 size = samples_size / sizeof(short);
1487 while (size > 0) {
1488 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1489 if (len > size)
1490 len = size;
1491 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1492 samples += len;
1493 is->sample_array_index += len;
1494 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1495 is->sample_array_index = 0;
1496 size -= len;
1500 /* return the new audio buffer size (samples can be added or deleted
1501 to get better sync if the video or external clock is the master) */
1502 static int synchronize_audio(VideoState *is, short *samples,
1503 int samples_size1, double pts)
1505 int n, samples_size;
1506 double ref_clock;
1508 n = 2 * is->audio_st->codec->channels;
1509 samples_size = samples_size1;
1511 /* if not master, then we try to remove or add samples to correct the clock */
1512 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1513 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1514 double diff, avg_diff;
1515 int wanted_size, min_size, max_size, nb_samples;
1517 ref_clock = get_master_clock(is);
1518 diff = get_audio_clock(is) - ref_clock;
1520 if (diff < AV_NOSYNC_THRESHOLD) {
1521 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1522 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1523 /* not enough measurements to get a correct estimate */
1524 is->audio_diff_avg_count++;
1525 } else {
1526 /* estimate the A-V difference */
1527 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1529 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1530 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1531 nb_samples = samples_size / n;
1533 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1534 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1535 if (wanted_size < min_size)
1536 wanted_size = min_size;
1537 else if (wanted_size > max_size)
1538 wanted_size = max_size;
1540 /* add or remove samples to correct the sync */
1541 if (wanted_size < samples_size) {
1542 /* remove samples */
1543 samples_size = wanted_size;
1544 } else if (wanted_size > samples_size) {
1545 uint8_t *samples_end, *q;
1546 int nb;
1548 /* add samples */
1549 nb = (wanted_size - samples_size);
1550 samples_end = (uint8_t *)samples + samples_size - n;
1551 q = samples_end + n;
1552 while (nb > 0) {
1553 memcpy(q, samples_end, n);
1554 q += n;
1555 nb -= n;
1557 samples_size = wanted_size;
1560 #if 0
1561 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1562 diff, avg_diff, samples_size - samples_size1,
1563 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1564 #endif
1566 } else {
1567 /* difference is too big: may be due to initial PTS errors, so
1568 reset the A-V filter */
1569 is->audio_diff_avg_count = 0;
1570 is->audio_diff_cum = 0;
1574 return samples_size;
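/* In the correction branch above, wanted_size is the current buffer size
   plus diff * sample_rate * n bytes, clamped to within
   SAMPLE_CORRECTION_PERCENT_MAX (10%) of the original size, so each callback
   stretches or shrinks the audio by at most 10% and the clocks converge
   gradually instead of jumping. */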
1577 /* decode one audio frame and return its uncompressed size */
1578 static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
1580 AVPacket *pkt = &is->audio_pkt;
1581 int n, len1, data_size;
1582 double pts;
1584 for(;;) {
1585 /* NOTE: the audio packet can contain several frames */
1586 while (is->audio_pkt_size > 0) {
1587 data_size = buf_size;
1588 len1 = avcodec_decode_audio2(is->audio_st->codec,
1589 (int16_t *)audio_buf, &data_size,
1590 is->audio_pkt_data, is->audio_pkt_size);
1591 if (len1 < 0) {
1592 /* if error, we skip the frame */
1593 is->audio_pkt_size = 0;
1594 break;
1597 is->audio_pkt_data += len1;
1598 is->audio_pkt_size -= len1;
1599 if (data_size <= 0)
1600 continue;
1601 /* if no pts, then compute it */
1602 pts = is->audio_clock;
1603 *pts_ptr = pts;
1604 n = 2 * is->audio_st->codec->channels;
1605 is->audio_clock += (double)data_size /
1606 (double)(n * is->audio_st->codec->sample_rate);
1607 #if defined(DEBUG_SYNC)
1609 static double last_clock;
1610 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1611 is->audio_clock - last_clock,
1612 is->audio_clock, pts);
1613 last_clock = is->audio_clock;
1615 #endif
1616 return data_size;
1619 /* free the current packet */
1620 if (pkt->data)
1621 av_free_packet(pkt);
1623 if (is->paused || is->audioq.abort_request) {
1624 return -1;
1627 /* read next packet */
1628 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1629 return -1;
1630 if(pkt->data == flush_pkt.data){
1631 avcodec_flush_buffers(is->audio_st->codec);
1632 continue;
1635 is->audio_pkt_data = pkt->data;
1636 is->audio_pkt_size = pkt->size;
1638 /* update the audio clock with the pts, if present */
1639 if (pkt->pts != AV_NOPTS_VALUE) {
1640 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1645 /* get the amount of decoded audio data still buffered, in bytes. With SDL, we
1646 cannot get precise hardware buffer fullness information */
1647 static int audio_write_get_buf_size(VideoState *is)
1649 return is->audio_buf_size - is->audio_buf_index;
1653 /* prepare a new audio buffer */
1654 void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1656 VideoState *is = opaque;
1657 int audio_size, len1;
1658 double pts;
1660 audio_callback_time = av_gettime();
1662 while (len > 0) {
1663 if (is->audio_buf_index >= is->audio_buf_size) {
1664 audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
1665 if (audio_size < 0) {
1666 /* if error, just output silence */
1667 is->audio_buf_size = 1024;
1668 memset(is->audio_buf, 0, is->audio_buf_size);
1669 } else {
1670 if (is->show_audio)
1671 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1672 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1673 pts);
1674 is->audio_buf_size = audio_size;
1676 is->audio_buf_index = 0;
1678 len1 = is->audio_buf_size - is->audio_buf_index;
1679 if (len1 > len)
1680 len1 = len;
1681 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1682 len -= len1;
1683 stream += len1;
1684 is->audio_buf_index += len1;
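/* SDL calls this callback from its audio thread whenever the device needs
   'len' more bytes: whole frames are decoded into audio_buf, passed through
   synchronize_audio(), and then copied out slice by slice; if decoding fails,
   a short block of silence is written so playback keeps running. */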
1688 /* open a given stream. Return 0 if OK */
1689 static int stream_component_open(VideoState *is, int stream_index)
1691 AVFormatContext *ic = is->ic;
1692 AVCodecContext *enc;
1693 AVCodec *codec;
1694 SDL_AudioSpec wanted_spec, spec;
1696 if (stream_index < 0 || stream_index >= ic->nb_streams)
1697 return -1;
1698 enc = ic->streams[stream_index]->codec;
1700 /* prepare audio output */
1701 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1702 wanted_spec.freq = enc->sample_rate;
1703 wanted_spec.format = AUDIO_S16SYS;
1704 /* hack for AC3. XXX: suppress that */
1705 if (enc->channels > 2)
1706 enc->channels = 2;
1707 wanted_spec.channels = enc->channels;
1708 wanted_spec.silence = 0;
1709 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1710 wanted_spec.callback = sdl_audio_callback;
1711 wanted_spec.userdata = is;
1712 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1713 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1714 return -1;
1716 is->audio_hw_buf_size = spec.size;
1719 codec = avcodec_find_decoder(enc->codec_id);
1720 enc->debug_mv = debug_mv;
1721 enc->debug = debug;
1722 enc->workaround_bugs = workaround_bugs;
1723 enc->lowres = lowres;
1724 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1725 enc->idct_algo= idct;
1726 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1727 enc->skip_frame= skip_frame;
1728 enc->skip_idct= skip_idct;
1729 enc->skip_loop_filter= skip_loop_filter;
1730 enc->error_resilience= error_resilience;
1731 enc->error_concealment= error_concealment;
1732 if (!codec ||
1733 avcodec_open(enc, codec) < 0)
1734 return -1;
1735 if(thread_count>1)
1736 avcodec_thread_init(enc, thread_count);
1737 enc->thread_count= thread_count;
1738 switch(enc->codec_type) {
1739 case CODEC_TYPE_AUDIO:
1740 is->audio_stream = stream_index;
1741 is->audio_st = ic->streams[stream_index];
1742 is->audio_buf_size = 0;
1743 is->audio_buf_index = 0;
1745 /* init averaging filter */
1746 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1747 is->audio_diff_avg_count = 0;
1748 /* since we do not have a precise enough measure of the audio fifo fullness,
1749 we correct audio sync only if the error is larger than this threshold */
1750 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
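/* The coefficient satisfies coef^AUDIO_DIFF_AVG_NB = 0.01, so audio_diff_cum
   is an exponentially weighted sum in which a measurement decays to 1%
   weight after 20 callbacks; synchronize_audio() recovers the mean as
   audio_diff_cum * (1 - coef). The threshold equals two SDL audio buffers of
   playback time (2 * 1024 samples / sample_rate seconds). */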
1752 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1753 packet_queue_init(&is->audioq);
1754 SDL_PauseAudio(0);
1755 break;
1756 case CODEC_TYPE_VIDEO:
1757 is->video_stream = stream_index;
1758 is->video_st = ic->streams[stream_index];
1760 is->frame_last_delay = 40e-3;
1761 is->frame_timer = (double)av_gettime() / 1000000.0;
1762 is->video_current_pts_time = av_gettime();
1764 packet_queue_init(&is->videoq);
1765 is->video_tid = SDL_CreateThread(video_thread, is);
1767 enc-> get_buffer= my_get_buffer;
1768 enc->release_buffer= my_release_buffer;
1769 break;
1770 case CODEC_TYPE_SUBTITLE:
1771 is->subtitle_stream = stream_index;
1772 is->subtitle_st = ic->streams[stream_index];
1773 packet_queue_init(&is->subtitleq);
1775 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1776 break;
1777 default:
1778 break;
1780 return 0;
1783 static void stream_component_close(VideoState *is, int stream_index)
1785 AVFormatContext *ic = is->ic;
1786 AVCodecContext *enc;
1788 if (stream_index < 0 || stream_index >= ic->nb_streams)
1789 return;
1790 enc = ic->streams[stream_index]->codec;
1792 switch(enc->codec_type) {
1793 case CODEC_TYPE_AUDIO:
1794 packet_queue_abort(&is->audioq);
1796 SDL_CloseAudio();
1798 packet_queue_end(&is->audioq);
1799 break;
1800 case CODEC_TYPE_VIDEO:
1801 packet_queue_abort(&is->videoq);
1803 /* note: we also signal this mutex to make sure we unblock the
1804 video thread in all cases */
1805 SDL_LockMutex(is->pictq_mutex);
1806 SDL_CondSignal(is->pictq_cond);
1807 SDL_UnlockMutex(is->pictq_mutex);
1809 SDL_WaitThread(is->video_tid, NULL);
1811 packet_queue_end(&is->videoq);
1812 break;
1813 case CODEC_TYPE_SUBTITLE:
1814 packet_queue_abort(&is->subtitleq);
1816 /* note: we also signal this mutex to make sure we unblock the
1817 subtitle thread in all cases */
1818 SDL_LockMutex(is->subpq_mutex);
1819 is->subtitle_stream_changed = 1;
1821 SDL_CondSignal(is->subpq_cond);
1822 SDL_UnlockMutex(is->subpq_mutex);
1824 SDL_WaitThread(is->subtitle_tid, NULL);
1826 packet_queue_end(&is->subtitleq);
1827 break;
1828 default:
1829 break;
1832 avcodec_close(enc);
1833 switch(enc->codec_type) {
1834 case CODEC_TYPE_AUDIO:
1835 is->audio_st = NULL;
1836 is->audio_stream = -1;
1837 break;
1838 case CODEC_TYPE_VIDEO:
1839 is->video_st = NULL;
1840 is->video_stream = -1;
1841 break;
1842 case CODEC_TYPE_SUBTITLE:
1843 is->subtitle_st = NULL;
1844 is->subtitle_stream = -1;
1845 break;
1846 default:
1847 break;
1851 static void dump_stream_info(const AVFormatContext *s)
1853 if (s->track != 0)
1854 fprintf(stderr, "Track: %d\n", s->track);
1855 if (s->title[0] != '\0')
1856 fprintf(stderr, "Title: %s\n", s->title);
1857 if (s->author[0] != '\0')
1858 fprintf(stderr, "Author: %s\n", s->author);
1859 if (s->copyright[0] != '\0')
1860 fprintf(stderr, "Copyright: %s\n", s->copyright);
1861 if (s->comment[0] != '\0')
1862 fprintf(stderr, "Comment: %s\n", s->comment);
1863 if (s->album[0] != '\0')
1864 fprintf(stderr, "Album: %s\n", s->album);
1865 if (s->year != 0)
1866 fprintf(stderr, "Year: %d\n", s->year);
1867 if (s->genre[0] != '\0')
1868 fprintf(stderr, "Genre: %s\n", s->genre);
1871 /* since we have only one decoding thread, we can use a global
1872 variable instead of a thread local variable */
1873 static VideoState *global_video_state;
1875 static int decode_interrupt_cb(void)
1877 return (global_video_state && global_video_state->abort_request);
1880 /* this thread gets the stream from the disk or the network */
1881 static int decode_thread(void *arg)
1883 VideoState *is = arg;
1884 AVFormatContext *ic;
1885 int err, i, ret, video_index, audio_index, use_play;
1886 AVPacket pkt1, *pkt = &pkt1;
1887 AVFormatParameters params, *ap = &params;
1889 video_index = -1;
1890 audio_index = -1;
1891 is->video_stream = -1;
1892 is->audio_stream = -1;
1893 is->subtitle_stream = -1;
1895 global_video_state = is;
1896 url_set_interrupt_cb(decode_interrupt_cb);
1898 memset(ap, 0, sizeof(*ap));
1899 ap->initial_pause = 1; /* we force a pause when starting an RTSP
1900 stream */
1902 ap->width = frame_width;
1903 ap->height= frame_height;
1904 ap->time_base= (AVRational){1, 25};
1905 ap->pix_fmt = frame_pix_fmt;
1907 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1908 if (err < 0) {
1909 print_error(is->filename, err);
1910 ret = -1;
1911 goto fail;
1913 is->ic = ic;
1914 #ifdef CONFIG_RTSP_DEMUXER
1915 use_play = (ic->iformat == &rtsp_demuxer);
1916 #else
1917 use_play = 0;
1918 #endif
1920 if(genpts)
1921 ic->flags |= AVFMT_FLAG_GENPTS;
1923 if (!use_play) {
1924 err = av_find_stream_info(ic);
1925 if (err < 0) {
1926 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1927 ret = -1;
1928 goto fail;
1930 ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1933 /* if seeking requested, we execute it */
1934 if (start_time != AV_NOPTS_VALUE) {
1935 int64_t timestamp;
1937 timestamp = start_time;
1938 /* add the stream start time */
1939 if (ic->start_time != AV_NOPTS_VALUE)
1940 timestamp += ic->start_time;
1941 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1942 if (ret < 0) {
1943 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1944 is->filename, (double)timestamp / AV_TIME_BASE);
1948 /* now we can begin to play (RTSP stream only) */
1949 av_read_play(ic);
1951 if (use_play) {
1952 err = av_find_stream_info(ic);
1953 if (err < 0) {
1954 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1955 ret = -1;
1956 goto fail;
1960 for(i = 0; i < ic->nb_streams; i++) {
1961 AVCodecContext *enc = ic->streams[i]->codec;
1962 switch(enc->codec_type) {
1963 case CODEC_TYPE_AUDIO:
1964 if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1965 audio_index = i;
1966 break;
1967 case CODEC_TYPE_VIDEO:
1968 if (video_index < 0 && !video_disable)
1969 video_index = i;
1970 break;
1971 default:
1972 break;
1975 if (show_status) {
1976 dump_format(ic, 0, is->filename, 0);
1977 dump_stream_info(ic);
1980 /* open the streams */
1981 if (audio_index >= 0) {
1982 stream_component_open(is, audio_index);
1985 if (video_index >= 0) {
1986 stream_component_open(is, video_index);
1987 } else {
1988 if (!display_disable)
1989 is->show_audio = 1;
1992 if (is->video_stream < 0 && is->audio_stream < 0) {
1993 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1994 ret = -1;
1995 goto fail;
1998 for(;;) {
1999 if (is->abort_request)
2000 break;
2001 if (is->paused != is->last_paused) {
2002 is->last_paused = is->paused;
2003 if (is->paused)
2004 av_read_pause(ic);
2005 else
2006 av_read_play(ic);
2008 #ifdef CONFIG_RTSP_DEMUXER
2009 if (is->paused && ic->iformat == &rtsp_demuxer) {
2010 /* wait 10 ms to avoid trying to get another packet */
2011 /* XXX: horrible */
2012 SDL_Delay(10);
2013 continue;
2015 #endif
2016 if (is->seek_req) {
2017 int stream_index= -1;
2018 int64_t seek_target= is->seek_pos;
2020 if (is-> video_stream >= 0) stream_index= is-> video_stream;
2021 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
2022 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
2024 if(stream_index>=0){
2025 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2028 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2029 if (ret < 0) {
2030 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2031 }else{
2032 if (is->audio_stream >= 0) {
2033 packet_queue_flush(&is->audioq);
2034 packet_queue_put(&is->audioq, &flush_pkt);
2036 if (is->subtitle_stream >= 0) {
2037 packet_queue_flush(&is->subtitleq);
2038 packet_queue_put(&is->subtitleq, &flush_pkt);
2040 if (is->video_stream >= 0) {
2041 packet_queue_flush(&is->videoq);
2042 packet_queue_put(&is->videoq, &flush_pkt);
2045 is->seek_req = 0;
2048 /* if the queues are full, no need to read more */
2049 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2050 is->videoq.size > MAX_VIDEOQ_SIZE ||
2051 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2052 url_feof(&ic->pb)) {
2053 /* wait 10 ms */
2054 SDL_Delay(10);
2055 continue;
2057 ret = av_read_frame(ic, pkt);
2058 if (ret < 0) {
2059 if (url_ferror(&ic->pb) == 0) {
2060 SDL_Delay(100); /* wait for user event */
2061 continue;
2062 } else
2063 break;
2065 if (pkt->stream_index == is->audio_stream) {
2066 packet_queue_put(&is->audioq, pkt);
2067 } else if (pkt->stream_index == is->video_stream) {
2068 packet_queue_put(&is->videoq, pkt);
2069 } else if (pkt->stream_index == is->subtitle_stream) {
2070 packet_queue_put(&is->subtitleq, pkt);
2071 } else {
2072 av_free_packet(pkt);
2075 /* wait until the end */
2076 while (!is->abort_request) {
2077 SDL_Delay(100);
2080 ret = 0;
2081 fail:
2082 /* disable interrupting */
2083 global_video_state = NULL;
2085 /* close each stream */
2086 if (is->audio_stream >= 0)
2087 stream_component_close(is, is->audio_stream);
2088 if (is->video_stream >= 0)
2089 stream_component_close(is, is->video_stream);
2090 if (is->subtitle_stream >= 0)
2091 stream_component_close(is, is->subtitle_stream);
2092 if (is->ic) {
2093 av_close_input_file(is->ic);
2094 is->ic = NULL; /* safety */
2096 url_set_interrupt_cb(NULL);
2098 if (ret != 0) {
2099 SDL_Event event;
2101 event.type = FF_QUIT_EVENT;
2102 event.user.data1 = is;
2103 SDL_PushEvent(&event);
2105 return 0;
2108 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2110 VideoState *is;
2112 is = av_mallocz(sizeof(VideoState));
2113 if (!is)
2114 return NULL;
2115 av_strlcpy(is->filename, filename, sizeof(is->filename));
2116 is->iformat = iformat;
2117 is->ytop = 0;
2118 is->xleft = 0;
2120 /* start video display */
2121 is->pictq_mutex = SDL_CreateMutex();
2122 is->pictq_cond = SDL_CreateCond();
2124 is->subpq_mutex = SDL_CreateMutex();
2125 is->subpq_cond = SDL_CreateCond();
2127 /* add the refresh timer to draw the picture */
2128 schedule_refresh(is, 40);
2130 is->av_sync_type = av_sync_type;
2131 is->parse_tid = SDL_CreateThread(decode_thread, is);
2132 if (!is->parse_tid) {
2133 av_free(is);
2134 return NULL;
2136 return is;
2139 static void stream_close(VideoState *is)
2140 {
2141 VideoPicture *vp;
2142 int i;
2143 /* XXX: use a special url_shutdown call to abort parse cleanly */
2144 is->abort_request = 1;
2145 SDL_WaitThread(is->parse_tid, NULL);
2147 /* free all pictures */
2148 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2149 vp = &is->pictq[i];
2150 if (vp->bmp) {
2151 SDL_FreeYUVOverlay(vp->bmp);
2152 vp->bmp = NULL;
2153 }
2154 }
2155 SDL_DestroyMutex(is->pictq_mutex);
2156 SDL_DestroyCond(is->pictq_cond);
2157 SDL_DestroyMutex(is->subpq_mutex);
2158 SDL_DestroyCond(is->subpq_cond);
2159 }
2161 static void stream_cycle_channel(VideoState *is, int codec_type)
2162 {
2163 AVFormatContext *ic = is->ic;
2164 int start_index, stream_index;
2165 AVStream *st;
2167 if (codec_type == CODEC_TYPE_VIDEO)
2168 start_index = is->video_stream;
2169 else if (codec_type == CODEC_TYPE_AUDIO)
2170 start_index = is->audio_stream;
2171 else
2172 start_index = is->subtitle_stream;
2173 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2174 return;
2175 stream_index = start_index;
2176 for(;;) {
2177 if (++stream_index >= is->ic->nb_streams)
2178 {
2179 if (codec_type == CODEC_TYPE_SUBTITLE)
2180 {
2181 stream_index = -1;
2182 goto the_end;
2183 } else
2184 stream_index = 0;
2185 }
2186 if (stream_index == start_index)
2187 return;
2188 st = ic->streams[stream_index];
2189 if (st->codec->codec_type == codec_type) {
2190 /* check that parameters are OK */
2191 switch(codec_type) {
2192 case CODEC_TYPE_AUDIO:
2193 if (st->codec->sample_rate != 0 &&
2194 st->codec->channels != 0)
2195 goto the_end;
2196 break;
2197 case CODEC_TYPE_VIDEO:
2198 case CODEC_TYPE_SUBTITLE:
2199 goto the_end;
2200 default:
2201 break;
2202 }
2203 }
2204 }
2205 the_end:
2206 stream_component_close(is, start_index);
2207 stream_component_open(is, stream_index);
2208 }
2211 static void toggle_full_screen(void)
2212 {
2213 is_full_screen = !is_full_screen;
2214 if (!fs_screen_width) {
2215 /* use default SDL method */
2216 // SDL_WM_ToggleFullScreen(screen);
2217 }
2218 video_open(cur_stream);
2219 }
2221 static void toggle_pause(void)
2222 {
2223 if (cur_stream)
2224 stream_pause(cur_stream);
2225 step = 0;
2226 }
2228 static void step_to_next_frame(void)
2229 {
2230 if (cur_stream) {
2231 if (cur_stream->paused)
2232 cur_stream->paused=0;
2233 cur_stream->video_current_pts = get_video_clock(cur_stream);
2234 }
2235 step = 1;
2236 }
2238 static void do_exit(void)
2239 {
2240 if (cur_stream) {
2241 stream_close(cur_stream);
2242 cur_stream = NULL;
2243 }
2244 if (show_status)
2245 printf("\n");
2246 SDL_Quit();
2247 exit(0);
2248 }
2250 static void toggle_audio_display(void)
2251 {
2252 if (cur_stream) {
2253 cur_stream->show_audio = !cur_stream->show_audio;
2254 }
2255 }
2257 /* handle an event sent by the GUI */
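/* The loop blocks in SDL_WaitEvent() and dispatches keyboard shortcuts
   (quit, fullscreen, pause, stream cycling, relative seeks of 10 s and
   60 s), mouse clicks (seek to the clicked fraction of the total duration),
   window resizes, and the FF_* user events posted by the decode and refresh
   timers. For byte seeks the time increment is turned into a byte offset
   from the container bit rate (with a fixed fallback); otherwise the target
   is the master clock plus the increment, converted to AV_TIME_BASE units
   for stream_seek(). */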
2258 static void event_loop(void)
2259 {
2260 SDL_Event event;
2261 double incr, pos, frac;
2263 for(;;) {
2264 SDL_WaitEvent(&event);
2265 switch(event.type) {
2266 case SDL_KEYDOWN:
2267 switch(event.key.keysym.sym) {
2268 case SDLK_ESCAPE:
2269 case SDLK_q:
2270 do_exit();
2271 break;
2272 case SDLK_f:
2273 toggle_full_screen();
2274 break;
2275 case SDLK_p:
2276 case SDLK_SPACE:
2277 toggle_pause();
2278 break;
2279 case SDLK_s: //S: Step to next frame
2280 step_to_next_frame();
2281 break;
2282 case SDLK_a:
2283 if (cur_stream)
2284 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2285 break;
2286 case SDLK_v:
2287 if (cur_stream)
2288 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2289 break;
2290 case SDLK_t:
2291 if (cur_stream)
2292 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2293 break;
2294 case SDLK_w:
2295 toggle_audio_display();
2296 break;
2297 case SDLK_LEFT:
2298 incr = -10.0;
2299 goto do_seek;
2300 case SDLK_RIGHT:
2301 incr = 10.0;
2302 goto do_seek;
2303 case SDLK_UP:
2304 incr = 60.0;
2305 goto do_seek;
2306 case SDLK_DOWN:
2307 incr = -60.0;
2308 do_seek:
2309 if (cur_stream) {
2310 if (seek_by_bytes) {
2311 pos = url_ftell(&cur_stream->ic->pb);
2312 if (cur_stream->ic->bit_rate)
2313 incr *= cur_stream->ic->bit_rate / 60.0;
2314 else
2315 incr *= 180000.0;
2316 pos += incr;
2317 stream_seek(cur_stream, pos, incr);
2318 } else {
2319 pos = get_master_clock(cur_stream);
2320 pos += incr;
2321 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2322 }
2323 }
2324 break;
2325 default:
2326 break;
2327 }
2328 break;
2329 case SDL_MOUSEBUTTONDOWN:
2330 if (cur_stream) {
2331 int ns, hh, mm, ss;
2332 int tns, thh, tmm, tss;
2333 tns = cur_stream->ic->duration/1000000LL;
2334 thh = tns/3600;
2335 tmm = (tns%3600)/60;
2336 tss = (tns%60);
2337 frac = (double)event.button.x/(double)cur_stream->width;
2338 ns = frac*tns;
2339 hh = ns/3600;
2340 mm = (ns%3600)/60;
2341 ss = (ns%60);
2342 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2343 hh, mm, ss, thh, tmm, tss);
2344 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2345 }
2346 break;
2347 case SDL_VIDEORESIZE:
2348 if (cur_stream) {
2349 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2350 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2351 screen_width = cur_stream->width = event.resize.w;
2352 screen_height= cur_stream->height= event.resize.h;
2353 }
2354 break;
2355 case SDL_QUIT:
2356 case FF_QUIT_EVENT:
2357 do_exit();
2358 break;
2359 case FF_ALLOC_EVENT:
2360 video_open(event.user.data1);
2361 alloc_picture(event.user.data1);
2362 break;
2363 case FF_REFRESH_EVENT:
2364 video_refresh_timer(event.user.data1);
2365 break;
2366 default:
2367 break;
2368 }
2369 }
2370 }
2372 static void opt_frame_size(const char *arg)
2373 {
2374 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2375 fprintf(stderr, "Incorrect frame size\n");
2376 exit(1);
2377 }
2378 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2379 fprintf(stderr, "Frame size must be a multiple of 2\n");
2380 exit(1);
2381 }
2382 }
2384 static void opt_width(const char *arg)
2385 {
2386 screen_width = atoi(arg);
2387 if(screen_width<=0){
2388 fprintf(stderr, "invalid width\n");
2389 exit(1);
2390 }
2391 }
2393 static void opt_height(const char *arg)
2394 {
2395 screen_height = atoi(arg);
2396 if(screen_height<=0){
2397 fprintf(stderr, "invalid height\n");
2398 exit(1);
2399 }
2400 }
2402 static void opt_format(const char *arg)
2403 {
2404 file_iformat = av_find_input_format(arg);
2405 if (!file_iformat) {
2406 fprintf(stderr, "Unknown input format: %s\n", arg);
2407 exit(1);
2408 }
2409 }
2411 static void opt_frame_pix_fmt(const char *arg)
2412 {
2413 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2414 }
2416 #ifdef CONFIG_RTSP_DEMUXER
2417 static void opt_rtp_tcp(void)
2418 {
2419 /* only tcp protocol */
2420 rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
2421 }
2422 #endif
2424 static void opt_sync(const char *arg)
2425 {
2426 if (!strcmp(arg, "audio"))
2427 av_sync_type = AV_SYNC_AUDIO_MASTER;
2428 else if (!strcmp(arg, "video"))
2429 av_sync_type = AV_SYNC_VIDEO_MASTER;
2430 else if (!strcmp(arg, "ext"))
2431 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2432 else
2433 show_help();
2434 }
2436 static void opt_seek(const char *arg)
2437 {
2438 start_time = parse_date(arg, 1);
2439 }
2441 static void opt_debug(const char *arg)
2442 {
2443 av_log_level = 99;
2444 debug = atoi(arg);
2445 }
2447 static void opt_vismv(const char *arg)
2448 {
2449 debug_mv = atoi(arg);
2450 }
2452 static void opt_thread_count(const char *arg)
2453 {
2454 thread_count= atoi(arg);
2455 #if !defined(HAVE_THREADS)
2456 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2457 #endif
2458 }
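/* Command line option table, following the OptionDef conventions of
   cmdutils.h: OPT_BOOL entries point at an int flag, OPT_INT | HAS_ARG
   entries at an int variable, and plain HAS_ARG entries at a handler
   function that receives the argument string. OPT_EXPERT options are the
   ones show_help() lists under "Advanced options". */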
2460 const OptionDef options[] = {
2461 { "h", 0, {(void*)show_help}, "show help" },
2462 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2463 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2464 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2465 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2466 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2467 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2468 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2469 { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2470 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2471 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2472 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2473 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2474 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2475 { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2476 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2477 { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2478 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2479 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2480 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2481 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2482 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2483 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2484 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2485 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2486 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2487 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2488 #ifdef CONFIG_RTSP_DEMUXER
2489 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
2490 #endif
2491 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2492 { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2493 { NULL, },
2494 };
2496 void show_help(void)
2497 {
2498 printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2007 Fabrice Bellard, et al.\n"
2499 "usage: ffplay [options] input_file\n"
2500 "Simple media player\n");
2501 printf("\n");
2502 show_help_options(options, "Main options:\n",
2503 OPT_EXPERT, 0);
2504 show_help_options(options, "\nAdvanced options:\n",
2505 OPT_EXPERT, OPT_EXPERT);
2506 printf("\nWhile playing:\n"
2507 "q, ESC quit\n"
2508 "f toggle full screen\n"
2509 "p, SPC pause\n"
2510 "a cycle audio channel\n"
2511 "v cycle video channel\n"
2512 "t cycle subtitle channel\n"
2513 "w show audio waves\n"
2514 "left/right seek backward/forward 10 seconds\n"
2515 "down/up seek backward/forward 1 minute\n"
2516 "mouse click seek to percentage in file corresponding to fraction of width\n"
2518 exit(1);
2519 }
2521 void parse_arg_file(const char *filename)
2522 {
2523 if (!strcmp(filename, "-"))
2524 filename = "pipe:";
2525 input_filename = filename;
2526 }
2528 /* Called from the main */
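/* Startup order: av_register_all() must run before any input format can be
   probed; SDL is initialised (with SDL_INIT_EVENTTHREAD only on platforms
   that support it) before a window or audio device is opened; uninteresting
   SDL event types are ignored to keep the event queue small; the global
   flush_pkt sentinel used by the seek code is prepared; finally
   stream_open() and event_loop() take over. event_loop() never returns, so
   the trailing return 0 is never reached. */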
2529 int main(int argc, char **argv)
2530 {
2531 int flags;
2533 /* register all codecs, demuxers and protocols */
2534 av_register_all();
2536 #ifdef CONFIG_OS2
2537 MorphToPM(); // Morph the VIO application to a PM one to be able to use Win* functions
2539 // Make stdout and stderr unbuffered
2540 setbuf( stdout, NULL );
2541 setbuf( stderr, NULL );
2542 #endif
2544 parse_options(argc, argv, options);
2546 if (!input_filename)
2547 show_help();
2549 if (display_disable) {
2550 video_disable = 1;
2551 }
2552 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2553 #if !defined(__MINGW32__) && !defined(CONFIG_DARWIN)
2554 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
2555 #endif
2556 if (SDL_Init (flags)) {
2557 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2558 exit(1);
2559 }
2561 if (!display_disable) {
2562 #ifdef HAVE_SDL_VIDEO_SIZE
2563 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2564 fs_screen_width = vi->current_w;
2565 fs_screen_height = vi->current_h;
2566 #endif
2567 }
2569 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2570 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2571 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2572 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2574 av_init_packet(&flush_pkt);
2575 flush_pkt.data= "FLUSH";
2577 cur_stream = stream_open(input_filename, file_iformat);
2579 event_loop();
2581 /* never returns */
2583 return 0;
2584 }