/*
 * alffplay.c
 *
 * A pedagogical video player that really works! Now with seeking features.
 *
 * Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, and a tutorial by
 * Martin Bohme <boehme@inb.uni-luebeckREMOVETHIS.de>.
 *
 * Requires C99.
 */

#include <stdio.h>
#include <math.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/time.h>
#include <libavutil/avstring.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

#include <SDL.h>
#include <SDL_thread.h>
#include <SDL_video.h>

#include "threads.h"
#include "bool.h"

#include "AL/al.h"
#include "AL/alc.h"
#include "AL/alext.h"

static bool has_latency_check = false;
static LPALGETSOURCEDVSOFT alGetSourcedvSOFT;

#define AUDIO_BUFFER_TIME 100 /* In milliseconds, per-buffer */
#define AUDIO_BUFFER_QUEUE_SIZE 8 /* Number of buffers to queue */
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024) /* Bytes of compressed audio data to keep queued */
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024) /* Bytes of compressed video data to keep queued */
#define AV_SYNC_THRESHOLD 0.01
#define AV_NOSYNC_THRESHOLD 10.0
#define SAMPLE_CORRECTION_MAX_DIFF 0.1
#define AUDIO_DIFF_AVG_NB 20
#define VIDEO_PICTURE_QUEUE_SIZE 16

enum {
    FF_UPDATE_EVENT = SDL_USEREVENT,
    FF_REFRESH_EVENT,
    FF_QUIT_EVENT
};

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    volatile int nb_packets;
    volatile int size;
    volatile bool flushing;
    almtx_t mutex;
    alcnd_t cond;
} PacketQueue;

typedef struct VideoPicture {
    SDL_Texture *bmp;
    int width, height; /* Logical image size (actual size may be larger) */
    volatile bool updated;
    double pts;
} VideoPicture;

typedef struct AudioState {
    AVStream *st;

    PacketQueue q;
    AVPacket pkt;

    /* Used for clock difference average computation */
    double diff_accum;
    double diff_avg_coef;
    double diff_threshold;

    /* Time (in seconds) of the next sample to be buffered */
    double current_pts;

    /* Decompressed sample frame, and swresample context for conversion */
    AVFrame *decoded_aframe;
    struct SwrContext *swres_ctx;

    /* Conversion format, for what gets fed to OpenAL */
    int dst_ch_layout;
    enum AVSampleFormat dst_sample_fmt;

    /* Storage of converted samples */
    uint8_t *samples;
    ssize_t samples_len; /* In samples */
    ssize_t samples_pos;
    int samples_max;

    /* OpenAL format */
    ALenum format;
    ALint frame_size;

    ALuint source;
    ALuint buffer[AUDIO_BUFFER_QUEUE_SIZE];
    ALuint buffer_idx;
    almtx_t src_mutex;

    althrd_t thread;
} AudioState;

typedef struct VideoState {
    AVStream *st;

    PacketQueue q;

    double clock;
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double current_pts;
    /* time (av_gettime) at which we updated current_pts - used to have running video pts */
    int64_t current_pts_time;

    /* Decompressed video frame, and swscale context for conversion */
    AVFrame *decoded_vframe;
    struct SwsContext *swscale_ctx;

    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    almtx_t pictq_mutex;
    alcnd_t pictq_cond;

    althrd_t thread;
} VideoState;

typedef struct MovieState {
    AVFormatContext *pFormatCtx;
    int videoStream, audioStream;

    volatile bool seek_req;
    int64_t seek_pos;

    int av_sync_type;

    int64_t external_clock_base;

    AudioState audio;
    VideoState video;

    althrd_t parse_thread;

    char filename[1024];

    volatile bool quit;
} MovieState;

enum {
    AV_SYNC_AUDIO_MASTER,
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_MASTER,

    DEFAULT_AV_SYNC_TYPE = AV_SYNC_EXTERNAL_MASTER
};

static AVPacket flush_pkt = { .data = (uint8_t*)"FLUSH" };

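/* A simple producer/consumer queue of demuxed packets. The parser thread
 * appends packets with packet_queue_put(), while the audio and video decode
 * threads pull them off with packet_queue_get(), blocking on the condition
 * variable until a packet arrives or the queue is flagged as flushing. */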
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    almtx_init(&q->mutex, almtx_plain);
    alcnd_init(&q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;
    if(pkt != &flush_pkt && !pkt->buf && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if(!pkt1) return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;

    almtx_lock(&q->mutex);
    if(!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size;
    almtx_unlock(&q->mutex);

    alcnd_signal(&q->cond);
    return 0;
}

static int packet_queue_get(PacketQueue *q, AVPacket *pkt, MovieState *state)
{
    AVPacketList *pkt1;
    int ret = -1;

    almtx_lock(&q->mutex);
    while(!state->quit)
    {
        pkt1 = q->first_pkt;
        if(pkt1)
        {
            q->first_pkt = pkt1->next;
            if(!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size;
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        }

        if(q->flushing)
        {
            ret = 0;
            break;
        }
        alcnd_wait(&q->cond, &q->mutex);
    }
    almtx_unlock(&q->mutex);
    return ret;
}

static void packet_queue_clear(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    almtx_lock(&q->mutex);
    for(pkt = q->first_pkt;pkt != NULL;pkt = pkt1)
    {
        pkt1 = pkt->next;
        if(pkt->pkt.data != flush_pkt.data)
            av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    almtx_unlock(&q->mutex);
}

static void packet_queue_flush(PacketQueue *q)
{
    almtx_lock(&q->mutex);
    q->flushing = true;
    almtx_unlock(&q->mutex);
    alcnd_signal(&q->cond);
}

static void packet_queue_deinit(PacketQueue *q)
{
    packet_queue_clear(q);
    alcnd_destroy(&q->cond);
    almtx_destroy(&q->mutex);
}

static double get_audio_clock(AudioState *state)
{
    double pts;

    almtx_lock(&state->src_mutex);
    /* The audio clock is the timestamp of the sample currently being heard.
     * It's based on 4 components:
     * 1 - The timestamp of the next sample to buffer (state->current_pts)
     * 2 - The length of the source's buffer queue (AL_SEC_LENGTH_SOFT)
     * 3 - The offset OpenAL is currently at in the source (the first value
     *     from AL_SEC_OFFSET_LATENCY_SOFT)
     * 4 - The latency between OpenAL and the DAC (the second value from
     *     AL_SEC_OFFSET_LATENCY_SOFT)
     *
     * Subtracting the length of the source queue from the next sample's
     * timestamp gives the timestamp of the sample at start of the source
     * queue. Adding the source offset to that results in the timestamp for
     * OpenAL's current position, and subtracting the source latency from that
     * gives the timestamp of the sample currently at the DAC.
     */
    pts = state->current_pts;
    if(state->source)
    {
        ALdouble offset[2] = { 0.0, 0.0 };
        ALdouble queue_len = 0.0;
        ALint status;

        /* NOTE: The source state must be checked last, in case an underrun
         * occurs and the source stops between retrieving the offset+latency
         * and getting the state. */
        if(has_latency_check)
        {
            alGetSourcedvSOFT(state->source, AL_SEC_OFFSET_LATENCY_SOFT, offset);
            alGetSourcedvSOFT(state->source, AL_SEC_LENGTH_SOFT, &queue_len);
        }
        else
        {
            ALint ioffset, ilen;
            alGetSourcei(state->source, AL_SAMPLE_OFFSET, &ioffset);
            alGetSourcei(state->source, AL_SAMPLE_LENGTH_SOFT, &ilen);
            offset[0] = (double)ioffset / state->st->codec->sample_rate;
            queue_len = (double)ilen / state->st->codec->sample_rate;
        }
        alGetSourcei(state->source, AL_SOURCE_STATE, &status);

        /* If the source is AL_STOPPED, then there was an underrun and all
         * buffers are processed, so ignore the source queue. The audio thread
         * will put the source into an AL_INITIAL state and clear the queue
         * when it starts recovery. */
        if(status != AL_STOPPED)
            pts = pts - queue_len + offset[0];
        if(status == AL_PLAYING)
            pts = pts - offset[1];
    }
    almtx_unlock(&state->src_mutex);

    return (pts >= 0.0) ? pts : 0.0;
}

static double get_video_clock(VideoState *state)
{
    double delta = (av_gettime() - state->current_pts_time) / 1000000.0;
    return state->current_pts + delta;
}

static double get_external_clock(MovieState *movState)
{
    return (av_gettime()-movState->external_clock_base) / 1000000.0;
}

double get_master_clock(MovieState *movState)
{
    if(movState->av_sync_type == AV_SYNC_VIDEO_MASTER)
        return get_video_clock(&movState->video);
    if(movState->av_sync_type == AV_SYNC_AUDIO_MASTER)
        return get_audio_clock(&movState->audio);
    return get_external_clock(movState);
}

/* Return how many samples to skip to maintain sync (negative means to
 * duplicate samples). */
static int synchronize_audio(MovieState *movState)
{
    double diff, avg_diff;
    double ref_clock;

    if(movState->av_sync_type == AV_SYNC_AUDIO_MASTER)
        return 0;

    ref_clock = get_master_clock(movState);
    diff = ref_clock - get_audio_clock(&movState->audio);

    if(!(diff < AV_NOSYNC_THRESHOLD))
    {
        /* Difference is TOO big; reset diff stuff */
        movState->audio.diff_accum = 0.0;
        return 0;
    }

    /* Accumulate the diffs */
    movState->audio.diff_accum = movState->audio.diff_accum*movState->audio.diff_avg_coef + diff;
    avg_diff = movState->audio.diff_accum*(1.0 - movState->audio.diff_avg_coef);
    if(fabs(avg_diff) < movState->audio.diff_threshold)
        return 0;

    /* Constrain the per-update difference to avoid exceedingly large skips */
    if(!(diff <= SAMPLE_CORRECTION_MAX_DIFF))
        diff = SAMPLE_CORRECTION_MAX_DIFF;
    else if(!(diff >= -SAMPLE_CORRECTION_MAX_DIFF))
        diff = -SAMPLE_CORRECTION_MAX_DIFF;
    return (int)(diff*movState->audio.st->codec->sample_rate);
}

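/* Decodes audio packets from the queue into decoded_aframe and converts them
 * with swresample to the format chosen for OpenAL. Returns the number of
 * converted sample frames placed in audio.samples, or a negative value on
 * error or when a flush packet is encountered. */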
static int audio_decode_frame(MovieState *movState)
{
    AVPacket *pkt = &movState->audio.pkt;

    while(!movState->quit)
    {
        while(!movState->quit && pkt->size == 0)
        {
            av_free_packet(pkt);

            /* Get the next packet */
            int err;
            if((err=packet_queue_get(&movState->audio.q, pkt, movState)) <= 0)
            {
                if(err == 0)
                    break;
                return err;
            }
            if(pkt->data == flush_pkt.data)
            {
                avcodec_flush_buffers(movState->audio.st->codec);
                movState->audio.diff_accum = 0.0;
                movState->audio.current_pts = av_q2d(movState->audio.st->time_base)*pkt->pts;

                alSourceRewind(movState->audio.source);
                alSourcei(movState->audio.source, AL_BUFFER, 0);

                av_new_packet(pkt, 0);

                return -1;
            }

            /* If provided, update w/ pts */
            if(pkt->pts != AV_NOPTS_VALUE)
                movState->audio.current_pts = av_q2d(movState->audio.st->time_base)*pkt->pts;
        }

        AVFrame *frame = movState->audio.decoded_aframe;
        int got_frame = 0;
        int len1 = avcodec_decode_audio4(movState->audio.st->codec, frame,
                                         &got_frame, pkt);
        if(len1 < 0) break;

        if(len1 <= pkt->size)
        {
            /* Move the unread data to the front and clear the end bits */
            int remaining = pkt->size - len1;
            memmove(pkt->data, &pkt->data[len1], remaining);
            av_shrink_packet(pkt, remaining);
        }

        if(!got_frame || frame->nb_samples <= 0)
        {
            av_frame_unref(frame);
            continue;
        }

        if(frame->nb_samples > movState->audio.samples_max)
        {
            av_freep(&movState->audio.samples);
            av_samples_alloc(
                &movState->audio.samples, NULL, movState->audio.st->codec->channels,
                frame->nb_samples, movState->audio.dst_sample_fmt, 0
            );
            movState->audio.samples_max = frame->nb_samples;
        }
        /* Return the amount of sample frames converted */
        int data_size = swr_convert(movState->audio.swres_ctx,
            &movState->audio.samples, frame->nb_samples,
            (const uint8_t**)frame->data, frame->nb_samples
        );

        av_frame_unref(frame);
        return data_size;
    }

    return -1;
}

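/* Fills 'samples' with up to 'length' bytes of converted audio, applying the
 * correction from synchronize_audio() by skipping samples (positive skip) or
 * repeating the first sample (negative skip, samples_pos < 0) as needed. */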
static int read_audio(MovieState *movState, uint8_t *samples, int length)
{
    int sample_skip = synchronize_audio(movState);
    int audio_size = 0;

    /* Read the next chunk of data, refill the buffer, and queue it
     * on the source */
    length /= movState->audio.frame_size;
    while(audio_size < length)
    {
        if(movState->audio.samples_len <= 0 || movState->audio.samples_pos >= movState->audio.samples_len)
        {
            int frame_len = audio_decode_frame(movState);
            if(frame_len < 0) return -1;

            movState->audio.samples_len = frame_len;
            if(movState->audio.samples_len == 0)
                break;

            movState->audio.samples_pos = (movState->audio.samples_len < sample_skip) ?
                                          movState->audio.samples_len : sample_skip;
            sample_skip -= movState->audio.samples_pos;

            movState->audio.current_pts += (double)movState->audio.samples_pos /
                                           (double)movState->audio.st->codec->sample_rate;
            continue;
        }

        int rem = length - audio_size;
        if(movState->audio.samples_pos >= 0)
        {
            int n = movState->audio.frame_size;
            int len = movState->audio.samples_len - movState->audio.samples_pos;
            if(rem > len) rem = len;
            memcpy(samples + audio_size*n,
                   movState->audio.samples + movState->audio.samples_pos*n,
                   rem*n);
        }
        else
        {
            int n = movState->audio.frame_size;
            int len = -movState->audio.samples_pos;
            if(rem > len) rem = len;

            /* Add samples by copying the first sample */
            if(n == 1)
            {
                uint8_t sample = ((uint8_t*)movState->audio.samples)[0];
                uint8_t *q = (uint8_t*)samples + audio_size;
                for(int i = 0;i < rem;i++)
                    *(q++) = sample;
            }
            else if(n == 2)
            {
                uint16_t sample = ((uint16_t*)movState->audio.samples)[0];
                uint16_t *q = (uint16_t*)samples + audio_size;
                for(int i = 0;i < rem;i++)
                    *(q++) = sample;
            }
            else if(n == 4)
            {
                uint32_t sample = ((uint32_t*)movState->audio.samples)[0];
                uint32_t *q = (uint32_t*)samples + audio_size;
                for(int i = 0;i < rem;i++)
                    *(q++) = sample;
            }
            else if(n == 8)
            {
                uint64_t sample = ((uint64_t*)movState->audio.samples)[0];
                uint64_t *q = (uint64_t*)samples + audio_size;
                for(int i = 0;i < rem;i++)
                    *(q++) = sample;
            }
            else
            {
                uint8_t *sample = movState->audio.samples;
                uint8_t *q = samples + audio_size*n;
                for(int i = 0;i < rem;i++)
                {
                    memcpy(q, sample, n);
                    q += n;
                }
            }
        }

        movState->audio.samples_pos += rem;
        movState->audio.current_pts += (double)rem / movState->audio.st->codec->sample_rate;
        audio_size += rem;
    }

    return audio_size * movState->audio.frame_size;
}

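/* Audio playback thread. Picks an OpenAL buffer format matching the stream
 * (8-bit, 16-bit, or float32; mono or stereo), sets up swresample, then
 * streams audio in a loop: unqueue processed buffers, decode and queue new
 * ones until AUDIO_BUFFER_QUEUE_SIZE are pending, restart the source after an
 * underrun, and sleep for roughly one buffer's worth of time. */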
static int audio_thread(void *userdata)
{
    MovieState *movState = (MovieState*)userdata;
    uint8_t *samples = NULL;
    ALsizei buffer_len;

    alGenBuffers(AUDIO_BUFFER_QUEUE_SIZE, movState->audio.buffer);
    alGenSources(1, &movState->audio.source);

    alSourcei(movState->audio.source, AL_SOURCE_RELATIVE, AL_TRUE);
    alSourcei(movState->audio.source, AL_ROLLOFF_FACTOR, 0);

    av_new_packet(&movState->audio.pkt, 0);

    /* Find a suitable format for OpenAL. Currently does not handle surround
     * sound (everything non-mono becomes stereo). */
    if(movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_U8 ||
       movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_U8P)
    {
        movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_U8;
        movState->audio.frame_size = 1;
        if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
        {
            movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
            movState->audio.frame_size *= 1;
            movState->audio.format = AL_FORMAT_MONO8;
        }
        else
        {
            movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
            movState->audio.frame_size *= 2;
            movState->audio.format = AL_FORMAT_STEREO8;
        }
    }
    else if((movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_FLT ||
             movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
            alIsExtensionPresent("AL_EXT_FLOAT32"))
    {
        movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_FLT;
        movState->audio.frame_size = 4;
        if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
        {
            movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
            movState->audio.frame_size *= 1;
            movState->audio.format = AL_FORMAT_MONO_FLOAT32;
        }
        else
        {
            movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
            movState->audio.frame_size *= 2;
            movState->audio.format = AL_FORMAT_STEREO_FLOAT32;
        }
    }
    else
    {
        movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_S16;
        movState->audio.frame_size = 2;
        if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
        {
            movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
            movState->audio.frame_size *= 1;
            movState->audio.format = AL_FORMAT_MONO16;
        }
        else
        {
            movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
            movState->audio.frame_size *= 2;
            movState->audio.format = AL_FORMAT_STEREO16;
        }
    }
    buffer_len = AUDIO_BUFFER_TIME * movState->audio.st->codec->sample_rate / 1000 *
                 movState->audio.frame_size;
    samples = av_malloc(buffer_len);

    movState->audio.samples = NULL;
    movState->audio.samples_max = 0;
    movState->audio.samples_pos = 0;
    movState->audio.samples_len = 0;

    if(!(movState->audio.decoded_aframe=av_frame_alloc()))
    {
        fprintf(stderr, "Failed to allocate audio frame\n");
        goto finish;
    }

    movState->audio.swres_ctx = swr_alloc_set_opts(NULL,
        movState->audio.dst_ch_layout,
        movState->audio.dst_sample_fmt,
        movState->audio.st->codec->sample_rate,
        movState->audio.st->codec->channel_layout,
        movState->audio.st->codec->sample_fmt,
        movState->audio.st->codec->sample_rate,
        0, NULL
    );
    if(!movState->audio.swres_ctx || swr_init(movState->audio.swres_ctx) != 0)
    {
        fprintf(stderr, "Failed to initialize audio converter\n");
        goto finish;
    }

    almtx_lock(&movState->audio.src_mutex);
    while(alGetError() == AL_NO_ERROR && !movState->quit)
    {
        /* First remove any processed buffers. */
        ALint processed;
        alGetSourcei(movState->audio.source, AL_BUFFERS_PROCESSED, &processed);
        alSourceUnqueueBuffers(movState->audio.source, processed, (ALuint[AUDIO_BUFFER_QUEUE_SIZE]){});

        /* Refill the buffer queue. */
        ALint queued;
        alGetSourcei(movState->audio.source, AL_BUFFERS_QUEUED, &queued);
        while(queued < AUDIO_BUFFER_QUEUE_SIZE)
        {
            int audio_size;

            /* Read the next chunk of data, fill the buffer, and queue it on
             * the source */
            audio_size = read_audio(movState, samples, buffer_len);
            if(audio_size < 0) break;

            ALuint bufid = movState->audio.buffer[movState->audio.buffer_idx++];
            movState->audio.buffer_idx %= AUDIO_BUFFER_QUEUE_SIZE;

            alBufferData(bufid, movState->audio.format, samples, audio_size,
                         movState->audio.st->codec->sample_rate);
            alSourceQueueBuffers(movState->audio.source, 1, &bufid);
            queued++;
        }

        /* Check that the source is playing. */
        ALint state;
        alGetSourcei(movState->audio.source, AL_SOURCE_STATE, &state);
        if(state == AL_STOPPED)
        {
            /* AL_STOPPED means there was an underrun. Double-check that all
             * processed buffers are removed, then rewind the source to get it
             * back into an AL_INITIAL state. */
            alGetSourcei(movState->audio.source, AL_BUFFERS_PROCESSED, &processed);
            alSourceUnqueueBuffers(movState->audio.source, processed, (ALuint[AUDIO_BUFFER_QUEUE_SIZE]){});
            alSourceRewind(movState->audio.source);
            continue;
        }

        almtx_unlock(&movState->audio.src_mutex);

        /* (re)start the source if needed, and wait for a buffer to finish */
        if(state != AL_PLAYING && state != AL_PAUSED)
        {
            alGetSourcei(movState->audio.source, AL_BUFFERS_QUEUED, &queued);
            if(queued > 0) alSourcePlay(movState->audio.source);
        }
        SDL_Delay(AUDIO_BUFFER_TIME);

        almtx_lock(&movState->audio.src_mutex);
    }
    almtx_unlock(&movState->audio.src_mutex);

finish:
    av_frame_free(&movState->audio.decoded_aframe);
    swr_free(&movState->audio.swres_ctx);

    av_freep(&samples);
    av_freep(&movState->audio.samples);

    alDeleteSources(1, &movState->audio.source);
    alDeleteBuffers(AUDIO_BUFFER_QUEUE_SIZE, movState->audio.buffer);

    return 0;
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    (void)interval;

    SDL_PushEvent(&(SDL_Event){ .user={.type=FF_REFRESH_EVENT, .data1=opaque} });
    return 0; /* 0 means stop timer */
}

/* Schedule a video refresh in 'delay' ms */
static void schedule_refresh(MovieState *movState, int delay)
{
    SDL_AddTimer(delay, sdl_refresh_timer_cb, movState);
}

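/* Draws the current picture, letterboxed to the window while preserving the
 * stream's display aspect ratio. */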
static void video_display(MovieState *movState, SDL_Window *screen, SDL_Renderer *renderer)
{
    VideoPicture *vp = &movState->video.pictq[movState->video.pictq_rindex];

    if(!vp->bmp)
        return;

    float aspect_ratio;
    int win_w, win_h;
    int w, h, x, y;

    if(movState->video.st->codec->sample_aspect_ratio.num == 0)
        aspect_ratio = 0.0f;
    else
    {
        aspect_ratio = av_q2d(movState->video.st->codec->sample_aspect_ratio) *
                       movState->video.st->codec->width /
                       movState->video.st->codec->height;
    }
    if(aspect_ratio <= 0.0f)
    {
        aspect_ratio = (float)movState->video.st->codec->width /
                       (float)movState->video.st->codec->height;
    }

    SDL_GetWindowSize(screen, &win_w, &win_h);
    h = win_h;
    w = ((int)rint(h * aspect_ratio) + 3) & ~3;
    if(w > win_w)
    {
        w = win_w;
        h = ((int)rint(w / aspect_ratio) + 3) & ~3;
    }
    x = (win_w - w) / 2;
    y = (win_h - h) / 2;

    SDL_RenderCopy(renderer, vp->bmp,
        &(SDL_Rect){ .x=0, .y=0, .w=vp->width, .h=vp->height },
        &(SDL_Rect){ .x=x, .y=y, .w=w, .h=h }
    );
    SDL_RenderPresent(renderer);
}

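/* Handler for FF_REFRESH_EVENT. Shows the next queued picture when its
 * presentation time arrives, scaling the inter-frame delay (or dropping
 * frames) to keep the video in step with the master clock, then schedules
 * the next refresh. */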
static void video_refresh_timer(MovieState *movState, SDL_Window *screen, SDL_Renderer *renderer)
{
    if(!movState->video.st)
    {
        schedule_refresh(movState, 100);
        return;
    }

    almtx_lock(&movState->video.pictq_mutex);
retry:
    if(movState->video.pictq_size == 0)
        schedule_refresh(movState, 1);
    else
    {
        VideoPicture *vp = &movState->video.pictq[movState->video.pictq_rindex];
        double actual_delay, delay, sync_threshold, ref_clock, diff;

        movState->video.current_pts = vp->pts;
        movState->video.current_pts_time = av_gettime();

        delay = vp->pts - movState->video.frame_last_pts; /* the pts from last time */
        if(delay <= 0 || delay >= 1.0)
        {
            /* if incorrect delay, use previous one */
            delay = movState->video.frame_last_delay;
        }
        /* save for next time */
        movState->video.frame_last_delay = delay;
        movState->video.frame_last_pts = vp->pts;

        /* Update delay to sync to clock if not master source. */
        if(movState->av_sync_type != AV_SYNC_VIDEO_MASTER)
        {
            ref_clock = get_master_clock(movState);
            diff = vp->pts - ref_clock;

            /* Skip or repeat the frame. Take delay into account. */
            sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
            if(fabs(diff) < AV_NOSYNC_THRESHOLD)
            {
                if(diff <= -sync_threshold)
                    delay = 0;
                else if(diff >= sync_threshold)
                    delay = 2 * delay;
            }
        }

        movState->video.frame_timer += delay;
        /* Compute the REAL delay. */
        actual_delay = movState->video.frame_timer - (av_gettime() / 1000000.0);
        if(!(actual_delay >= 0.010))
        {
            /* We don't have time to handle this picture, just skip to the next one. */
            movState->video.pictq_rindex = (movState->video.pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE;
            movState->video.pictq_size--;
            alcnd_signal(&movState->video.pictq_cond);
            goto retry;
        }
        schedule_refresh(movState, (int)(actual_delay*1000.0 + 0.5));

        /* Show the picture! */
        video_display(movState, screen, renderer);

        /* Update queue for next picture. */
        movState->video.pictq_rindex = (movState->video.pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE;
        movState->video.pictq_size--;
        alcnd_signal(&movState->video.pictq_cond);
    }
    almtx_unlock(&movState->video.pictq_mutex);
}

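/* Runs on the main thread in response to FF_UPDATE_EVENT: (re)creates the
 * YV12 texture if the frame size changed, then uploads the decoded frame,
 * converting with swscale when the source isn't already YUV420P. */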
static void update_picture(MovieState *movState, bool *first_update, SDL_Window *screen, SDL_Renderer *renderer)
{
    VideoPicture *vp = &movState->video.pictq[movState->video.pictq_windex];

    /* allocate or resize the buffer! */
    if(!vp->bmp || vp->width != movState->video.st->codec->width ||
       vp->height != movState->video.st->codec->height)
    {
        if(vp->bmp)
            SDL_DestroyTexture(vp->bmp);
        vp->bmp = SDL_CreateTexture(
            renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING,
            movState->video.st->codec->coded_width, movState->video.st->codec->coded_height
        );
        if(!vp->bmp)
            fprintf(stderr, "Failed to create YV12 texture!\n");
        vp->width = movState->video.st->codec->width;
        vp->height = movState->video.st->codec->height;

        if(*first_update && vp->width > 0 && vp->height > 0)
        {
            /* For the first update, set the window size to the video size. */
            *first_update = false;

            int w = vp->width;
            int h = vp->height;
            if(movState->video.st->codec->sample_aspect_ratio.num != 0 &&
               movState->video.st->codec->sample_aspect_ratio.den != 0)
            {
                double aspect_ratio = av_q2d(movState->video.st->codec->sample_aspect_ratio);
                if(aspect_ratio >= 1.0)
                    w = (int)(w*aspect_ratio + 0.5);
                else if(aspect_ratio > 0.0)
                    h = (int)(h/aspect_ratio + 0.5);
            }
            SDL_SetWindowSize(screen, w, h);
        }
    }

    if(vp->bmp)
    {
        AVFrame *frame = movState->video.decoded_vframe;
        void *pixels = NULL;
        int pitch = 0;

        if(movState->video.st->codec->pix_fmt == PIX_FMT_YUV420P)
            SDL_UpdateYUVTexture(vp->bmp, NULL,
                frame->data[0], frame->linesize[0],
                frame->data[1], frame->linesize[1],
                frame->data[2], frame->linesize[2]
            );
        else if(SDL_LockTexture(vp->bmp, NULL, &pixels, &pitch) != 0)
            fprintf(stderr, "Failed to lock texture\n");
        else
        {
            // Convert the image into YUV format that SDL uses
            int coded_w = movState->video.st->codec->coded_width;
            int coded_h = movState->video.st->codec->coded_height;
            int w = movState->video.st->codec->width;
            int h = movState->video.st->codec->height;
            if(!movState->video.swscale_ctx)
                movState->video.swscale_ctx = sws_getContext(
                    w, h, movState->video.st->codec->pix_fmt,
                    w, h, PIX_FMT_YUV420P, SWS_X, NULL, NULL, NULL
                );

            /* point pict at the queue */
            AVPicture pict;
            pict.data[0] = pixels;
            pict.data[2] = pict.data[0] + coded_w*coded_h;
            pict.data[1] = pict.data[2] + coded_w*coded_h/4;

            pict.linesize[0] = pitch;
            pict.linesize[2] = pitch / 2;
            pict.linesize[1] = pitch / 2;

            sws_scale(movState->video.swscale_ctx, (const uint8_t**)frame->data,
                      frame->linesize, 0, h, pict.data, pict.linesize);
            SDL_UnlockTexture(vp->bmp);
        }
    }

    almtx_lock(&movState->video.pictq_mutex);
    vp->updated = true;
    almtx_unlock(&movState->video.pictq_mutex);
    alcnd_signal(&movState->video.pictq_cond);
}

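/* Called from the video thread to hand a decoded frame to the main thread.
 * Waits for a free slot in the picture queue, asks the main thread to upload
 * the frame (FF_UPDATE_EVENT), and waits for the update before advancing the
 * write index. */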
static int queue_picture(MovieState *movState, double pts)
{
    /* Wait until we have space for a new pic */
    almtx_lock(&movState->video.pictq_mutex);
    while(movState->video.pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !movState->quit)
        alcnd_wait(&movState->video.pictq_cond, &movState->video.pictq_mutex);
    almtx_unlock(&movState->video.pictq_mutex);

    if(movState->quit)
        return -1;

    VideoPicture *vp = &movState->video.pictq[movState->video.pictq_windex];

    /* We have to create/update the picture in the main thread */
    vp->updated = false;
    SDL_PushEvent(&(SDL_Event){ .user={.type=FF_UPDATE_EVENT, .data1=movState} });

    /* Wait until the picture is updated. */
    almtx_lock(&movState->video.pictq_mutex);
    while(!vp->updated && !movState->quit)
        alcnd_wait(&movState->video.pictq_cond, &movState->video.pictq_mutex);
    almtx_unlock(&movState->video.pictq_mutex);
    if(movState->quit)
        return -1;
    vp->pts = pts;

    movState->video.pictq_windex = (movState->video.pictq_windex+1)%VIDEO_PICTURE_QUEUE_SIZE;
    almtx_lock(&movState->video.pictq_mutex);
    movState->video.pictq_size++;
    almtx_unlock(&movState->video.pictq_mutex);

    return 0;
}

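/* Updates the running video clock for the frame about to be queued, and
 * returns the pts to use for it (falling back to the clock when the frame
 * has no pts of its own). */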
static double synchronize_video(MovieState *movState, double pts)
{
    double frame_delay;

    if(pts == 0.0) /* if we aren't given a pts, set it to the clock */
        pts = movState->video.clock;
    else /* if we have pts, set video clock to it */
        movState->video.clock = pts;

    /* update the video clock */
    frame_delay = av_q2d(movState->video.st->codec->time_base);
    /* if we are repeating a frame, adjust clock accordingly */
    frame_delay += movState->video.decoded_vframe->repeat_pict * (frame_delay * 0.5);
    movState->video.clock += frame_delay;
    return pts;
}

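/* Video decode thread. Pulls packets off the video queue, handles flush
 * packets from seeks, decodes frames, and queues them for display with a
 * synchronized pts. */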
int video_thread(void *arg)
{
    MovieState *movState = (MovieState*)arg;
    AVPacket *packet = (AVPacket[1]){};
    int64_t saved_pts, pkt_pts;
    int frameFinished;

    movState->video.decoded_vframe = av_frame_alloc();
    while(packet_queue_get(&movState->video.q, packet, movState) >= 0)
    {
        if(packet->data == flush_pkt.data)
        {
            avcodec_flush_buffers(movState->video.st->codec);

            almtx_lock(&movState->video.pictq_mutex);
            movState->video.pictq_size = 0;
            movState->video.pictq_rindex = 0;
            movState->video.pictq_windex = 0;
            almtx_unlock(&movState->video.pictq_mutex);

            movState->video.clock = av_q2d(movState->video.st->time_base)*packet->pts;
            movState->video.current_pts = movState->video.clock;
            movState->video.current_pts_time = av_gettime();
            continue;
        }

        pkt_pts = packet->pts;

        /* Decode video frame */
        avcodec_decode_video2(movState->video.st->codec, movState->video.decoded_vframe,
                              &frameFinished, packet);
        if(pkt_pts != AV_NOPTS_VALUE && !movState->video.decoded_vframe->opaque)
        {
            /* Store the packet's original pts in the frame, in case the frame
             * is not finished decoding yet. */
            saved_pts = pkt_pts;
            movState->video.decoded_vframe->opaque = &saved_pts;
        }

        av_free_packet(packet);

        if(frameFinished)
        {
            double pts = av_q2d(movState->video.st->time_base);
            if(packet->dts != AV_NOPTS_VALUE)
                pts *= packet->dts;
            else if(movState->video.decoded_vframe->opaque)
                pts *= *(int64_t*)movState->video.decoded_vframe->opaque;
            else
                pts *= 0.0;
            movState->video.decoded_vframe->opaque = NULL;

            pts = synchronize_video(movState, pts);
            if(queue_picture(movState, pts) < 0)
                break;
        }
    }

    sws_freeContext(movState->video.swscale_ctx);
    movState->video.swscale_ctx = NULL;
    av_frame_free(&movState->video.decoded_vframe);
    return 0;
}

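/* Opens the decoder for the given stream and spawns the matching audio or
 * video thread. */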
static int stream_component_open(MovieState *movState, int stream_index)
{
    AVFormatContext *pFormatCtx = movState->pFormatCtx;
    AVCodecContext *codecCtx;
    AVCodec *codec;

    if(stream_index < 0 || (unsigned int)stream_index >= pFormatCtx->nb_streams)
        return -1;

    /* Get a pointer to the codec context for the video stream, and open the
     * associated codec */
    codecCtx = pFormatCtx->streams[stream_index]->codec;

    codec = avcodec_find_decoder(codecCtx->codec_id);
    if(!codec || avcodec_open2(codecCtx, codec, NULL) < 0)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    /* Initialize and start the media type handler */
    switch(codecCtx->codec_type)
    {
        case AVMEDIA_TYPE_AUDIO:
            movState->audioStream = stream_index;
            movState->audio.st = pFormatCtx->streams[stream_index];

            /* Averaging filter for audio sync */
            movState->audio.diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
            /* Correct audio only if larger error than this */
            movState->audio.diff_threshold = 2.0 * 0.050/* 50 ms */;

            memset(&movState->audio.pkt, 0, sizeof(movState->audio.pkt));
            if(althrd_create(&movState->audio.thread, audio_thread, movState) != althrd_success)
            {
                movState->audioStream = -1;
                movState->audio.st = NULL;
            }
            break;

        case AVMEDIA_TYPE_VIDEO:
            movState->videoStream = stream_index;
            movState->video.st = pFormatCtx->streams[stream_index];

            movState->video.current_pts_time = av_gettime();
            movState->video.frame_timer = (double)movState->video.current_pts_time /
                                          1000000.0;
            movState->video.frame_last_delay = 40e-3;

            if(althrd_create(&movState->video.thread, video_thread, movState) != althrd_success)
            {
                movState->videoStream = -1;
                movState->video.st = NULL;
            }
            break;

        default:
            break;
    }

    return 0;
}

static int decode_interrupt_cb(void *ctx)
{
    return ((MovieState*)ctx)->quit;
}

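/* Demuxer thread. Finds the first audio and video streams, opens them, then
 * reads packets and routes each to the proper queue, pausing when a queue is
 * full and handling seek requests by flushing the queues and injecting a
 * 'flush' packet carrying the new clock time. */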
int decode_thread(void *arg)
{
    MovieState *movState = (MovieState *)arg;
    AVFormatContext *fmtCtx = movState->pFormatCtx;
    AVPacket *packet = (AVPacket[1]){};
    int video_index = -1;
    int audio_index = -1;

    movState->videoStream = -1;
    movState->audioStream = -1;

    /* Dump information about file onto standard error */
    av_dump_format(fmtCtx, 0, movState->filename, 0);

    /* Find the first video and audio streams */
    for(unsigned int i = 0;i < fmtCtx->nb_streams;i++)
    {
        if(fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
            video_index = i;
        else if(fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
            audio_index = i;
    }

    movState->external_clock_base = av_gettime();
    if(audio_index >= 0)
        stream_component_open(movState, audio_index);
    if(video_index >= 0)
        stream_component_open(movState, video_index);

    if(movState->videoStream < 0 && movState->audioStream < 0)
    {
        fprintf(stderr, "%s: could not open codecs\n", movState->filename);
        goto fail;
    }

    /* Main packet handling loop */
    while(!movState->quit)
    {
        if(movState->seek_req)
        {
            int64_t seek_target = movState->seek_pos;
            int stream_index = -1;

            /* Prefer seeking on the video stream. */
            if(movState->videoStream >= 0)
                stream_index = movState->videoStream;
            else if(movState->audioStream >= 0)
                stream_index = movState->audioStream;

            /* Get a seek timestamp for the appropriate stream. */
            int64_t timestamp = seek_target;
            if(stream_index >= 0)
                timestamp = av_rescale_q(seek_target, AV_TIME_BASE_Q, fmtCtx->streams[stream_index]->time_base);

            if(av_seek_frame(movState->pFormatCtx, stream_index, timestamp, 0) < 0)
                fprintf(stderr, "%s: error while seeking\n", movState->pFormatCtx->filename);
            else
            {
                /* Seek successful, clear the packet queues and send a special
                 * 'flush' packet with the new stream clock time. */
                if(movState->audioStream >= 0)
                {
                    packet_queue_clear(&movState->audio.q);
                    flush_pkt.pts = av_rescale_q(seek_target, AV_TIME_BASE_Q,
                        fmtCtx->streams[movState->audioStream]->time_base
                    );
                    packet_queue_put(&movState->audio.q, &flush_pkt);
                }
                if(movState->videoStream >= 0)
                {
                    packet_queue_clear(&movState->video.q);
                    flush_pkt.pts = av_rescale_q(seek_target, AV_TIME_BASE_Q,
                        fmtCtx->streams[movState->videoStream]->time_base
                    );
                    packet_queue_put(&movState->video.q, &flush_pkt);
                }
                movState->external_clock_base = av_gettime() - seek_target;
            }
            movState->seek_req = false;
        }

        if(movState->audio.q.size >= MAX_AUDIOQ_SIZE ||
           movState->video.q.size >= MAX_VIDEOQ_SIZE)
        {
            SDL_Delay(10);
            continue;
        }

        if(av_read_frame(movState->pFormatCtx, packet) < 0)
        {
            packet_queue_flush(&movState->video.q);
            packet_queue_flush(&movState->audio.q);
            break;
        }

        /* Place the packet in the queue it's meant for, or discard it. */
        if(packet->stream_index == movState->videoStream)
            packet_queue_put(&movState->video.q, packet);
        else if(packet->stream_index == movState->audioStream)
            packet_queue_put(&movState->audio.q, packet);
        else
            av_free_packet(packet);
    }

    /* all done - wait for it */
    while(!movState->quit)
    {
        if(movState->audio.q.nb_packets == 0 && movState->video.q.nb_packets == 0)
            break;
        SDL_Delay(100);
    }

fail:
    movState->quit = true;
    packet_queue_flush(&movState->video.q);
    packet_queue_flush(&movState->audio.q);

    if(movState->videoStream >= 0)
        althrd_join(movState->video.thread, NULL);
    if(movState->audioStream >= 0)
        althrd_join(movState->audio.thread, NULL);

    SDL_PushEvent(&(SDL_Event){ .user={.type=FF_QUIT_EVENT, .data1=movState} });

    return 0;
}

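/* Requests a seek relative to the current master clock position; the parse
 * thread performs the actual av_seek_frame(). */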
static void stream_seek(MovieState *movState, double incr)
{
    if(!movState->seek_req)
    {
        double newtime = get_master_clock(movState)+incr;
        if(newtime <= 0.0) movState->seek_pos = 0;
        else movState->seek_pos = (int64_t)(newtime * AV_TIME_BASE);
        movState->seek_req = true;
    }
}

int main(int argc, char *argv[])
{
    SDL_Event event;
    MovieState *movState;
    bool first_update = true;
    SDL_Window *screen;
    SDL_Renderer *renderer;
    ALCdevice *device;
    ALCcontext *context;

    if(argc < 2)
    {
        fprintf(stderr, "Usage: %s <file>\n", argv[0]);
        return 1;
    }
    /* Register all formats and codecs */
    av_register_all();
    /* Initialize networking protocols */
    avformat_network_init();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
    {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        return 1;
    }

    /* Make a window to put our video */
    screen = SDL_CreateWindow("alffplay", 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
    if(!screen)
    {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return 1;
    }
    /* Make a renderer to handle the texture image surface and rendering. */
    renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
    if(renderer)
    {
        SDL_RendererInfo rinf;
        bool ok = false;

        /* Make sure the renderer supports YV12 textures. If not, fallback to a
         * software renderer. */
        if(SDL_GetRendererInfo(renderer, &rinf) == 0)
        {
            for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
                ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_YV12);
        }
        if(!ok)
        {
            fprintf(stderr, "YV12 pixelformat textures not supported on renderer %s\n", rinf.name);
            SDL_DestroyRenderer(renderer);
            renderer = NULL;
        }
    }
    if(!renderer)
        renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_SOFTWARE);
    if(!renderer)
    {
        fprintf(stderr, "SDL: could not create renderer - exiting\n");
        return 1;
    }
    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderFillRect(renderer, NULL);
    SDL_RenderPresent(renderer);

    /* Open an audio device */
    device = alcOpenDevice(NULL);
    if(!device)
    {
        fprintf(stderr, "OpenAL: could not open device - exiting\n");
        return 1;
    }
    context = alcCreateContext(device, NULL);
    if(!context)
    {
        fprintf(stderr, "OpenAL: could not create context - exiting\n");
        return 1;
    }
    if(alcMakeContextCurrent(context) == ALC_FALSE)
    {
        fprintf(stderr, "OpenAL: could not make context current - exiting\n");
        return 1;
    }

    if(!alIsExtensionPresent("AL_SOFT_source_length"))
    {
        fprintf(stderr, "Required AL_SOFT_source_length not supported - exiting\n");
        return 1;
    }

    if(!alIsExtensionPresent("AL_SOFT_source_latency"))
        fprintf(stderr, "AL_SOFT_source_latency not supported, audio may be a bit laggy.\n");
    else
    {
        alGetSourcedvSOFT = alGetProcAddress("alGetSourcedvSOFT");
        has_latency_check = true;
    }

    movState = av_mallocz(sizeof(MovieState));

    av_strlcpy(movState->filename, argv[1], sizeof(movState->filename));

    packet_queue_init(&movState->audio.q);
    packet_queue_init(&movState->video.q);

    almtx_init(&movState->video.pictq_mutex, almtx_plain);
    alcnd_init(&movState->video.pictq_cond);
    almtx_init(&movState->audio.src_mutex, almtx_recursive);

    movState->av_sync_type = DEFAULT_AV_SYNC_TYPE;

    movState->pFormatCtx = avformat_alloc_context();
    movState->pFormatCtx->interrupt_callback = (AVIOInterruptCB){.callback=decode_interrupt_cb, .opaque=movState};

    if(avio_open2(&movState->pFormatCtx->pb, movState->filename, AVIO_FLAG_READ,
                  &movState->pFormatCtx->interrupt_callback, NULL))
    {
        fprintf(stderr, "Failed to open %s\n", movState->filename);
        return 1;
    }

    /* Open movie file */
    if(avformat_open_input(&movState->pFormatCtx, movState->filename, NULL, NULL) != 0)
    {
        fprintf(stderr, "Failed to open %s\n", movState->filename);
        return 1;
    }

    /* Retrieve stream information */
    if(avformat_find_stream_info(movState->pFormatCtx, NULL) < 0)
    {
        fprintf(stderr, "%s: failed to find stream info\n", movState->filename);
        return 1;
    }

    schedule_refresh(movState, 40);

    if(althrd_create(&movState->parse_thread, decode_thread, movState) != althrd_success)
    {
        fprintf(stderr, "Failed to create parse thread!\n");
        return 1;
    }
    while(SDL_WaitEvent(&event) == 1)
    {
        switch(event.type)
        {
            case SDL_KEYDOWN:
                switch(event.key.keysym.sym)
                {
                    case SDLK_ESCAPE:
                        movState->quit = true;
                        break;

                    case SDLK_LEFT:
                        stream_seek(movState, -10.0);
                        break;
                    case SDLK_RIGHT:
                        stream_seek(movState, 10.0);
                        break;
                    case SDLK_UP:
                        stream_seek(movState, 30.0);
                        break;
                    case SDLK_DOWN:
                        stream_seek(movState, -30.0);
                        break;

                    default:
                        break;
                }
                break;

            case SDL_WINDOWEVENT:
                switch(event.window.event)
                {
                    case SDL_WINDOWEVENT_RESIZED:
                        SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
                        SDL_RenderFillRect(renderer, NULL);
                        break;

                    default:
                        break;
                }
                break;

            case SDL_QUIT:
                movState->quit = true;
                break;

            case FF_UPDATE_EVENT:
                update_picture(event.user.data1, &first_update, screen, renderer);
                break;

            case FF_REFRESH_EVENT:
                video_refresh_timer(event.user.data1, screen, renderer);
                break;

            case FF_QUIT_EVENT:
                althrd_join(movState->parse_thread, NULL);

                avformat_close_input(&movState->pFormatCtx);

                almtx_destroy(&movState->audio.src_mutex);
                almtx_destroy(&movState->video.pictq_mutex);
                alcnd_destroy(&movState->video.pictq_cond);
                packet_queue_deinit(&movState->video.q);
                packet_queue_deinit(&movState->audio.q);

                alcMakeContextCurrent(NULL);
                alcDestroyContext(context);
                alcCloseDevice(device);

                SDL_Quit();
                exit(0);

            default:
                break;
        }
    }

    fprintf(stderr, "SDL_WaitEvent error - %s\n", SDL_GetError());
    return 1;
}