1 /*
2 * alffplay.c
4 * A pedagogical video player that really works! Now with seeking features.
6 * Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, and a tutorial by
7 * Martin Bohme <boehme@inb.uni-luebeckREMOVETHIS.de>.
9 * Requires C99.
12 #include <stdio.h>
13 #include <math.h>
15 #include <libavcodec/avcodec.h>
16 #include <libavformat/avformat.h>
17 #include <libavformat/avio.h>
18 #include <libavutil/time.h>
19 #include <libavutil/avstring.h>
20 #include <libavutil/channel_layout.h>
21 #include <libswscale/swscale.h>
22 #include <libswresample/swresample.h>
24 #include <SDL.h>
25 #include <SDL_thread.h>
26 #include <SDL_video.h>
28 #include "threads.h"
29 #include "bool.h"
31 #include "AL/al.h"
32 #include "AL/alc.h"
33 #include "AL/alext.h"
36 static bool has_latency_check = false;
37 static LPALGETSOURCEDVSOFT alGetSourcedvSOFT;
39 #define AUDIO_BUFFER_TIME 100 /* In milliseconds, per-buffer */
40 #define AUDIO_BUFFER_QUEUE_SIZE 8 /* Number of buffers to queue */
41 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024) /* Bytes of compressed audio data to keep queued */
42 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024) /* Bytes of compressed video data to keep queued */
43 #define AV_SYNC_THRESHOLD 0.01
44 #define AV_NOSYNC_THRESHOLD 10.0
45 #define SAMPLE_CORRECTION_MAX_DIFF 0.1
46 #define AUDIO_DIFF_AVG_NB 20
47 #define VIDEO_PICTURE_QUEUE_SIZE 16
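/* A rough guide to how these tunables interact: the OpenAL source keeps up
 * to AUDIO_BUFFER_QUEUE_SIZE buffers of AUDIO_BUFFER_TIME milliseconds each,
 * so 8 * 100ms = 800ms of decoded audio stays queued. AV_SYNC_THRESHOLD is
 * the smallest clock difference (in seconds) that can trigger a video frame
 * skip or repeat, AV_NOSYNC_THRESHOLD is the difference beyond which no
 * correction is attempted at all, SAMPLE_CORRECTION_MAX_DIFF caps each audio
 * correction at 0.1 seconds' worth of samples, and AUDIO_DIFF_AVG_NB sets
 * how many measurements the audio clock-difference average effectively
 * spans. */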
49 enum {
50 FF_UPDATE_EVENT = SDL_USEREVENT,
51 FF_REFRESH_EVENT,
52 FF_QUIT_EVENT
56 typedef struct PacketQueue {
57 AVPacketList *first_pkt, *last_pkt;
58 volatile int nb_packets;
59 volatile int size;
60 volatile bool flushing;
61 almtx_t mutex;
62 alcnd_t cond;
63 } PacketQueue;
65 typedef struct VideoPicture {
66 SDL_Texture *bmp;
67 int width, height; /* Logical image size (actual size may be larger) */
68 volatile bool updated;
69 double pts;
70 } VideoPicture;
72 typedef struct AudioState {
73 AVStream *st;
75 PacketQueue q;
76 AVPacket pkt;
78 /* Used for clock difference average computation */
79 double diff_accum;
80 double diff_avg_coef;
81 double diff_threshold;
83 /* Time (in seconds) of the next sample to be buffered */
84 double current_pts;
86 /* Decompressed sample frame, and swresample context for conversion */
87 AVFrame *decoded_aframe;
88 struct SwrContext *swres_ctx;
90 /* Conversion format, for what gets fed to OpenAL */
91 int dst_ch_layout;
92 enum AVSampleFormat dst_sample_fmt;
94 /* Storage of converted samples */
95 uint8_t *samples;
96 ssize_t samples_len; /* In samples */
97 ssize_t samples_pos;
98 int samples_max;
100 /* OpenAL format */
101 ALenum format;
102 ALint frame_size;
104 ALuint source;
105 ALuint buffer[AUDIO_BUFFER_QUEUE_SIZE];
106 ALuint buffer_idx;
107 almtx_t src_mutex;
109 althrd_t thread;
110 } AudioState;
112 typedef struct VideoState {
113 AVStream *st;
115 PacketQueue q;
117 double clock;
118 double frame_timer;
119 double frame_last_pts;
120 double frame_last_delay;
121 double current_pts;
122 /* time (av_gettime) at which current_pts was last updated - used to keep a running video pts between updates */
123 int64_t current_pts_time;
125 /* Decompressed video frame, and swscale context for conversion */
126 AVFrame *decoded_vframe;
127 struct SwsContext *swscale_ctx;
129 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
130 int pictq_size, pictq_rindex, pictq_windex;
131 almtx_t pictq_mutex;
132 alcnd_t pictq_cond;
134 althrd_t thread;
135 } VideoState;
137 typedef struct MovieState {
138 AVFormatContext *pFormatCtx;
139 int videoStream, audioStream;
141 volatile bool seek_req;
142 int64_t seek_pos;
144 int av_sync_type;
146 int64_t external_clock_base;
148 AudioState audio;
149 VideoState video;
151 althrd_t parse_thread;
153 char filename[1024];
155 volatile bool quit;
156 } MovieState;
158 enum {
159 AV_SYNC_AUDIO_MASTER,
160 AV_SYNC_VIDEO_MASTER,
161 AV_SYNC_EXTERNAL_MASTER,
163 DEFAULT_AV_SYNC_TYPE = AV_SYNC_EXTERNAL_MASTER
166 static AVPacket flush_pkt = { .data = (uint8_t*)"FLUSH" };
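/* flush_pkt is a sentinel packet, recognized by comparing data pointers
 * rather than contents. After a successful seek, decode_thread clears each
 * packet queue and pushes flush_pkt with the new stream time in its pts;
 * when the audio or video thread dequeues a packet whose data matches
 * flush_pkt.data, it flushes the codec's buffers and resets its clock
 * instead of trying to decode it. */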
168 static void packet_queue_init(PacketQueue *q)
170 memset(q, 0, sizeof(PacketQueue));
171 almtx_init(&q->mutex, almtx_plain);
172 alcnd_init(&q->cond);
174 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
176 AVPacketList *pkt1;
177 if(pkt != &flush_pkt && !pkt->buf && av_dup_packet(pkt) < 0)
178 return -1;
180 pkt1 = av_malloc(sizeof(AVPacketList));
181 if(!pkt1) return -1;
182 pkt1->pkt = *pkt;
183 pkt1->next = NULL;
185 almtx_lock(&q->mutex);
186 if(!q->last_pkt)
187 q->first_pkt = pkt1;
188 else
189 q->last_pkt->next = pkt1;
190 q->last_pkt = pkt1;
191 q->nb_packets++;
192 q->size += pkt1->pkt.size;
193 almtx_unlock(&q->mutex);
195 alcnd_signal(&q->cond);
196 return 0;
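/* Returns 1 when a packet was dequeued into *pkt, 0 when the queue has been
 * marked as flushing and is empty (clean end of data), and -1 if the player
 * is quitting before a packet became available. */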
198 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, MovieState *state)
200 AVPacketList *pkt1;
201 int ret = -1;
203 almtx_lock(&q->mutex);
204 while(!state->quit)
206 pkt1 = q->first_pkt;
207 if(pkt1)
209 q->first_pkt = pkt1->next;
210 if(!q->first_pkt)
211 q->last_pkt = NULL;
212 q->nb_packets--;
213 q->size -= pkt1->pkt.size;
214 *pkt = pkt1->pkt;
215 av_free(pkt1);
216 ret = 1;
217 break;
220 if(q->flushing)
222 ret = 0;
223 break;
225 alcnd_wait(&q->cond, &q->mutex);
227 almtx_unlock(&q->mutex);
228 return ret;
230 static void packet_queue_clear(PacketQueue *q)
232 AVPacketList *pkt, *pkt1;
234 almtx_lock(&q->mutex);
235 for(pkt = q->first_pkt;pkt != NULL;pkt = pkt1)
237 pkt1 = pkt->next;
238 if(pkt->pkt.data != flush_pkt.data)
239 av_free_packet(&pkt->pkt);
240 av_freep(&pkt);
242 q->last_pkt = NULL;
243 q->first_pkt = NULL;
244 q->nb_packets = 0;
245 q->size = 0;
246 almtx_unlock(&q->mutex);
248 static void packet_queue_flush(PacketQueue *q)
250 almtx_lock(&q->mutex);
251 q->flushing = true;
252 almtx_unlock(&q->mutex);
253 alcnd_signal(&q->cond);
255 static void packet_queue_deinit(PacketQueue *q)
257 packet_queue_clear(q);
258 alcnd_destroy(&q->cond);
259 almtx_destroy(&q->mutex);
263 static double get_audio_clock(AudioState *state)
265 double pts;
267 almtx_lock(&state->src_mutex);
268 /* The audio clock is the timestamp of the sample currently being heard.
269 * It's based on 4 components:
270 * 1 - The timestamp of the next sample to buffer (state->current_pts)
271 * 2 - The length of the source's buffer queue (AL_SEC_LENGTH_SOFT)
272 * 3 - The offset OpenAL is currently at in the source (the first value
273 * from AL_SEC_OFFSET_LATENCY_SOFT)
274 * 4 - The latency between OpenAL and the DAC (the second value from
275 * AL_SEC_OFFSET_LATENCY_SOFT)
277 * Subtracting the length of the source queue from the next sample's
278 * timestamp gives the timestamp of the sample at start of the source
279 * queue. Adding the source offset to that results in the timestamp for
280 * OpenAL's current position, and subtracting the source latency from that
281 * gives the timestamp of the sample currently at the DAC.
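/* Worked example with made-up numbers: if the next sample to be buffered is
 * at current_pts = 5.00s, the source queue holds 0.80s of audio, playback
 * is 0.30s into that queue, and the reported latency is 0.05s, the sample
 * being heard is at 5.00 - 0.80 + 0.30 - 0.05 = 4.45s. */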
283 pts = state->current_pts;
284 if(state->source)
286 ALdouble offset[2] = { 0.0, 0.0 };
287 ALdouble queue_len = 0.0;
288 ALint status;
290 /* NOTE: The source state must be checked last, in case an underrun
291 * occurs and the source stops between retrieving the offset+latency
292 * and getting the state. */
293 if(has_latency_check)
295 alGetSourcedvSOFT(state->source, AL_SEC_OFFSET_LATENCY_SOFT, offset);
296 alGetSourcedvSOFT(state->source, AL_SEC_LENGTH_SOFT, &queue_len);
298 else
300 ALint ioffset, ilen;
301 alGetSourcei(state->source, AL_SAMPLE_OFFSET, &ioffset);
302 alGetSourcei(state->source, AL_SAMPLE_LENGTH_SOFT, &ilen);
303 offset[0] = (double)ioffset / state->st->codec->sample_rate;
304 queue_len = (double)ilen / state->st->codec->sample_rate;
306 alGetSourcei(state->source, AL_SOURCE_STATE, &status);
308 /* If the source is AL_STOPPED, then there was an underrun and all
309 * buffers are processed, so ignore the source queue. The audio thread
310 * will put the source into an AL_INITIAL state and clear the queue
311 * when it starts recovery. */
312 if(status != AL_STOPPED)
313 pts = pts - queue_len + offset[0];
314 if(status == AL_PLAYING)
315 pts = pts - offset[1];
317 almtx_unlock(&state->src_mutex);
319 return (pts >= 0.0) ? pts : 0.0;
321 static double get_video_clock(VideoState *state)
323 double delta = (av_gettime() - state->current_pts_time) / 1000000.0;
324 return state->current_pts + delta;
326 static double get_external_clock(MovieState *movState)
328 return (av_gettime()-movState->external_clock_base) / 1000000.0;
331 double get_master_clock(MovieState *movState)
333 if(movState->av_sync_type == AV_SYNC_VIDEO_MASTER)
334 return get_video_clock(&movState->video);
335 if(movState->av_sync_type == AV_SYNC_AUDIO_MASTER)
336 return get_audio_clock(&movState->audio);
337 return get_external_clock(movState);
340 /* Return how many samples to skip to maintain sync (negative means to
341 * duplicate samples). */
342 static int synchronize_audio(MovieState *movState)
344 double diff, avg_diff;
345 double ref_clock;
347 if(movState->av_sync_type == AV_SYNC_AUDIO_MASTER)
348 return 0;
350 ref_clock = get_master_clock(movState);
351 diff = ref_clock - get_audio_clock(&movState->audio);
353 if(!(diff < AV_NOSYNC_THRESHOLD))
355 /* Difference is too big; reset the accumulated average */
356 movState->audio.diff_accum = 0.0;
357 return 0;
360 /* Accumulate the diffs */
361 movState->audio.diff_accum = movState->audio.diff_accum*movState->audio.diff_avg_coef + diff;
362 avg_diff = movState->audio.diff_accum*(1.0 - movState->audio.diff_avg_coef);
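/* This is an exponentially weighted average: each update scales the old
 * accumulation by diff_avg_coef before adding the new difference, so a
 * measurement's weight decays geometrically with age. With diff_avg_coef =
 * exp(log(0.01)/AUDIO_DIFF_AVG_NB) (set in stream_component_open), a
 * measurement AUDIO_DIFF_AVG_NB updates old retains only 1% of its weight,
 * and multiplying by (1.0 - diff_avg_coef) normalizes the sum back into an
 * average. */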
363 if(fabs(avg_diff) < movState->audio.diff_threshold)
364 return 0;
366 /* Constrain the per-update difference to avoid exceedingly large skips */
367 if(!(diff <= SAMPLE_CORRECTION_MAX_DIFF))
368 diff = SAMPLE_CORRECTION_MAX_DIFF;
369 else if(!(diff >= -SAMPLE_CORRECTION_MAX_DIFF))
370 diff = -SAMPLE_CORRECTION_MAX_DIFF;
371 return (int)(diff*movState->audio.st->codec->sample_rate);
374 static int audio_decode_frame(MovieState *movState)
376 AVPacket *pkt = &movState->audio.pkt;
378 while(!movState->quit)
380 while(!movState->quit && pkt->size == 0)
382 av_free_packet(pkt);
384 /* Get the next packet */
385 int err;
386 if((err=packet_queue_get(&movState->audio.q, pkt, movState)) <= 0)
388 if(err == 0)
389 break;
390 return err;
392 if(pkt->data == flush_pkt.data)
394 avcodec_flush_buffers(movState->audio.st->codec);
395 movState->audio.diff_accum = 0.0;
396 movState->audio.current_pts = av_q2d(movState->audio.st->time_base)*pkt->pts;
398 alSourceRewind(movState->audio.source);
399 alSourcei(movState->audio.source, AL_BUFFER, 0);
401 av_new_packet(pkt, 0);
403 return -1;
406 /* If the packet provides a pts, update the audio clock with it */
407 if(pkt->pts != AV_NOPTS_VALUE)
408 movState->audio.current_pts = av_q2d(movState->audio.st->time_base)*pkt->pts;
411 AVFrame *frame = movState->audio.decoded_aframe;
412 int got_frame = 0;
413 int len1 = avcodec_decode_audio4(movState->audio.st->codec, frame,
414 &got_frame, pkt);
415 if(len1 < 0) break;
417 if(len1 <= pkt->size)
419 /* Move the unread data to the front and shrink the packet to what remains */
420 int remaining = pkt->size - len1;
421 memmove(pkt->data, &pkt->data[len1], remaining);
422 av_shrink_packet(pkt, remaining);
425 if(!got_frame || frame->nb_samples <= 0)
427 av_frame_unref(frame);
428 continue;
431 if(frame->nb_samples > movState->audio.samples_max)
433 av_freep(&movState->audio.samples);
434 av_samples_alloc(
435 &movState->audio.samples, NULL, movState->audio.st->codec->channels,
436 frame->nb_samples, movState->audio.dst_sample_fmt, 0
438 movState->audio.samples_max = frame->nb_samples;
440 /* Return the amount of sample frames converted */
441 int data_size = swr_convert(movState->audio.swres_ctx,
442 &movState->audio.samples, frame->nb_samples,
443 (const uint8_t**)frame->data, frame->nb_samples
446 av_frame_unref(frame);
447 return data_size;
450 return -1;
453 static int read_audio(MovieState *movState, uint8_t *samples, int length)
455 int sample_skip = synchronize_audio(movState);
456 int audio_size = 0;
458 /* Read the next chunk of data, refill the buffer, and queue it
459 * on the source */
460 length /= movState->audio.frame_size;
461 while(audio_size < length)
463 if(movState->audio.samples_len <= 0 || movState->audio.samples_pos >= movState->audio.samples_len)
465 int frame_len = audio_decode_frame(movState);
466 if(frame_len < 0) return -1;
468 movState->audio.samples_len = frame_len;
469 if(movState->audio.samples_len == 0)
470 break;
472 movState->audio.samples_pos = (movState->audio.samples_len < sample_skip) ?
473 movState->audio.samples_len : sample_skip;
474 sample_skip -= movState->audio.samples_pos;
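/* A negative skip count from synchronize_audio means the audio clock is
 * ahead of the master clock and samples must be inserted rather than
 * dropped; samples_pos then goes negative, and the branch below pads the
 * output by repeating the first converted sample until the position catches
 * back up to zero. */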
476 movState->audio.current_pts += (double)movState->audio.samples_pos /
477 (double)movState->audio.st->codec->sample_rate;
478 continue;
481 int rem = length - audio_size;
482 if(movState->audio.samples_pos >= 0)
484 int n = movState->audio.frame_size;
485 int len = movState->audio.samples_len - movState->audio.samples_pos;
486 if(rem > len) rem = len;
487 memcpy(samples + audio_size*n,
488 movState->audio.samples + movState->audio.samples_pos*n,
489 rem*n);
491 else
493 int n = movState->audio.frame_size;
494 int len = -movState->audio.samples_pos;
495 if(rem > len) rem = len;
497 /* Add samples by copying the first sample */
498 if(n == 1)
500 uint8_t sample = ((uint8_t*)movState->audio.samples)[0];
501 uint8_t *q = (uint8_t*)samples + audio_size;
502 for(int i = 0;i < rem;i++)
503 *(q++) = sample;
505 else if(n == 2)
507 uint16_t sample = ((uint16_t*)movState->audio.samples)[0];
508 uint16_t *q = (uint16_t*)samples + audio_size;
509 for(int i = 0;i < rem;i++)
510 *(q++) = sample;
512 else if(n == 4)
514 uint32_t sample = ((uint32_t*)movState->audio.samples)[0];
515 uint32_t *q = (uint32_t*)samples + audio_size;
516 for(int i = 0;i < rem;i++)
517 *(q++) = sample;
519 else if(n == 8)
521 uint64_t sample = ((uint64_t*)movState->audio.samples)[0];
522 uint64_t *q = (uint64_t*)samples + audio_size;
523 for(int i = 0;i < rem;i++)
524 *(q++) = sample;
526 else
528 uint8_t *sample = movState->audio.samples;
529 uint8_t *q = samples + audio_size*n;
530 for(int i = 0;i < rem;i++)
532 memcpy(q, sample, n);
533 q += n;
538 movState->audio.samples_pos += rem;
539 movState->audio.current_pts += (double)rem / movState->audio.st->codec->sample_rate;
540 audio_size += rem;
543 return audio_size * movState->audio.frame_size;
546 static int audio_thread(void *userdata)
548 MovieState *movState = (MovieState*)userdata;
549 uint8_t *samples = NULL;
550 ALsizei buffer_len;
551 ALenum fmt;
553 alGenBuffers(AUDIO_BUFFER_QUEUE_SIZE, movState->audio.buffer);
554 alGenSources(1, &movState->audio.source);
556 alSourcei(movState->audio.source, AL_SOURCE_RELATIVE, AL_TRUE);
557 alSourcei(movState->audio.source, AL_ROLLOFF_FACTOR, 0);
559 av_new_packet(&movState->audio.pkt, 0);
561 /* Find a suitable format for OpenAL. */
562 movState->audio.format = AL_NONE;
563 if(movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_U8 ||
564 movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_U8P)
566 movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_U8;
567 movState->audio.frame_size = 1;
568 if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_7POINT1 &&
569 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
570 (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
572 movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
573 movState->audio.frame_size *= 8;
574 movState->audio.format = fmt;
576 if((movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1 ||
577 movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
578 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
579 (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
581 movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
582 movState->audio.frame_size *= 6;
583 movState->audio.format = fmt;
585 if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
587 movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
588 movState->audio.frame_size *= 1;
589 movState->audio.format = AL_FORMAT_MONO8;
591 if(movState->audio.format == AL_NONE)
593 movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
594 movState->audio.frame_size *= 2;
595 movState->audio.format = AL_FORMAT_STEREO8;
598 if((movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_FLT ||
599 movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
600 alIsExtensionPresent("AL_EXT_FLOAT32"))
602 movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_FLT;
603 movState->audio.frame_size = 4;
604 if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_7POINT1 &&
605 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
606 (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
608 movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
609 movState->audio.frame_size *= 8;
610 movState->audio.format = fmt;
612 if((movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1 ||
613 movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
614 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
615 (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
617 movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
618 movState->audio.frame_size *= 6;
619 movState->audio.format = fmt;
621 if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
623 movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
624 movState->audio.frame_size *= 1;
625 movState->audio.format = AL_FORMAT_MONO_FLOAT32;
627 if(movState->audio.format == AL_NONE)
629 movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
630 movState->audio.frame_size *= 2;
631 movState->audio.format = AL_FORMAT_STEREO_FLOAT32;
634 if(movState->audio.format == AL_NONE)
636 movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_S16;
637 movState->audio.frame_size = 2;
638 if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_7POINT1 &&
639 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
640 (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
642 movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
643 movState->audio.frame_size *= 8;
644 movState->audio.format = fmt;
646 if((movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1 ||
647 movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
648 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
649 (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
651 movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
652 movState->audio.frame_size *= 6;
653 movState->audio.format = fmt;
655 if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
657 movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
658 movState->audio.frame_size *= 1;
659 movState->audio.format = AL_FORMAT_MONO16;
661 if(movState->audio.format == AL_NONE)
663 movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
664 movState->audio.frame_size *= 2;
665 movState->audio.format = AL_FORMAT_STEREO16;
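/* Summary of the cascade above: 8-bit output is chosen for U8/U8P input,
 * 32-bit float for FLT/FLTP input when AL_EXT_FLOAT32 is available, and
 * signed 16-bit otherwise. Within each sample type, 7.1 and 5.1 layouts are
 * used only if AL_EXT_MCFORMATS exposes a matching format, mono input stays
 * mono, and everything else falls back to stereo. frame_size ends up as
 * bytes per sample times the channel count. */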
668 buffer_len = AUDIO_BUFFER_TIME * movState->audio.st->codec->sample_rate / 1000 *
669 movState->audio.frame_size;
670 samples = av_malloc(buffer_len);
672 movState->audio.samples = NULL;
673 movState->audio.samples_max = 0;
674 movState->audio.samples_pos = 0;
675 movState->audio.samples_len = 0;
677 if(!(movState->audio.decoded_aframe=av_frame_alloc()))
679 fprintf(stderr, "Failed to allocate audio frame\n");
680 goto finish;
683 movState->audio.swres_ctx = swr_alloc_set_opts(NULL,
684 movState->audio.dst_ch_layout,
685 movState->audio.dst_sample_fmt,
686 movState->audio.st->codec->sample_rate,
687 movState->audio.st->codec->channel_layout ?
688 movState->audio.st->codec->channel_layout :
689 av_get_default_channel_layout(movState->audio.st->codec->channels),
690 movState->audio.st->codec->sample_fmt,
691 movState->audio.st->codec->sample_rate,
692 0, NULL
694 if(!movState->audio.swres_ctx || swr_init(movState->audio.swres_ctx) != 0)
696 fprintf(stderr, "Failed to initialize audio converter\n");
697 goto finish;
700 almtx_lock(&movState->audio.src_mutex);
701 while(alGetError() == AL_NO_ERROR && !movState->quit)
703 /* First remove any processed buffers. */
704 ALint processed;
705 alGetSourcei(movState->audio.source, AL_BUFFERS_PROCESSED, &processed);
706 alSourceUnqueueBuffers(movState->audio.source, processed, (ALuint[AUDIO_BUFFER_QUEUE_SIZE]){});
708 /* Refill the buffer queue. */
709 ALint queued;
710 alGetSourcei(movState->audio.source, AL_BUFFERS_QUEUED, &queued);
711 while(queued < AUDIO_BUFFER_QUEUE_SIZE)
713 int audio_size;
715 /* Read the next chunk of data, fill the buffer, and queue it on
716 * the source */
717 audio_size = read_audio(movState, samples, buffer_len);
718 if(audio_size < 0) break;
720 ALuint bufid = movState->audio.buffer[movState->audio.buffer_idx++];
721 movState->audio.buffer_idx %= AUDIO_BUFFER_QUEUE_SIZE;
723 alBufferData(bufid, movState->audio.format, samples, audio_size,
724 movState->audio.st->codec->sample_rate);
725 alSourceQueueBuffers(movState->audio.source, 1, &bufid);
726 queued++;
729 /* Check that the source is playing. */
730 ALint state;
731 alGetSourcei(movState->audio.source, AL_SOURCE_STATE, &state);
732 if(state == AL_STOPPED)
734 /* AL_STOPPED means there was an underrun. Double-check that all
735 * processed buffers are removed, then rewind the source to get it
736 * back into an AL_INITIAL state. */
737 alGetSourcei(movState->audio.source, AL_BUFFERS_PROCESSED, &processed);
738 alSourceUnqueueBuffers(movState->audio.source, processed, (ALuint[AUDIO_BUFFER_QUEUE_SIZE]){});
739 alSourceRewind(movState->audio.source);
740 continue;
743 almtx_unlock(&movState->audio.src_mutex);
745 /* (re)start the source if needed, and wait for a buffer to finish */
746 if(state != AL_PLAYING && state != AL_PAUSED)
748 alGetSourcei(movState->audio.source, AL_BUFFERS_QUEUED, &queued);
749 if(queued > 0) alSourcePlay(movState->audio.source);
751 SDL_Delay(AUDIO_BUFFER_TIME);
753 almtx_lock(&movState->audio.src_mutex);
755 almtx_unlock(&movState->audio.src_mutex);
757 finish:
758 av_frame_free(&movState->audio.decoded_aframe);
759 swr_free(&movState->audio.swres_ctx);
761 av_freep(&samples);
762 av_freep(&movState->audio.samples);
764 alDeleteSources(1, &movState->audio.source);
765 alDeleteBuffers(AUDIO_BUFFER_QUEUE_SIZE, movState->audio.buffer);
767 return 0;
771 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
773 (void)interval;
775 SDL_PushEvent(&(SDL_Event){ .user={.type=FF_REFRESH_EVENT, .data1=opaque} });
776 return 0; /* 0 means stop timer */
779 /* Schedule a video refresh in 'delay' ms */
780 static void schedule_refresh(MovieState *movState, int delay)
782 SDL_AddTimer(delay, sdl_refresh_timer_cb, movState);
785 static void video_display(MovieState *movState, SDL_Window *screen, SDL_Renderer *renderer)
787 VideoPicture *vp = &movState->video.pictq[movState->video.pictq_rindex];
789 if(!vp->bmp)
790 return;
792 float aspect_ratio;
793 int win_w, win_h;
794 int w, h, x, y;
796 if(movState->video.st->codec->sample_aspect_ratio.num == 0)
797 aspect_ratio = 0.0f;
798 else
800 aspect_ratio = av_q2d(movState->video.st->codec->sample_aspect_ratio) *
801 movState->video.st->codec->width /
802 movState->video.st->codec->height;
804 if(aspect_ratio <= 0.0f)
806 aspect_ratio = (float)movState->video.st->codec->width /
807 (float)movState->video.st->codec->height;
810 SDL_GetWindowSize(screen, &win_w, &win_h);
811 h = win_h;
812 w = ((int)rint(h * aspect_ratio) + 3) & ~3;
813 if(w > win_w)
815 w = win_w;
816 h = ((int)rint(w / aspect_ratio) + 3) & ~3;
818 x = (win_w - w) / 2;
819 y = (win_h - h) / 2;
821 SDL_RenderCopy(renderer, vp->bmp,
822 &(SDL_Rect){ .x=0, .y=0, .w=vp->width, .h=vp->height },
823 &(SDL_Rect){ .x=x, .y=y, .w=w, .h=h }
825 SDL_RenderPresent(renderer);
828 static void video_refresh_timer(MovieState *movState, SDL_Window *screen, SDL_Renderer *renderer)
830 if(!movState->video.st)
832 schedule_refresh(movState, 100);
833 return;
836 almtx_lock(&movState->video.pictq_mutex);
837 retry:
838 if(movState->video.pictq_size == 0)
839 schedule_refresh(movState, 1);
840 else
842 VideoPicture *vp = &movState->video.pictq[movState->video.pictq_rindex];
843 double actual_delay, delay, sync_threshold, ref_clock, diff;
845 movState->video.current_pts = vp->pts;
846 movState->video.current_pts_time = av_gettime();
848 delay = vp->pts - movState->video.frame_last_pts; /* the pts from last time */
849 if(delay <= 0 || delay >= 1.0)
851 /* if incorrect delay, use previous one */
852 delay = movState->video.frame_last_delay;
854 /* save for next time */
855 movState->video.frame_last_delay = delay;
856 movState->video.frame_last_pts = vp->pts;
858 /* Update delay to sync to clock if not master source. */
859 if(movState->av_sync_type != AV_SYNC_VIDEO_MASTER)
861 ref_clock = get_master_clock(movState);
862 diff = vp->pts - ref_clock;
864 /* Skip or repeat the frame. Take delay into account. */
865 sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
866 if(fabs(diff) < AV_NOSYNC_THRESHOLD)
868 if(diff <= -sync_threshold)
869 delay = 0;
870 else if(diff >= sync_threshold)
871 delay = 2 * delay;
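/* That is: if the frame's pts is behind the master clock by more than the
 * threshold, the delay drops to 0 so the late frame is shown (and passed
 * over) immediately; if it is ahead by more than the threshold, the delay
 * is doubled so playback waits for the clock to catch up. Differences
 * larger than AV_NOSYNC_THRESHOLD are left uncorrected. */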
875 movState->video.frame_timer += delay;
876 /* Compute the REAL delay. */
877 actual_delay = movState->video.frame_timer - (av_gettime() / 1000000.0);
878 if(!(actual_delay >= 0.010))
880 /* We don't have time to handle this picture, just skip to the next one. */
881 movState->video.pictq_rindex = (movState->video.pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE;
882 movState->video.pictq_size--;
883 alcnd_signal(&movState->video.pictq_cond);
884 goto retry;
886 schedule_refresh(movState, (int)(actual_delay*1000.0 + 0.5));
888 /* Show the picture! */
889 video_display(movState, screen, renderer);
891 /* Update queue for next picture. */
892 movState->video.pictq_rindex = (movState->video.pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE;
893 movState->video.pictq_size--;
894 alcnd_signal(&movState->video.pictq_cond);
896 almtx_unlock(&movState->video.pictq_mutex);
900 static void update_picture(MovieState *movState, bool *first_update, SDL_Window *screen, SDL_Renderer *renderer)
902 VideoPicture *vp = &movState->video.pictq[movState->video.pictq_windex];
904 /* allocate or resize the buffer! */
905 if(!vp->bmp || vp->width != movState->video.st->codec->width ||
906 vp->height != movState->video.st->codec->height)
908 if(vp->bmp)
909 SDL_DestroyTexture(vp->bmp);
910 vp->bmp = SDL_CreateTexture(
911 renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING,
912 movState->video.st->codec->coded_width, movState->video.st->codec->coded_height
914 if(!vp->bmp)
915 fprintf(stderr, "Failed to create YV12 texture!\n");
916 vp->width = movState->video.st->codec->width;
917 vp->height = movState->video.st->codec->height;
919 if(*first_update && vp->width > 0 && vp->height > 0)
921 /* For the first update, set the window size to the video size. */
922 *first_update = false;
924 int w = vp->width;
925 int h = vp->height;
926 if(movState->video.st->codec->sample_aspect_ratio.num != 0 &&
927 movState->video.st->codec->sample_aspect_ratio.den != 0)
929 double aspect_ratio = av_q2d(movState->video.st->codec->sample_aspect_ratio);
930 if(aspect_ratio >= 1.0)
931 w = (int)(w*aspect_ratio + 0.5);
932 else if(aspect_ratio > 0.0)
933 h = (int)(h/aspect_ratio + 0.5);
935 SDL_SetWindowSize(screen, w, h);
939 if(vp->bmp)
941 AVFrame *frame = movState->video.decoded_vframe;
942 void *pixels = NULL;
943 int pitch = 0;
945 if(movState->video.st->codec->pix_fmt == PIX_FMT_YUV420P)
946 SDL_UpdateYUVTexture(vp->bmp, NULL,
947 frame->data[0], frame->linesize[0],
948 frame->data[1], frame->linesize[1],
949 frame->data[2], frame->linesize[2]
951 else if(SDL_LockTexture(vp->bmp, NULL, &pixels, &pitch) != 0)
952 fprintf(stderr, "Failed to lock texture\n");
953 else
955 // Convert the image into YUV format that SDL uses
956 int coded_w = movState->video.st->codec->coded_width;
957 int coded_h = movState->video.st->codec->coded_height;
958 int w = movState->video.st->codec->width;
959 int h = movState->video.st->codec->height;
960 if(!movState->video.swscale_ctx)
961 movState->video.swscale_ctx = sws_getContext(
962 w, h, movState->video.st->codec->pix_fmt,
963 w, h, PIX_FMT_YUV420P, SWS_X, NULL, NULL, NULL
966 /* point pict at the queue */
967 AVPicture pict;
968 pict.data[0] = pixels;
969 pict.data[2] = pict.data[0] + coded_w*coded_h;
970 pict.data[1] = pict.data[2] + coded_w*coded_h/4;
972 pict.linesize[0] = pitch;
973 pict.linesize[2] = pitch / 2;
974 pict.linesize[1] = pitch / 2;
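/* SDL_PIXELFORMAT_YV12 lays the planes out as Y, then V, then U, while
 * FFmpeg's YUV420P uses data[0]=Y, data[1]=U, data[2]=V. Pointing data[2]
 * (V) at the bytes just after the Y plane and data[1] (U) after that lets
 * sws_scale write straight into the texture in YV12 order. The chroma
 * planes are quarter size, hence the half pitch. */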
976 sws_scale(movState->video.swscale_ctx, (const uint8_t**)frame->data,
977 frame->linesize, 0, h, pict.data, pict.linesize);
978 SDL_UnlockTexture(vp->bmp);
982 almtx_lock(&movState->video.pictq_mutex);
983 vp->updated = true;
984 almtx_unlock(&movState->video.pictq_mutex);
985 alcnd_signal(&movState->video.pictq_cond);
988 static int queue_picture(MovieState *movState, double pts)
990 /* Wait until we have space for a new pic */
991 almtx_lock(&movState->video.pictq_mutex);
992 while(movState->video.pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !movState->quit)
993 alcnd_wait(&movState->video.pictq_cond, &movState->video.pictq_mutex);
994 almtx_unlock(&movState->video.pictq_mutex);
996 if(movState->quit)
997 return -1;
999 VideoPicture *vp = &movState->video.pictq[movState->video.pictq_windex];
1001 /* We have to create/update the picture in the main thread */
1002 vp->updated = false;
1003 SDL_PushEvent(&(SDL_Event){ .user={.type=FF_UPDATE_EVENT, .data1=movState} });
1005 /* Wait until the picture is updated. */
1006 almtx_lock(&movState->video.pictq_mutex);
1007 while(!vp->updated && !movState->quit)
1008 alcnd_wait(&movState->video.pictq_cond, &movState->video.pictq_mutex);
1009 almtx_unlock(&movState->video.pictq_mutex);
1010 if(movState->quit)
1011 return -1;
1012 vp->pts = pts;
1014 movState->video.pictq_windex = (movState->video.pictq_windex+1)%VIDEO_PICTURE_QUEUE_SIZE;
1015 almtx_lock(&movState->video.pictq_mutex);
1016 movState->video.pictq_size++;
1017 almtx_unlock(&movState->video.pictq_mutex);
1019 return 0;
1022 static double synchronize_video(MovieState *movState, double pts)
1024 double frame_delay;
1026 if(pts == 0.0) /* if we aren't given a pts, set it to the clock */
1027 pts = movState->video.clock;
1028 else /* if we have pts, set video clock to it */
1029 movState->video.clock = pts;
1031 /* update the video clock */
1032 frame_delay = av_q2d(movState->video.st->codec->time_base);
1033 /* if we are repeating a frame, adjust clock accordingly */
1034 frame_delay += movState->video.decoded_vframe->repeat_pict * (frame_delay * 0.5);
1035 movState->video.clock += frame_delay;
1036 return pts;
1039 int video_thread(void *arg)
1041 MovieState *movState = (MovieState*)arg;
1042 AVPacket *packet = (AVPacket[1]){};
1043 int64_t saved_pts, pkt_pts;
1044 int frameFinished;
1046 movState->video.decoded_vframe = av_frame_alloc();
1047 while(packet_queue_get(&movState->video.q, packet, movState) >= 0)
1049 if(packet->data == flush_pkt.data)
1051 avcodec_flush_buffers(movState->video.st->codec);
1053 almtx_lock(&movState->video.pictq_mutex);
1054 movState->video.pictq_size = 0;
1055 movState->video.pictq_rindex = 0;
1056 movState->video.pictq_windex = 0;
1057 almtx_unlock(&movState->video.pictq_mutex);
1059 movState->video.clock = av_q2d(movState->video.st->time_base)*packet->pts;
1060 movState->video.current_pts = movState->video.clock;
1061 movState->video.current_pts_time = av_gettime();
1062 continue;
1065 pkt_pts = packet->pts;
1067 /* Decode video frame */
1068 avcodec_decode_video2(movState->video.st->codec, movState->video.decoded_vframe,
1069 &frameFinished, packet);
1070 if(pkt_pts != AV_NOPTS_VALUE && !movState->video.decoded_vframe->opaque)
1072 /* Store the packet's original pts in the frame, in case the frame
1073 * is not finished decoding yet. */
1074 saved_pts = pkt_pts;
1075 movState->video.decoded_vframe->opaque = &saved_pts;
1078 av_free_packet(packet);
1080 if(frameFinished)
1082 double pts = av_q2d(movState->video.st->time_base);
1083 if(packet->dts != AV_NOPTS_VALUE)
1084 pts *= packet->dts;
1085 else if(movState->video.decoded_vframe->opaque)
1086 pts *= *(int64_t*)movState->video.decoded_vframe->opaque;
1087 else
1088 pts *= 0.0;
1089 movState->video.decoded_vframe->opaque = NULL;
1091 pts = synchronize_video(movState, pts);
1092 if(queue_picture(movState, pts) < 0)
1093 break;
1097 sws_freeContext(movState->video.swscale_ctx);
1098 movState->video.swscale_ctx = NULL;
1099 av_frame_free(&movState->video.decoded_vframe);
1100 return 0;
1104 static int stream_component_open(MovieState *movState, int stream_index)
1106 AVFormatContext *pFormatCtx = movState->pFormatCtx;
1107 AVCodecContext *codecCtx;
1108 AVCodec *codec;
1110 if(stream_index < 0 || (unsigned int)stream_index >= pFormatCtx->nb_streams)
1111 return -1;
1113 /* Get a pointer to the codec context for the video stream, and open the
1114 * associated codec */
1115 codecCtx = pFormatCtx->streams[stream_index]->codec;
1117 codec = avcodec_find_decoder(codecCtx->codec_id);
1118 if(!codec || avcodec_open2(codecCtx, codec, NULL) < 0)
1120 fprintf(stderr, "Unsupported codec!\n");
1121 return -1;
1124 /* Initialize and start the media type handler */
1125 switch(codecCtx->codec_type)
1127 case AVMEDIA_TYPE_AUDIO:
1128 movState->audioStream = stream_index;
1129 movState->audio.st = pFormatCtx->streams[stream_index];
1131 /* Averaging filter for audio sync */
1132 movState->audio.diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1133 /* Only correct audio if the error is larger than this */
1134 movState->audio.diff_threshold = 2.0 * 0.050/* 50 ms */;
1136 memset(&movState->audio.pkt, 0, sizeof(movState->audio.pkt));
1137 if(althrd_create(&movState->audio.thread, audio_thread, movState) != althrd_success)
1139 movState->audioStream = -1;
1140 movState->audio.st = NULL;
1142 break;
1144 case AVMEDIA_TYPE_VIDEO:
1145 movState->videoStream = stream_index;
1146 movState->video.st = pFormatCtx->streams[stream_index];
1148 movState->video.current_pts_time = av_gettime();
1149 movState->video.frame_timer = (double)movState->video.current_pts_time /
1150 1000000.0;
1151 movState->video.frame_last_delay = 40e-3;
1153 if(althrd_create(&movState->video.thread, video_thread, movState) != althrd_success)
1155 movState->videoStream = -1;
1156 movState->video.st = NULL;
1158 break;
1160 default:
1161 break;
1164 return 0;
1167 static int decode_interrupt_cb(void *ctx)
1169 return ((MovieState*)ctx)->quit;
1172 int decode_thread(void *arg)
1174 MovieState *movState = (MovieState *)arg;
1175 AVFormatContext *fmtCtx = movState->pFormatCtx;
1176 AVPacket *packet = (AVPacket[1]){};
1177 int video_index = -1;
1178 int audio_index = -1;
1180 movState->videoStream = -1;
1181 movState->audioStream = -1;
1183 /* Dump information about file onto standard error */
1184 av_dump_format(fmtCtx, 0, movState->filename, 0);
1186 /* Find the first video and audio streams */
1187 for(unsigned int i = 0;i < fmtCtx->nb_streams;i++)
1189 if(fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
1190 video_index = i;
1191 else if(fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
1192 audio_index = i;
1194 movState->external_clock_base = av_gettime();
1195 if(audio_index >= 0)
1196 stream_component_open(movState, audio_index);
1197 if(video_index >= 0)
1198 stream_component_open(movState, video_index);
1200 if(movState->videoStream < 0 && movState->audioStream < 0)
1202 fprintf(stderr, "%s: could not open codecs\n", movState->filename);
1203 goto fail;
1206 /* Main packet handling loop */
1207 while(!movState->quit)
1209 if(movState->seek_req)
1211 int64_t seek_target = movState->seek_pos;
1212 int stream_index= -1;
1214 /* Prefer seeking on the video stream. */
1215 if(movState->videoStream >= 0)
1216 stream_index = movState->videoStream;
1217 else if(movState->audioStream >= 0)
1218 stream_index = movState->audioStream;
1220 /* Get a seek timestamp for the appropriate stream. */
1221 int64_t timestamp = seek_target;
1222 if(stream_index >= 0)
1223 timestamp = av_rescale_q(seek_target, AV_TIME_BASE_Q, fmtCtx->streams[stream_index]->time_base);
1225 if(av_seek_frame(movState->pFormatCtx, stream_index, timestamp, 0) < 0)
1226 fprintf(stderr, "%s: error while seeking\n", movState->pFormatCtx->filename);
1227 else
1229 /* Seek successful, clear the packet queues and send a special
1230 * 'flush' packet with the new stream clock time. */
1231 if(movState->audioStream >= 0)
1233 packet_queue_clear(&movState->audio.q);
1234 flush_pkt.pts = av_rescale_q(seek_target, AV_TIME_BASE_Q,
1235 fmtCtx->streams[movState->audioStream]->time_base
1237 packet_queue_put(&movState->audio.q, &flush_pkt);
1239 if(movState->videoStream >= 0)
1241 packet_queue_clear(&movState->video.q);
1242 flush_pkt.pts = av_rescale_q(seek_target, AV_TIME_BASE_Q,
1243 fmtCtx->streams[movState->videoStream]->time_base
1245 packet_queue_put(&movState->video.q, &flush_pkt);
1247 movState->external_clock_base = av_gettime() - seek_target;
1249 movState->seek_req = false;
1252 if(movState->audio.q.size >= MAX_AUDIOQ_SIZE ||
1253 movState->video.q.size >= MAX_VIDEOQ_SIZE)
1255 SDL_Delay(10);
1256 continue;
1259 if(av_read_frame(movState->pFormatCtx, packet) < 0)
1261 packet_queue_flush(&movState->video.q);
1262 packet_queue_flush(&movState->audio.q);
1263 break;
1266 /* Place the packet in the queue it's meant for, or discard it. */
1267 if(packet->stream_index == movState->videoStream)
1268 packet_queue_put(&movState->video.q, packet);
1269 else if(packet->stream_index == movState->audioStream)
1270 packet_queue_put(&movState->audio.q, packet);
1271 else
1272 av_free_packet(packet);
1275 /* all done - wait for it */
1276 while(!movState->quit)
1278 if(movState->audio.q.nb_packets == 0 && movState->video.q.nb_packets == 0)
1279 break;
1280 SDL_Delay(100);
1283 fail:
1284 movState->quit = true;
1285 packet_queue_flush(&movState->video.q);
1286 packet_queue_flush(&movState->audio.q);
1288 if(movState->videoStream >= 0)
1289 althrd_join(movState->video.thread, NULL);
1290 if(movState->audioStream >= 0)
1291 althrd_join(movState->audio.thread, NULL);
1293 SDL_PushEvent(&(SDL_Event){ .user={.type=FF_QUIT_EVENT, .data1=movState} });
1295 return 0;
1299 static void stream_seek(MovieState *movState, double incr)
1301 if(!movState->seek_req)
1303 double newtime = get_master_clock(movState)+incr;
1304 if(newtime <= 0.0) movState->seek_pos = 0;
1305 else movState->seek_pos = (int64_t)(newtime * AV_TIME_BASE);
1306 movState->seek_req = true;
1310 int main(int argc, char *argv[])
1312 SDL_Event event;
1313 MovieState *movState;
1314 bool first_update = true;
1315 SDL_Window *screen;
1316 SDL_Renderer *renderer;
1317 ALCdevice *device;
1318 ALCcontext *context;
1320 if(argc < 2)
1322 fprintf(stderr, "Usage: %s <file>\n", argv[0]);
1323 return 1;
1325 /* Register all formats and codecs */
1326 av_register_all();
1327 /* Initialize networking protocols */
1328 avformat_network_init();
1330 if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
1332 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
1333 return 1;
1336 /* Make a window to put our video */
1337 screen = SDL_CreateWindow("alffplay", 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
1338 if(!screen)
1340 fprintf(stderr, "SDL: could not set video mode - exiting\n");
1341 return 1;
1343 /* Make a renderer to handle the texture image surface and rendering. */
1344 renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
1345 if(renderer)
1347 SDL_RendererInfo rinf;
1348 bool ok = false;
1350 /* Make sure the renderer supports YV12 textures. If not, fallback to a
1351 * software renderer. */
1352 if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1354 for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
1355 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_YV12);
1357 if(!ok)
1359 fprintf(stderr, "YV12 pixelformat textures not supported on renderer %s\n", rinf.name);
1360 SDL_DestroyRenderer(renderer);
1361 renderer = NULL;
1364 if(!renderer)
1365 renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_SOFTWARE);
1366 if(!renderer)
1368 fprintf(stderr, "SDL: could not create renderer - exiting\n");
1369 return 1;
1371 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1372 SDL_RenderFillRect(renderer, NULL);
1373 SDL_RenderPresent(renderer);
1375 /* Open an audio device */
1376 device = alcOpenDevice(NULL);
1377 if(!device)
1379 fprintf(stderr, "OpenAL: could not open device - exiting\n");
1380 return 1;
1382 context = alcCreateContext(device, NULL);
1383 if(!context)
1385 fprintf(stderr, "OpenAL: could not create context - exiting\n");
1386 return 1;
1388 if(alcMakeContextCurrent(context) == ALC_FALSE)
1390 fprintf(stderr, "OpenAL: could not make context current - exiting\n");
1391 return 1;
1394 if(!alIsExtensionPresent("AL_SOFT_source_length"))
1396 fprintf(stderr, "Required AL_SOFT_source_length not supported - exiting\n");
1397 return 1;
1400 if(!alIsExtensionPresent("AL_SOFT_source_latency"))
1401 fprintf(stderr, "AL_SOFT_source_latency not supported, audio may be a bit laggy.\n");
1402 else
1404 alGetSourcedvSOFT = alGetProcAddress("alGetSourcedvSOFT");
1405 has_latency_check = true;
1409 movState = av_mallocz(sizeof(MovieState));
1411 av_strlcpy(movState->filename, argv[1], sizeof(movState->filename));
1413 packet_queue_init(&movState->audio.q);
1414 packet_queue_init(&movState->video.q);
1416 almtx_init(&movState->video.pictq_mutex, almtx_plain);
1417 alcnd_init(&movState->video.pictq_cond);
1418 almtx_init(&movState->audio.src_mutex, almtx_recursive);
1420 movState->av_sync_type = DEFAULT_AV_SYNC_TYPE;
1422 movState->pFormatCtx = avformat_alloc_context();
1423 movState->pFormatCtx->interrupt_callback = (AVIOInterruptCB){.callback=decode_interrupt_cb, .opaque=movState};
1425 if(avio_open2(&movState->pFormatCtx->pb, movState->filename, AVIO_FLAG_READ,
1426 &movState->pFormatCtx->interrupt_callback, NULL))
1428 fprintf(stderr, "Failed to open %s\n", movState->filename);
1429 return 1;
1432 /* Open movie file */
1433 if(avformat_open_input(&movState->pFormatCtx, movState->filename, NULL, NULL) != 0)
1435 fprintf(stderr, "Failed to open %s\n", movState->filename);
1436 return 1;
1439 /* Retrieve stream information */
1440 if(avformat_find_stream_info(movState->pFormatCtx, NULL) < 0)
1442 fprintf(stderr, "%s: failed to find stream info\n", movState->filename);
1443 return 1;
1446 schedule_refresh(movState, 40);
1449 if(althrd_create(&movState->parse_thread, decode_thread, movState) != althrd_success)
1451 fprintf(stderr, "Failed to create parse thread!\n");
1452 return 1;
1454 while(SDL_WaitEvent(&event) == 1)
1456 switch(event.type)
1458 case SDL_KEYDOWN:
1459 switch(event.key.keysym.sym)
1461 case SDLK_ESCAPE:
1462 movState->quit = true;
1463 break;
1465 case SDLK_LEFT:
1466 stream_seek(movState, -10.0);
1467 break;
1468 case SDLK_RIGHT:
1469 stream_seek(movState, 10.0);
1470 break;
1471 case SDLK_UP:
1472 stream_seek(movState, 30.0);
1473 break;
1474 case SDLK_DOWN:
1475 stream_seek(movState, -30.0);
1476 break;
1478 default:
1479 break;
1481 break;
1483 case SDL_WINDOWEVENT:
1484 switch(event.window.event)
1486 case SDL_WINDOWEVENT_RESIZED:
1487 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1488 SDL_RenderFillRect(renderer, NULL);
1489 break;
1491 default:
1492 break;
1494 break;
1496 case SDL_QUIT:
1497 movState->quit = true;
1498 break;
1500 case FF_UPDATE_EVENT:
1501 update_picture(event.user.data1, &first_update, screen, renderer);
1502 break;
1504 case FF_REFRESH_EVENT:
1505 video_refresh_timer(event.user.data1, screen, renderer);
1506 break;
1508 case FF_QUIT_EVENT:
1509 althrd_join(movState->parse_thread, NULL);
1511 avformat_close_input(&movState->pFormatCtx);
1513 almtx_destroy(&movState->audio.src_mutex);
1514 almtx_destroy(&movState->video.pictq_mutex);
1515 alcnd_destroy(&movState->video.pictq_cond);
1516 packet_queue_deinit(&movState->video.q);
1517 packet_queue_deinit(&movState->audio.q);
1519 alcMakeContextCurrent(NULL);
1520 alcDestroyContext(context);
1521 alcCloseDevice(device);
1523 SDL_Quit();
1524 exit(0);
1526 default:
1527 break;
1531 fprintf(stderr, "SDL_WaitEvent error - %s\n", SDL_GetError());
1532 return 1;