examples/alffplay.cpp
1 /*
2 * An example showing how to play a stream sync'd to video, using ffmpeg.
4 * Requires C++11.
5 */
7 #include <condition_variable>
8 #include <algorithm>
9 #include <iostream>
10 #include <iomanip>
11 #include <cstring>
12 #include <limits>
13 #include <thread>
14 #include <chrono>
15 #include <atomic>
16 #include <mutex>
17 #include <deque>
19 extern "C" {
20 #include "libavcodec/avcodec.h"
21 #include "libavformat/avformat.h"
22 #include "libavformat/avio.h"
23 #include "libavutil/time.h"
24 #include "libavutil/pixfmt.h"
25 #include "libavutil/avstring.h"
26 #include "libavutil/channel_layout.h"
27 #include "libswscale/swscale.h"
28 #include "libswresample/swresample.h"
31 #include "SDL.h"
33 #include "AL/alc.h"
34 #include "AL/al.h"
35 #include "AL/alext.h"
37 namespace
40 static const std::string AppName("alffplay");
42 static bool has_latency_check = false;
43 static LPALGETSOURCEDVSOFT alGetSourcedvSOFT;
45 #define AUDIO_BUFFER_TIME 100 /* In milliseconds, per-buffer */
46 #define AUDIO_BUFFER_QUEUE_SIZE 8 /* Number of buffers to queue */
47 #define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
48 #define AV_SYNC_THRESHOLD 0.01
49 #define AV_NOSYNC_THRESHOLD 10.0
50 #define SAMPLE_CORRECTION_MAX_DIFF 0.05
51 #define AUDIO_DIFF_AVG_NB 20
52 #define VIDEO_PICTURE_QUEUE_SIZE 16
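/* Note: AUDIO_BUFFER_QUEUE_SIZE buffers of AUDIO_BUFFER_TIME milliseconds each
 * means roughly 8 * 100ms = 800ms of decoded audio can be queued on the OpenAL
 * source at any one time. */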
54 enum {
55 FF_UPDATE_EVENT = SDL_USEREVENT,
56 FF_REFRESH_EVENT,
57 FF_MOVIE_DONE_EVENT
60 enum {
61 AV_SYNC_AUDIO_MASTER,
62 AV_SYNC_VIDEO_MASTER,
63 AV_SYNC_EXTERNAL_MASTER,
65 DEFAULT_AV_SYNC_TYPE = AV_SYNC_EXTERNAL_MASTER
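/* A thread-safe FIFO of demuxed packets. The parser thread put()s packets, the
 * audio and video decoder threads peek()/pop() them; mTotalSize tracks the
 * compressed bytes currently queued and finish() signals end of stream. */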
69 struct PacketQueue {
70 std::deque<AVPacket> mPackets;
71 std::atomic<int> mTotalSize;
72 std::atomic<bool> mFinished;
73 std::mutex mMutex;
74 std::condition_variable mCond;
76 PacketQueue() : mTotalSize(0), mFinished(false)
77 { }
78 ~PacketQueue()
79 { clear(); }
81 int put(const AVPacket *pkt);
82 int peek(AVPacket *pkt, std::atomic<bool> &quit_var);
83 void pop();
85 void clear();
86 void finish();
90 struct MovieState;
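/* Decodes the audio stream and feeds it to an OpenAL source by cycling through
 * a small set of buffers (AUDIO_BUFFER_QUEUE_SIZE). Also tracks the audio
 * clock used for A/V synchronization. */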
92 struct AudioState {
93 MovieState *mMovie;
95 AVStream *mStream;
96 AVCodecContext *mCodecCtx;
98 PacketQueue mQueue;
100 /* Used for clock difference average computation */
101 struct {
102 std::atomic<int> Clocks; /* In microseconds */
103 double Accum;
104 double AvgCoeff;
105 double Threshold;
106 int AvgCount;
107 } mDiff;
109 /* Time (in seconds) of the next sample to be buffered */
110 double mCurrentPts;
112 /* Decompressed sample frame, and swresample context for conversion */
113 AVFrame *mDecodedFrame;
114 struct SwrContext *mSwresCtx;
116 /* Conversion format, for what gets fed to OpenAL */
117 int mDstChanLayout;
118 enum AVSampleFormat mDstSampleFmt;
120 /* Storage of converted samples */
121 uint8_t *mSamples;
122 int mSamplesLen; /* In samples */
123 int mSamplesPos;
124 int mSamplesMax;
126 /* OpenAL format */
127 ALenum mFormat;
128 ALsizei mFrameSize;
130 std::recursive_mutex mSrcMutex;
131 ALuint mSource;
132 ALuint mBuffers[AUDIO_BUFFER_QUEUE_SIZE];
133 ALsizei mBufferIdx;
135 AudioState(MovieState *movie)
136 : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr)
137 , mDiff{{0}, 0.0, 0.0, 0.0, 0}, mCurrentPts(0.0), mDecodedFrame(nullptr)
138 , mSwresCtx(nullptr), mDstChanLayout(0), mDstSampleFmt(AV_SAMPLE_FMT_NONE)
139 , mSamples(nullptr), mSamplesLen(0), mSamplesPos(0), mSamplesMax(0)
140 , mFormat(AL_NONE), mFrameSize(0), mSource(0), mBufferIdx(0)
142 for(auto &buf : mBuffers)
143 buf = 0;
145 ~AudioState()
147 if(mSource)
148 alDeleteSources(1, &mSource);
149 alDeleteBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);
151 av_frame_free(&mDecodedFrame);
152 swr_free(&mSwresCtx);
154 av_freep(&mSamples);
156 avcodec_free_context(&mCodecCtx);
159 double getClock();
161 int getSync();
162 int decodeFrame();
163 int readAudio(uint8_t *samples, int length);
165 int handler();
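/* Decodes the video stream into a small ring of Pictures (SDL textures) that
 * the main thread displays, pacing presentation against the master clock. */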
168 struct VideoState {
169 MovieState *mMovie;
171 AVStream *mStream;
172 AVCodecContext *mCodecCtx;
174 PacketQueue mQueue;
176 double mClock;
177 double mFrameTimer;
178 double mFrameLastPts;
179 double mFrameLastDelay;
180 double mCurrentPts;
181 /* time (av_gettime) at which we updated mCurrentPts, used to track the running video pts */
182 int64_t mCurrentPtsTime;
184 /* Decompressed video frame, and swscale context for conversion */
185 AVFrame *mDecodedFrame;
186 struct SwsContext *mSwscaleCtx;
188 struct Picture {
189 SDL_Texture *mImage;
190 int mWidth, mHeight; /* Logical image size (actual size may be larger) */
191 std::atomic<bool> mUpdated;
192 double mPts;
194 Picture()
195 : mImage(nullptr), mWidth(0), mHeight(0), mUpdated(false), mPts(0.0)
197 ~Picture()
199 if(mImage)
200 SDL_DestroyTexture(mImage);
201 mImage = nullptr;
204 std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
205 size_t mPictQSize, mPictQRead, mPictQWrite;
206 std::mutex mPictQMutex;
207 std::condition_variable mPictQCond;
208 bool mFirstUpdate;
209 std::atomic<bool> mEOS;
210 std::atomic<bool> mFinalUpdate;
212 VideoState(MovieState *movie)
213 : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr), mClock(0.0)
214 , mFrameTimer(0.0), mFrameLastPts(0.0), mFrameLastDelay(0.0)
215 , mCurrentPts(0.0), mCurrentPtsTime(0), mDecodedFrame(nullptr)
216 , mSwscaleCtx(nullptr), mPictQSize(0), mPictQRead(0), mPictQWrite(0)
217 , mFirstUpdate(true), mEOS(false), mFinalUpdate(false)
219 ~VideoState()
221 sws_freeContext(mSwscaleCtx);
222 mSwscaleCtx = nullptr;
223 av_frame_free(&mDecodedFrame);
224 avcodec_free_context(&mCodecCtx);
227 double getClock();
229 static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
230 void schedRefresh(int delay);
231 void display(SDL_Window *screen, SDL_Renderer *renderer);
232 void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
233 void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
234 int queuePicture(double pts);
235 double synchronize(double pts);
236 int handler();
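/* Top-level state for one movie: owns the AVFormatContext, the stream indices,
 * the external clock base, and the parser/audio/video threads. */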
239 struct MovieState {
240 AVFormatContext *mFormatCtx;
241 int mVideoStream, mAudioStream;
243 int mAVSyncType;
245 int64_t mExternalClockBase;
247 std::atomic<bool> mQuit;
249 AudioState mAudio;
250 VideoState mVideo;
252 std::thread mParseThread;
253 std::thread mAudioThread;
254 std::thread mVideoThread;
256 std::string mFilename;
258 MovieState(std::string fname)
259 : mFormatCtx(nullptr), mVideoStream(0), mAudioStream(0)
260 , mAVSyncType(DEFAULT_AV_SYNC_TYPE), mExternalClockBase(0), mQuit(false)
261 , mAudio(this), mVideo(this), mFilename(std::move(fname))
263 ~MovieState()
265 mQuit = true;
266 if(mParseThread.joinable())
267 mParseThread.join();
268 avformat_close_input(&mFormatCtx);
271 static int decode_interrupt_cb(void *ctx);
272 bool prepare();
273 void setTitle(SDL_Window *window);
275 double getClock();
277 double getMasterClock();
279 int streamComponentOpen(int stream_index);
280 int parse_handler();
284 int PacketQueue::put(const AVPacket *pkt)
286 std::unique_lock<std::mutex> lock(mMutex);
287 mPackets.push_back(AVPacket{});
288 if(av_packet_ref(&mPackets.back(), pkt) != 0)
290 mPackets.pop_back();
291 return -1;
293 mTotalSize += mPackets.back().size;
294 lock.unlock();
296 mCond.notify_one();
297 return 0;
300 int PacketQueue::peek(AVPacket *pkt, std::atomic<bool> &quit_var)
302 std::unique_lock<std::mutex> lock(mMutex);
303 while(!quit_var.load())
305 if(!mPackets.empty())
307 if(av_packet_ref(pkt, &mPackets.front()) != 0)
308 return -1;
309 return 1;
312 if(mFinished.load())
313 return 0;
314 mCond.wait(lock);
316 return -1;
319 void PacketQueue::pop()
321 std::unique_lock<std::mutex> lock(mMutex);
322 AVPacket *pkt = &mPackets.front();
323 mTotalSize -= pkt->size;
324 av_packet_unref(pkt);
325 mPackets.pop_front();
328 void PacketQueue::clear()
330 std::unique_lock<std::mutex> lock(mMutex);
331 std::for_each(mPackets.begin(), mPackets.end(),
332 [](AVPacket &pkt) { av_packet_unref(&pkt); }
334 mPackets.clear();
335 mTotalSize = 0;
337 void PacketQueue::finish()
339 std::unique_lock<std::mutex> lock(mMutex);
340 mFinished = true;
341 lock.unlock();
342 mCond.notify_all();
346 double AudioState::getClock()
348 double pts;
350 std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
351 /* The audio clock is the timestamp of the sample currently being heard.
352 * It's based on 4 components:
353 * 1 - The timestamp of the next sample to buffer (mCurrentPts)
354 * 2 - The length of the source's buffer queue
355 * 3 - The offset OpenAL is currently at in the source (the first value
356 * from AL_SEC_OFFSET_LATENCY_SOFT)
357 * 4 - The latency between OpenAL and the DAC (the second value from
358 * AL_SEC_OFFSET_LATENCY_SOFT)
360 * Subtracting the length of the source queue from the next sample's
361 * timestamp gives the timestamp of the sample at the start of the source
362 * queue. Adding the source offset to that results in the timestamp for
363 * OpenAL's current position, and subtracting the source latency from that
364 * gives the timestamp of the sample currently at the DAC.
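 *
 * For example, with mCurrentPts at 10.0s, 8 buffers of 100ms queued, a source
 * offset of 0.04s and a reported latency of 0.02s, the clock reads
 * 10.0 - 0.8 + 0.04 - 0.02 = 9.22 seconds.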
366 pts = mCurrentPts;
367 if(mSource)
369 ALdouble offset[2];
370 ALint queue_size;
371 ALint status;
373 /* NOTE: The source state must be checked last, in case an underrun
374 * occurs and the source stops between retrieving the offset+latency
375 * and getting the state. */
376 if(has_latency_check)
378 alGetSourcedvSOFT(mSource, AL_SEC_OFFSET_LATENCY_SOFT, offset);
379 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);
381 else
383 ALint ioffset;
384 alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
385 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);
386 offset[0] = (double)ioffset / (double)mCodecCtx->sample_rate;
387 offset[1] = 0.0;
389 alGetSourcei(mSource, AL_SOURCE_STATE, &status);
391 /* If the source is AL_STOPPED, then there was an underrun and all
392 * buffers are processed, so ignore the source queue. The audio thread
393 * will put the source into an AL_INITIAL state and clear the queue
394 * when it starts recovery. */
395 if(status != AL_STOPPED)
396 pts -= queue_size*((double)AUDIO_BUFFER_TIME/1000.0) - offset[0];
397 if(status == AL_PLAYING)
398 pts -= offset[1];
400 lock.unlock();
402 return std::max(pts, 0.0);
405 int AudioState::getSync()
407 double diff, avg_diff, ref_clock;
409 if(mMovie->mAVSyncType == AV_SYNC_AUDIO_MASTER)
410 return 0;
412 ref_clock = mMovie->getMasterClock();
413 diff = ref_clock - getClock();
415 if(!(fabs(diff) < AV_NOSYNC_THRESHOLD))
417 /* Difference is TOO big; reset diff stuff */
418 mDiff.Accum = 0.0;
419 return 0;
422 /* Accumulate the diffs */
423 mDiff.Accum = mDiff.Accum*mDiff.AvgCoeff + diff;
424 avg_diff = mDiff.Accum*(1.0 - mDiff.AvgCoeff);
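/* mDiff.Accum holds a geometric series of past diffs, so scaling by
 * (1 - AvgCoeff) gives a weighted average whose weights sum to ~1; a single
 * outlier diff won't immediately trigger a correction. */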
425 if(fabs(avg_diff) < mDiff.Threshold)
426 return 0;
428 /* Constrain the per-update difference to avoid exceedingly large skips */
429 if(!(diff <= SAMPLE_CORRECTION_MAX_DIFF))
430 diff = SAMPLE_CORRECTION_MAX_DIFF;
431 else if(!(diff >= -SAMPLE_CORRECTION_MAX_DIFF))
432 diff = -SAMPLE_CORRECTION_MAX_DIFF;
433 return (int)(diff*mCodecCtx->sample_rate);
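/* Decodes one audio frame. Packets from the queue are fed to the decoder with
 * avcodec_send_packet() until avcodec_receive_frame() produces a frame, which
 * is then converted by swr_convert() into the chosen output layout and format.
 * Returns the number of converted sample frames, or <= 0 on error or end of
 * stream. */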
436 int AudioState::decodeFrame()
438 while(!mMovie->mQuit.load())
440 while(!mMovie->mQuit.load())
442 /* Get the next packet */
443 AVPacket pkt{};
444 if(mQueue.peek(&pkt, mMovie->mQuit) <= 0)
445 return -1;
447 int ret = avcodec_send_packet(mCodecCtx, &pkt);
448 if(ret != AVERROR(EAGAIN))
450 if(ret < 0)
451 std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec <<std::endl;
452 mQueue.pop();
454 av_packet_unref(&pkt);
455 if(ret == 0 || ret == AVERROR(EAGAIN))
456 break;
459 int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
460 if(ret == AVERROR(EAGAIN))
461 continue;
462 if(ret == AVERROR_EOF || ret < 0)
464 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
465 return 0;
468 if(mDecodedFrame->nb_samples <= 0)
470 av_frame_unref(mDecodedFrame);
471 continue;
474 /* If provided, update w/ pts */
475 int64_t pts = av_frame_get_best_effort_timestamp(mDecodedFrame);
476 if(pts != AV_NOPTS_VALUE)
477 mCurrentPts = av_q2d(mStream->time_base)*pts;
479 if(mDecodedFrame->nb_samples > mSamplesMax)
481 av_freep(&mSamples);
482 av_samples_alloc(
483 &mSamples, nullptr, mCodecCtx->channels,
484 mDecodedFrame->nb_samples, mDstSampleFmt, 0
486 mSamplesMax = mDecodedFrame->nb_samples;
488 /* Return the number of sample frames converted */
489 int data_size = swr_convert(mSwresCtx, &mSamples, mDecodedFrame->nb_samples,
490 (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
493 av_frame_unref(mDecodedFrame);
494 return data_size;
497 return 0;
500 /* Duplicates the sample at in to out, count times. The frame size is a
501 * multiple of the template type size.
503 template<typename T>
504 static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
506 const T *sample = reinterpret_cast<const T*>(in);
507 T *dst = reinterpret_cast<T*>(out);
508 if(frame_size == sizeof(T))
509 std::fill_n(dst, count, *sample);
510 else
512 /* NOTE: frame_size is a multiple of sizeof(T). */
513 int type_mult = frame_size / sizeof(T);
514 int i = 0;
515 std::generate_n(dst, count*type_mult,
516 [sample,type_mult,&i]() -> T
518 T ret = sample[i];
519 i = (i+1)%type_mult;
520 return ret;
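/* Reads up to 'length' bytes of converted audio into 'samples'. getSync()
 * supplies a sample offset correction: a positive value skips that many
 * decoded samples, while a negative value (carried as a negative mSamplesPos)
 * repeats the first decoded sample via sample_dup() to pad the output. */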
527 int AudioState::readAudio(uint8_t *samples, int length)
529 int sample_skip = getSync();
530 int audio_size = 0;
532 /* Read the next chunk of data, refill the buffer, and queue it
533 * on the source */
534 length /= mFrameSize;
535 while(audio_size < length)
537 if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
539 int frame_len = decodeFrame();
540 if(frame_len <= 0) break;
542 mSamplesLen = frame_len;
543 mSamplesPos = std::min(mSamplesLen, sample_skip);
544 sample_skip -= mSamplesPos;
546 mCurrentPts += (double)mSamplesPos / (double)mCodecCtx->sample_rate;
547 continue;
550 int rem = length - audio_size;
551 if(mSamplesPos >= 0)
553 int len = mSamplesLen - mSamplesPos;
554 if(rem > len) rem = len;
555 memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
557 else
559 rem = std::min(rem, -mSamplesPos);
561 /* Add samples by copying the first sample */
562 if((mFrameSize&7) == 0)
563 sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
564 else if((mFrameSize&3) == 0)
565 sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
566 else if((mFrameSize&1) == 0)
567 sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
568 else
569 sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
572 mSamplesPos += rem;
573 mCurrentPts += (double)rem / mCodecCtx->sample_rate;
574 samples += rem*mFrameSize;
575 audio_size += rem;
578 if(audio_size < length && audio_size > 0)
580 int rem = length - audio_size;
581 std::fill_n(samples, rem*mFrameSize,
582 (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
583 mCurrentPts += (double)rem / mCodecCtx->sample_rate;
584 audio_size += rem;
587 return audio_size * mFrameSize;
591 int AudioState::handler()
593 std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
594 ALenum fmt;
596 /* Find a suitable format for OpenAL. */
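/* The selection below prefers an output sample type matching the decoder's
 * (8-bit, float with AL_EXT_FLOAT32, otherwise 16-bit), and a channel layout
 * matching the source when AL_EXT_MCFORMATS provides 5.1/7.1 formats, falling
 * back to stereo otherwise. mFrameSize ends up as the byte size of one sample
 * frame in the chosen format. */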
597 mDstChanLayout = 0;
598 if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
600 mDstSampleFmt = AV_SAMPLE_FMT_U8;
601 mFrameSize = 1;
602 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
603 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
604 (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
606 mDstChanLayout = mCodecCtx->channel_layout;
607 mFrameSize *= 8;
608 mFormat = fmt;
610 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
611 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
612 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
613 (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
615 mDstChanLayout = mCodecCtx->channel_layout;
616 mFrameSize *= 6;
617 mFormat = fmt;
619 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
621 mDstChanLayout = mCodecCtx->channel_layout;
622 mFrameSize *= 1;
623 mFormat = AL_FORMAT_MONO8;
625 if(!mDstChanLayout)
627 mDstChanLayout = AV_CH_LAYOUT_STEREO;
628 mFrameSize *= 2;
629 mFormat = AL_FORMAT_STEREO8;
632 if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
633 alIsExtensionPresent("AL_EXT_FLOAT32"))
635 mDstSampleFmt = AV_SAMPLE_FMT_FLT;
636 mFrameSize = 4;
637 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
638 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
639 (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
641 mDstChanLayout = mCodecCtx->channel_layout;
642 mFrameSize *= 8;
643 mFormat = fmt;
645 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
646 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
647 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
648 (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
650 mDstChanLayout = mCodecCtx->channel_layout;
651 mFrameSize *= 6;
652 mFormat = fmt;
654 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
656 mDstChanLayout = mCodecCtx->channel_layout;
657 mFrameSize *= 1;
658 mFormat = AL_FORMAT_MONO_FLOAT32;
660 if(!mDstChanLayout)
662 mDstChanLayout = AV_CH_LAYOUT_STEREO;
663 mFrameSize *= 2;
664 mFormat = AL_FORMAT_STEREO_FLOAT32;
667 if(!mDstChanLayout)
669 mDstSampleFmt = AV_SAMPLE_FMT_S16;
670 mFrameSize = 2;
671 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
672 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
673 (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
675 mDstChanLayout = mCodecCtx->channel_layout;
676 mFrameSize *= 8;
677 mFormat = fmt;
679 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
680 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
681 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
682 (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
684 mDstChanLayout = mCodecCtx->channel_layout;
685 mFrameSize *= 6;
686 mFormat = fmt;
688 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
690 mDstChanLayout = mCodecCtx->channel_layout;
691 mFrameSize *= 1;
692 mFormat = AL_FORMAT_MONO16;
694 if(!mDstChanLayout)
696 mDstChanLayout = AV_CH_LAYOUT_STEREO;
697 mFrameSize *= 2;
698 mFormat = AL_FORMAT_STEREO16;
701 ALsizei buffer_len = mCodecCtx->sample_rate * AUDIO_BUFFER_TIME / 1000 *
702 mFrameSize;
703 void *samples = av_malloc(buffer_len);
705 mSamples = NULL;
706 mSamplesMax = 0;
707 mSamplesPos = 0;
708 mSamplesLen = 0;
710 if(!(mDecodedFrame=av_frame_alloc()))
712 std::cerr<< "Failed to allocate audio frame" <<std::endl;
713 goto finish;
716 mSwresCtx = swr_alloc_set_opts(nullptr,
717 mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
718 mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
719 (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
720 mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
721 0, nullptr
723 if(!mSwresCtx || swr_init(mSwresCtx) != 0)
725 std::cerr<< "Failed to initialize audio converter" <<std::endl;
726 goto finish;
729 alGenBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);
730 alGenSources(1, &mSource);
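/* Standard OpenAL streaming loop: unqueue any processed buffers, refill and
 * queue buffers until the queue is full or the decoder runs dry, restart the
 * source if it stopped (underrun), then sleep a fraction of one buffer's
 * duration before checking again. */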
732 while(alGetError() == AL_NO_ERROR && !mMovie->mQuit.load())
734 /* First remove any processed buffers. */
735 ALint processed;
736 alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
737 if(processed > 0)
739 std::array<ALuint,AUDIO_BUFFER_QUEUE_SIZE> tmp;
740 alSourceUnqueueBuffers(mSource, processed, tmp.data());
743 /* Refill the buffer queue. */
744 ALint queued;
745 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
746 while(queued < AUDIO_BUFFER_QUEUE_SIZE)
748 int audio_size;
750 /* Read the next chunk of data, fill the buffer, and queue it on
751 * the source */
752 audio_size = readAudio(reinterpret_cast<uint8_t*>(samples), buffer_len);
753 if(audio_size <= 0) break;
755 ALuint bufid = mBuffers[mBufferIdx++];
756 mBufferIdx %= AUDIO_BUFFER_QUEUE_SIZE;
758 alBufferData(bufid, mFormat, samples, audio_size, mCodecCtx->sample_rate);
759 alSourceQueueBuffers(mSource, 1, &bufid);
760 queued++;
762 if(queued == 0)
763 break;
765 /* Check that the source is playing. */
766 ALint state;
767 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
768 if(state == AL_STOPPED)
770 /* AL_STOPPED means there was an underrun. Rewind the source to get
771 * it back into an AL_INITIAL state.
773 alSourceRewind(mSource);
774 continue;
777 lock.unlock();
779 /* (re)start the source if needed, and wait for a buffer to finish */
780 if(state != AL_PLAYING && state != AL_PAUSED)
781 alSourcePlay(mSource);
782 SDL_Delay(AUDIO_BUFFER_TIME / 3);
784 lock.lock();
787 finish:
788 alSourceRewind(mSource);
789 alSourcei(mSource, AL_BUFFER, 0);
791 av_frame_free(&mDecodedFrame);
792 swr_free(&mSwresCtx);
794 av_freep(&mSamples);
796 return 0;
800 double VideoState::getClock()
802 double delta = (av_gettime() - mCurrentPtsTime) / 1000000.0;
803 return mCurrentPts + delta;
806 Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
808 SDL_Event evt{};
809 evt.user.type = FF_REFRESH_EVENT;
810 evt.user.data1 = opaque;
811 SDL_PushEvent(&evt);
812 return 0; /* 0 means stop timer */
815 /* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
816 void VideoState::schedRefresh(int delay)
818 SDL_AddTimer(delay, sdl_refresh_timer_cb, this);
821 /* Called by VideoState::refreshTimer to display the next video frame. */
822 void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
824 Picture *vp = &mPictQ[mPictQRead];
826 if(!vp->mImage)
827 return;
829 float aspect_ratio;
830 int win_w, win_h;
831 int w, h, x, y;
833 if(mCodecCtx->sample_aspect_ratio.num == 0)
834 aspect_ratio = 0.0f;
835 else
837 aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
838 mCodecCtx->height;
840 if(aspect_ratio <= 0.0f)
841 aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;
843 SDL_GetWindowSize(screen, &win_w, &win_h);
844 h = win_h;
845 w = ((int)rint(h * aspect_ratio) + 3) & ~3;
846 if(w > win_w)
848 w = win_w;
849 h = ((int)rint(w / aspect_ratio) + 3) & ~3;
851 x = (win_w - w) / 2;
852 y = (win_h - h) / 2;
854 SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
855 SDL_Rect dst_rect{ x, y, w, h };
856 SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
857 SDL_RenderPresent(renderer);
860 /* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
861 * was created. It handles the display of the next decoded video frame (if not
862 * falling behind), and sets up the timer for the following video frame.
864 void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
866 if(!mStream)
868 if(mEOS)
870 mFinalUpdate = true;
871 std::unique_lock<std::mutex>(mPictQMutex).unlock();
872 mPictQCond.notify_all();
873 return;
875 schedRefresh(100);
876 return;
879 std::unique_lock<std::mutex> lock(mPictQMutex);
880 retry:
881 if(mPictQSize == 0)
883 if(mEOS)
884 mFinalUpdate = true;
885 else
886 schedRefresh(1);
887 lock.unlock();
888 mPictQCond.notify_all();
889 return;
892 Picture *vp = &mPictQ[mPictQRead];
893 mCurrentPts = vp->mPts;
894 mCurrentPtsTime = av_gettime();
896 /* Get delay using the frame pts and the pts from last frame. */
897 double delay = vp->mPts - mFrameLastPts;
898 if(delay <= 0 || delay >= 1.0)
900 /* If incorrect delay, use previous one. */
901 delay = mFrameLastDelay;
903 /* Save for next frame. */
904 mFrameLastDelay = delay;
905 mFrameLastPts = vp->mPts;
907 /* Update delay to sync to clock if not master source. */
908 if(mMovie->mAVSyncType != AV_SYNC_VIDEO_MASTER)
910 double ref_clock = mMovie->getMasterClock();
911 double diff = vp->mPts - ref_clock;
913 /* Skip or repeat the frame. Take delay into account. */
914 double sync_threshold = std::min(delay, AV_SYNC_THRESHOLD);
915 if(fabs(diff) < AV_NOSYNC_THRESHOLD)
917 if(diff <= -sync_threshold)
918 delay = 0;
919 else if(diff >= sync_threshold)
920 delay *= 2.0;
924 mFrameTimer += delay;
925 /* Compute the REAL delay. */
926 double actual_delay = mFrameTimer - (av_gettime() / 1000000.0);
927 if(!(actual_delay >= 0.010))
929 /* We don't have time to handle this picture, just skip to the next one. */
930 mPictQRead = (mPictQRead+1)%mPictQ.size();
931 mPictQSize--;
932 goto retry;
934 schedRefresh((int)(actual_delay*1000.0 + 0.5));
936 /* Show the picture! */
937 display(screen, renderer);
939 /* Update queue for next picture. */
940 mPictQRead = (mPictQRead+1)%mPictQ.size();
941 mPictQSize--;
942 lock.unlock();
943 mPictQCond.notify_all();
946 /* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
947 * main thread where the renderer was created.
949 void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
951 Picture *vp = &mPictQ[mPictQWrite];
952 bool fmt_updated = false;
954 /* allocate or resize the buffer! */
955 if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
957 fmt_updated = true;
958 if(vp->mImage)
959 SDL_DestroyTexture(vp->mImage);
960 vp->mImage = SDL_CreateTexture(
961 renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
962 mCodecCtx->coded_width, mCodecCtx->coded_height
964 if(!vp->mImage)
965 std::cerr<< "Failed to create IYUV texture!" <<std::endl;
966 vp->mWidth = mCodecCtx->width;
967 vp->mHeight = mCodecCtx->height;
969 if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
971 /* For the first update, set the window size to the video size. */
972 mFirstUpdate = false;
974 int w = vp->mWidth;
975 int h = vp->mHeight;
976 if(mCodecCtx->sample_aspect_ratio.den != 0)
978 double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
979 if(aspect_ratio >= 1.0)
980 w = (int)(w*aspect_ratio + 0.5);
981 else if(aspect_ratio > 0.0)
982 h = (int)(h/aspect_ratio + 0.5);
984 SDL_SetWindowSize(screen, w, h);
988 if(vp->mImage)
990 AVFrame *frame = mDecodedFrame;
991 void *pixels = nullptr;
992 int pitch = 0;
994 if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
995 SDL_UpdateYUVTexture(vp->mImage, nullptr,
996 frame->data[0], frame->linesize[0],
997 frame->data[1], frame->linesize[1],
998 frame->data[2], frame->linesize[2]
1000 else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
1001 std::cerr<< "Failed to lock texture" <<std::endl;
1002 else
1004 // Convert the image into the YUV format that SDL uses
1005 int coded_w = mCodecCtx->coded_width;
1006 int coded_h = mCodecCtx->coded_height;
1007 int w = mCodecCtx->width;
1008 int h = mCodecCtx->height;
1009 if(!mSwscaleCtx || fmt_updated)
1011 sws_freeContext(mSwscaleCtx);
1012 mSwscaleCtx = sws_getContext(
1013 w, h, mCodecCtx->pix_fmt,
1014 w, h, AV_PIX_FMT_YUV420P, 0,
1015 nullptr, nullptr, nullptr
1019 /* point pict at the queue */
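/* The locked IYUV texture is a single allocation: a full-size Y plane followed
 * by quarter-size U and V planes, hence the coded_w*coded_h plane offsets and
 * the pitch/2 chroma line sizes below. */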
1020 uint8_t *pict_data[3];
1021 pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
1022 pict_data[1] = pict_data[0] + coded_w*coded_h;
1023 pict_data[2] = pict_data[1] + coded_w*coded_h/4;
1025 int pict_linesize[3];
1026 pict_linesize[0] = pitch;
1027 pict_linesize[1] = pitch / 2;
1028 pict_linesize[2] = pitch / 2;
1030 sws_scale(mSwscaleCtx, (const uint8_t**)frame->data,
1031 frame->linesize, 0, h, pict_data, pict_linesize);
1032 SDL_UnlockTexture(vp->mImage);
1036 std::unique_lock<std::mutex> lock(mPictQMutex);
1037 vp->mUpdated = true;
1038 lock.unlock();
1039 mPictQCond.notify_one();
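/* Queues the current decoded frame for display. The actual texture update has
 * to happen on the main thread (where the renderer lives), so this pushes an
 * FF_UPDATE_EVENT and waits on mPictQCond until updatePicture() marks the
 * Picture as updated. */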
1042 int VideoState::queuePicture(double pts)
1044 /* Wait until we have space for a new pic */
1045 std::unique_lock<std::mutex> lock(mPictQMutex);
1046 while(mPictQSize >= mPictQ.size() && !mMovie->mQuit.load())
1047 mPictQCond.wait(lock);
1048 lock.unlock();
1050 if(mMovie->mQuit.load())
1051 return -1;
1053 Picture *vp = &mPictQ[mPictQWrite];
1055 /* We have to create/update the picture in the main thread */
1056 vp->mUpdated = false;
1057 SDL_Event evt{};
1058 evt.user.type = FF_UPDATE_EVENT;
1059 evt.user.data1 = this;
1060 SDL_PushEvent(&evt);
1062 /* Wait until the picture is updated. */
1063 lock.lock();
1064 while(!vp->mUpdated && !mMovie->mQuit.load())
1065 mPictQCond.wait(lock);
1066 if(mMovie->mQuit.load())
1067 return -1;
1068 vp->mPts = pts;
1070 mPictQWrite = (mPictQWrite+1)%mPictQ.size();
1071 mPictQSize++;
1072 lock.unlock();
1074 return 0;
1077 double VideoState::synchronize(double pts)
1079 double frame_delay;
1081 if(pts == 0.0) /* if we aren't given a pts, set it to the clock */
1082 pts = mClock;
1083 else /* if we have pts, set video clock to it */
1084 mClock = pts;
1086 /* update the video clock */
1087 frame_delay = av_q2d(mCodecCtx->time_base);
1088 /* if we are repeating a frame, adjust clock accordingly */
1089 frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
1090 mClock += frame_delay;
1091 return pts;
1094 int VideoState::handler()
1096 mDecodedFrame = av_frame_alloc();
1097 while(!mMovie->mQuit)
1099 while(!mMovie->mQuit)
1101 AVPacket packet{};
1102 if(mQueue.peek(&packet, mMovie->mQuit) <= 0)
1103 goto finish;
1105 int ret = avcodec_send_packet(mCodecCtx, &packet);
1106 if(ret != AVERROR(EAGAIN))
1108 if(ret < 0)
1109 std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec <<std::endl;
1110 mQueue.pop();
1112 av_packet_unref(&packet);
1113 if(ret == 0 || ret == AVERROR(EAGAIN))
1114 break;
1117 /* Decode video frame */
1118 int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
1119 if(ret == AVERROR(EAGAIN))
1120 continue;
1121 if(ret < 0)
1123 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
1124 break;
1127 double pts = synchronize(
1128 av_q2d(mStream->time_base) * av_frame_get_best_effort_timestamp(mDecodedFrame)
1130 if(queuePicture(pts) < 0)
1131 break;
1132 av_frame_unref(mDecodedFrame);
1134 finish:
1135 mEOS = true;
1136 av_frame_free(&mDecodedFrame);
1138 std::unique_lock<std::mutex> lock(mPictQMutex);
1139 if(mMovie->mQuit)
1141 mPictQRead = 0;
1142 mPictQWrite = 0;
1143 mPictQSize = 0;
1145 while(!mFinalUpdate)
1146 mPictQCond.wait(lock);
1148 return 0;
1152 int MovieState::decode_interrupt_cb(void *ctx)
1154 return reinterpret_cast<MovieState*>(ctx)->mQuit;
1157 bool MovieState::prepare()
1159 mFormatCtx = avformat_alloc_context();
1160 mFormatCtx->interrupt_callback.callback = decode_interrupt_cb;
1161 mFormatCtx->interrupt_callback.opaque = this;
1162 if(avio_open2(&mFormatCtx->pb, mFilename.c_str(), AVIO_FLAG_READ,
1163 &mFormatCtx->interrupt_callback, nullptr))
1165 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1166 return false;
1169 /* Open movie file */
1170 if(avformat_open_input(&mFormatCtx, mFilename.c_str(), nullptr, nullptr) != 0)
1172 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1173 return false;
1176 /* Retrieve stream information */
1177 if(avformat_find_stream_info(mFormatCtx, nullptr) < 0)
1179 std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
1180 return false;
1183 mVideo.schedRefresh(40);
1185 mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
1186 return true;
1189 void MovieState::setTitle(SDL_Window *window)
1191 auto pos1 = mFilename.rfind('/');
1192 auto pos2 = mFilename.rfind('\\');
1193 auto fpos = ((pos1 == std::string::npos) ? pos2 :
1194 (pos2 == std::string::npos) ? pos1 :
1195 std::max(pos1, pos2)) + 1;
1196 SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1199 double MovieState::getClock()
1201 return (av_gettime()-mExternalClockBase) / 1000000.0;
1204 double MovieState::getMasterClock()
1206 if(mAVSyncType == AV_SYNC_VIDEO_MASTER)
1207 return mVideo.getClock();
1208 if(mAVSyncType == AV_SYNC_AUDIO_MASTER)
1209 return mAudio.getClock();
1210 return getClock();
1213 int MovieState::streamComponentOpen(int stream_index)
1215 if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
1216 return -1;
1218 /* Get a pointer to the codec context for the stream, and open the
1219 * associated codec.
1221 AVCodecContext *avctx = avcodec_alloc_context3(nullptr);
1222 if(!avctx) return -1;
1224 if(avcodec_parameters_to_context(avctx, mFormatCtx->streams[stream_index]->codecpar))
1226 avcodec_free_context(&avctx);
1227 return -1;
1230 AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
1231 if(!codec || avcodec_open2(avctx, codec, nullptr) < 0)
1233 std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
1234 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
1235 avcodec_free_context(&avctx);
1236 return -1;
1239 /* Initialize and start the media type handler */
1240 switch(avctx->codec_type)
1242 case AVMEDIA_TYPE_AUDIO:
1243 mAudioStream = stream_index;
1244 mAudio.mStream = mFormatCtx->streams[stream_index];
1245 mAudio.mCodecCtx = avctx;
1247 /* Averaging filter for audio sync */
1248 mAudio.mDiff.AvgCoeff = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
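/* AvgCoeff = 0.01^(1/AUDIO_DIFF_AVG_NB) (~0.794), so a diff's contribution to
 * the accumulator decays to 1% after AUDIO_DIFF_AVG_NB further updates. */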
1249 /* Correct audio only if larger error than this */
1250 mAudio.mDiff.Threshold = 0.050; /* 50 ms */
1252 mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
1253 break;
1255 case AVMEDIA_TYPE_VIDEO:
1256 mVideoStream = stream_index;
1257 mVideo.mStream = mFormatCtx->streams[stream_index];
1258 mVideo.mCodecCtx = avctx;
1260 mVideo.mCurrentPtsTime = av_gettime();
1261 mVideo.mFrameTimer = (double)mVideo.mCurrentPtsTime / 1000000.0;
1262 mVideo.mFrameLastDelay = 40e-3;
1264 mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
1265 break;
1267 default:
1268 avcodec_free_context(&avctx);
1269 break;
1272 return 0;
1275 int MovieState::parse_handler()
1277 int video_index = -1;
1278 int audio_index = -1;
1280 mVideoStream = -1;
1281 mAudioStream = -1;
1283 /* Dump information about the file onto standard error */
1284 av_dump_format(mFormatCtx, 0, mFilename.c_str(), 0);
1286 /* Find the first video and audio streams */
1287 for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
1289 if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
1290 video_index = i;
1291 else if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
1292 audio_index = i;
1294 /* Start the external clock in 50ms, to give the audio and video
1295 * components time to start without needing to skip ahead.
1297 mExternalClockBase = av_gettime() + 50000;
1298 if(audio_index >= 0)
1299 streamComponentOpen(audio_index);
1300 if(video_index >= 0)
1301 streamComponentOpen(video_index);
1303 if(mVideoStream < 0 && mAudioStream < 0)
1305 std::cerr<< mFilename<<": could not open codecs" <<std::endl;
1306 mQuit = true;
1309 /* Main packet handling loop */
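/* Read packets and route each one to the queue of the stream it belongs to,
 * sleeping whenever the combined compressed data queued exceeds MAX_QUEUE_SIZE
 * so memory use stays bounded. */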
1310 while(!mQuit.load())
1312 if(mAudio.mQueue.mTotalSize + mVideo.mQueue.mTotalSize >= MAX_QUEUE_SIZE)
1314 std::this_thread::sleep_for(std::chrono::milliseconds(10));
1315 continue;
1318 AVPacket packet;
1319 if(av_read_frame(mFormatCtx, &packet) < 0)
1320 break;
1322 /* Copy the packet into the queue it's meant for. */
1323 if(packet.stream_index == mVideoStream)
1324 mVideo.mQueue.put(&packet);
1325 else if(packet.stream_index == mAudioStream)
1326 mAudio.mQueue.put(&packet);
1327 av_packet_unref(&packet);
1329 mVideo.mQueue.finish();
1330 mAudio.mQueue.finish();
1332 /* all done - wait for it */
1333 if(mVideoThread.joinable())
1334 mVideoThread.join();
1335 if(mAudioThread.joinable())
1336 mAudioThread.join();
1338 mVideo.mEOS = true;
1339 std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
1340 while(!mVideo.mFinalUpdate)
1341 mVideo.mPictQCond.wait(lock);
1342 lock.unlock();
1344 SDL_Event evt{};
1345 evt.user.type = FF_MOVIE_DONE_EVENT;
1346 SDL_PushEvent(&evt);
1348 return 0;
1351 } // namespace
1354 int main(int argc, char *argv[])
1356 std::unique_ptr<MovieState> movState;
1358 if(argc < 2)
1360 std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] <files...>" <<std::endl;
1361 return 1;
1363 /* Register all formats and codecs */
1364 av_register_all();
1365 /* Initialize networking protocols */
1366 avformat_network_init();
1368 if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
1370 std::cerr<< "Could not initialize SDL - "<<SDL_GetError() <<std::endl;
1371 return 1;
1374 /* Make a window to put our video */
1375 SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
1376 if(!screen)
1378 std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
1379 return 1;
1381 /* Make a renderer to handle the texture image surface and rendering. */
1382 SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
1383 if(renderer)
1385 SDL_RendererInfo rinf{};
1386 bool ok = false;
1388 /* Make sure the renderer supports IYUV textures. If not, fall back to a
1389 * software renderer. */
1390 if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1392 for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
1393 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
1395 if(!ok)
1397 std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
1398 SDL_DestroyRenderer(renderer);
1399 renderer = nullptr;
1402 if(!renderer)
1403 renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_SOFTWARE);
1404 if(!renderer)
1406 std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
1407 return 1;
1409 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1410 SDL_RenderFillRect(renderer, nullptr);
1411 SDL_RenderPresent(renderer);
1413 /* Open an audio device */
1414 int fileidx = 1;
1415 ALCdevice *device = [argc,argv,&fileidx]() -> ALCdevice*
1417 ALCdevice *dev = NULL;
1418 if(argc > 3 && strcmp(argv[1], "-device") == 0)
1420 dev = alcOpenDevice(argv[2]);
1421 if(dev)
1423 fileidx = 3;
1424 return dev;
1426 std::cerr<< "Failed to open \""<<argv[2]<<"\" - trying default" <<std::endl;
1428 return alcOpenDevice(nullptr);
1429 }();
1430 ALCcontext *context = alcCreateContext(device, nullptr);
1431 if(!context || alcMakeContextCurrent(context) == ALC_FALSE)
1433 std::cerr<< "Failed to set up audio device" <<std::endl;
1434 if(context)
1435 alcDestroyContext(context);
1436 return 1;
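/* NOTE: has_latency_check and alGetSourcedvSOFT are declared above but not
 * set anywhere in this listing, so the latency-aware clock path is effectively
 * disabled. A minimal sketch of how they could be initialized once the context
 * is current (assuming the AL_SOFT_source_latency extension is what's intended):
 *
 *   if(alIsExtensionPresent("AL_SOFT_source_latency"))
 *   {
 *       alGetSourcedvSOFT = reinterpret_cast<LPALGETSOURCEDVSOFT>(
 *           alGetProcAddress("alGetSourcedvSOFT")
 *       );
 *       has_latency_check = (alGetSourcedvSOFT != nullptr);
 *   }
 */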
1439 while(fileidx < argc && !movState)
1441 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1442 if(!movState->prepare()) movState = nullptr;
1444 if(!movState)
1446 std::cerr<< "Could not start a video" <<std::endl;
1447 return 1;
1449 movState->setTitle(screen);
1451 /* Default to going to the next movie at the end of one. */
1452 enum class EomAction {
1453 Next, Quit
1454 } eom_action = EomAction::Next;
1455 SDL_Event event;
1456 while(SDL_WaitEvent(&event) == 1)
1458 switch(event.type)
1460 case SDL_KEYDOWN:
1461 switch(event.key.keysym.sym)
1463 case SDLK_ESCAPE:
1464 movState->mQuit = true;
1465 eom_action = EomAction::Quit;
1466 break;
1468 case SDLK_n:
1469 movState->mQuit = true;
1470 eom_action = EomAction::Next;
1471 break;
1473 default:
1474 break;
1476 break;
1478 case SDL_WINDOWEVENT:
1479 switch(event.window.event)
1481 case SDL_WINDOWEVENT_RESIZED:
1482 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1483 SDL_RenderFillRect(renderer, nullptr);
1484 break;
1486 default:
1487 break;
1489 break;
1491 case SDL_QUIT:
1492 movState->mQuit = true;
1493 eom_action = EomAction::Quit;
1494 break;
1496 case FF_UPDATE_EVENT:
1497 reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
1498 screen, renderer
1500 break;
1502 case FF_REFRESH_EVENT:
1503 reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
1504 screen, renderer
1506 break;
1508 case FF_MOVIE_DONE_EVENT:
1509 if(eom_action != EomAction::Quit)
1511 movState = nullptr;
1512 while(fileidx < argc && !movState)
1514 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1515 if(!movState->prepare()) movState = nullptr;
1517 if(movState)
1519 movState->setTitle(screen);
1520 break;
1524 /* Nothing more to play. Shut everything down and quit. */
1525 movState = nullptr;
1527 alcMakeContextCurrent(nullptr);
1528 alcDestroyContext(context);
1529 alcCloseDevice(device);
1531 SDL_DestroyRenderer(renderer);
1532 renderer = nullptr;
1533 SDL_DestroyWindow(screen);
1534 screen = nullptr;
1536 SDL_Quit();
1537 exit(0);
1539 default:
1540 break;
1544 std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
1545 return 1;