Simplify bsinc filter storage in the filter state
[openal-soft.git] / examples / alffplay.cpp
blob10c005538ea141d5f92cc1b3af3086ab78736782
1 /*
2 * An example showing how to play a stream sync'd to video, using ffmpeg.
4 * Requires C++11.
5 */
7 #include <condition_variable>
8 #include <functional>
9 #include <algorithm>
10 #include <iostream>
11 #include <iomanip>
12 #include <cstring>
13 #include <limits>
14 #include <thread>
15 #include <chrono>
16 #include <atomic>
17 #include <mutex>
18 #include <deque>
19 #include <array>
21 extern "C" {
22 #include "libavcodec/avcodec.h"
23 #include "libavformat/avformat.h"
24 #include "libavformat/avio.h"
25 #include "libavutil/time.h"
26 #include "libavutil/pixfmt.h"
27 #include "libavutil/avstring.h"
28 #include "libavutil/channel_layout.h"
29 #include "libswscale/swscale.h"
30 #include "libswresample/swresample.h"
33 #include "SDL.h"
35 #include "AL/alc.h"
36 #include "AL/al.h"
37 #include "AL/alext.h"
39 namespace
42 static const std::string AppName("alffplay");
44 static bool do_direct_out = false;
45 static bool has_latency_check = false;
46 static LPALGETSOURCEDVSOFT alGetSourcedvSOFT;
48 #define AUDIO_BUFFER_TIME 100 /* In milliseconds, per-buffer */
49 #define AUDIO_BUFFER_QUEUE_SIZE 8 /* Number of buffers to queue */
50 #define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
51 #define AV_SYNC_THRESHOLD 0.01
52 #define AV_NOSYNC_THRESHOLD 10.0
53 #define SAMPLE_CORRECTION_MAX_DIFF 0.05
54 #define AUDIO_DIFF_AVG_NB 20
55 #define VIDEO_PICTURE_QUEUE_SIZE 16
57 enum {
58 FF_UPDATE_EVENT = SDL_USEREVENT,
59 FF_REFRESH_EVENT,
60 FF_MOVIE_DONE_EVENT
63 enum {
64 AV_SYNC_AUDIO_MASTER,
65 AV_SYNC_VIDEO_MASTER,
66 AV_SYNC_EXTERNAL_MASTER,
68 DEFAULT_AV_SYNC_TYPE = AV_SYNC_EXTERNAL_MASTER
72 struct PacketQueue {
73 std::deque<AVPacket> mPackets;
74 std::atomic<int> mTotalSize;
75 std::atomic<bool> mFinished;
76 std::mutex mMutex;
77 std::condition_variable mCond;
79 PacketQueue() : mTotalSize(0), mFinished(false)
80 { }
81 ~PacketQueue()
82 { clear(); }
84 int put(const AVPacket *pkt);
85 int peek(AVPacket *pkt, std::atomic<bool> &quit_var);
86 void pop();
88 void clear();
89 void finish();
93 struct MovieState;
95 struct AudioState {
96 MovieState *mMovie;
98 AVStream *mStream;
99 AVCodecContext *mCodecCtx;
101 PacketQueue mQueue;
103 /* Used for clock difference average computation */
104 struct {
105 std::atomic<int> Clocks; /* In microseconds */
106 double Accum;
107 double AvgCoeff;
108 double Threshold;
109 int AvgCount;
110 } mDiff;
112 /* Time (in seconds) of the next sample to be buffered */
113 double mCurrentPts;
115 /* Decompressed sample frame, and swresample context for conversion */
116 AVFrame *mDecodedFrame;
117 struct SwrContext *mSwresCtx;
119 /* Conversion format, for what gets fed to Alure */
120 int mDstChanLayout;
121 enum AVSampleFormat mDstSampleFmt;
123 /* Storage of converted samples */
124 uint8_t *mSamples;
125 int mSamplesLen; /* In samples */
126 int mSamplesPos;
127 int mSamplesMax;
129 /* OpenAL format */
130 ALenum mFormat;
131 ALsizei mFrameSize;
133 std::recursive_mutex mSrcMutex;
134 ALuint mSource;
135 ALuint mBuffers[AUDIO_BUFFER_QUEUE_SIZE];
136 ALsizei mBufferIdx;
138 AudioState(MovieState *movie)
139 : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr)
140 , mDiff{{0}, 0.0, 0.0, 0.0, 0}, mCurrentPts(0.0), mDecodedFrame(nullptr)
141 , mSwresCtx(nullptr), mDstChanLayout(0), mDstSampleFmt(AV_SAMPLE_FMT_NONE)
142 , mSamples(nullptr), mSamplesLen(0), mSamplesPos(0), mSamplesMax(0)
143 , mFormat(AL_NONE), mFrameSize(0), mSource(0), mBufferIdx(0)
145 for(auto &buf : mBuffers)
146 buf = 0;
148 ~AudioState()
150 if(mSource)
151 alDeleteSources(1, &mSource);
152 alDeleteBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);
154 av_frame_free(&mDecodedFrame);
155 swr_free(&mSwresCtx);
157 av_freep(&mSamples);
159 avcodec_free_context(&mCodecCtx);
162 double getClock();
164 int getSync();
165 int decodeFrame();
166 int readAudio(uint8_t *samples, int length);
168 int handler();
171 struct VideoState {
172 MovieState *mMovie;
174 AVStream *mStream;
175 AVCodecContext *mCodecCtx;
177 PacketQueue mQueue;
179 double mClock;
180 double mFrameTimer;
181 double mFrameLastPts;
182 double mFrameLastDelay;
183 double mCurrentPts;
184 /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
185 int64_t mCurrentPtsTime;
187 /* Decompressed video frame, and swscale context for conversion */
188 AVFrame *mDecodedFrame;
189 struct SwsContext *mSwscaleCtx;
191 struct Picture {
192 SDL_Texture *mImage;
193 int mWidth, mHeight; /* Logical image size (actual size may be larger) */
194 std::atomic<bool> mUpdated;
195 double mPts;
197 Picture()
198 : mImage(nullptr), mWidth(0), mHeight(0), mUpdated(false), mPts(0.0)
200 ~Picture()
202 if(mImage)
203 SDL_DestroyTexture(mImage);
204 mImage = nullptr;
207 std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
208 size_t mPictQSize, mPictQRead, mPictQWrite;
209 std::mutex mPictQMutex;
210 std::condition_variable mPictQCond;
211 bool mFirstUpdate;
212 std::atomic<bool> mEOS;
213 std::atomic<bool> mFinalUpdate;
215 VideoState(MovieState *movie)
216 : mMovie(movie), mStream(nullptr), mCodecCtx(nullptr), mClock(0.0)
217 , mFrameTimer(0.0), mFrameLastPts(0.0), mFrameLastDelay(0.0)
218 , mCurrentPts(0.0), mCurrentPtsTime(0), mDecodedFrame(nullptr)
219 , mSwscaleCtx(nullptr), mPictQSize(0), mPictQRead(0), mPictQWrite(0)
220 , mFirstUpdate(true), mEOS(false), mFinalUpdate(false)
222 ~VideoState()
224 sws_freeContext(mSwscaleCtx);
225 mSwscaleCtx = nullptr;
226 av_frame_free(&mDecodedFrame);
227 avcodec_free_context(&mCodecCtx);
230 double getClock();
232 static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
233 void schedRefresh(int delay);
234 void display(SDL_Window *screen, SDL_Renderer *renderer);
235 void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
236 void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
237 int queuePicture(double pts);
238 double synchronize(double pts);
239 int handler();
242 struct MovieState {
243 AVFormatContext *mFormatCtx;
244 int mVideoStream, mAudioStream;
246 int mAVSyncType;
248 int64_t mExternalClockBase;
250 std::atomic<bool> mQuit;
252 AudioState mAudio;
253 VideoState mVideo;
255 std::thread mParseThread;
256 std::thread mAudioThread;
257 std::thread mVideoThread;
259 std::string mFilename;
261 MovieState(std::string fname)
262 : mFormatCtx(nullptr), mVideoStream(0), mAudioStream(0)
263 , mAVSyncType(DEFAULT_AV_SYNC_TYPE), mExternalClockBase(0), mQuit(false)
264 , mAudio(this), mVideo(this), mFilename(std::move(fname))
266 ~MovieState()
268 mQuit = true;
269 if(mParseThread.joinable())
270 mParseThread.join();
271 avformat_close_input(&mFormatCtx);
274 static int decode_interrupt_cb(void *ctx);
275 bool prepare();
276 void setTitle(SDL_Window *window);
278 double getClock();
280 double getMasterClock();
282 int streamComponentOpen(int stream_index);
283 int parse_handler();
287 int PacketQueue::put(const AVPacket *pkt)
289 std::unique_lock<std::mutex> lock(mMutex);
290 mPackets.push_back(AVPacket{});
291 if(av_packet_ref(&mPackets.back(), pkt) != 0)
293 mPackets.pop_back();
294 return -1;
296 mTotalSize += mPackets.back().size;
297 lock.unlock();
299 mCond.notify_one();
300 return 0;
303 int PacketQueue::peek(AVPacket *pkt, std::atomic<bool> &quit_var)
305 std::unique_lock<std::mutex> lock(mMutex);
306 while(!quit_var.load())
308 if(!mPackets.empty())
310 if(av_packet_ref(pkt, &mPackets.front()) != 0)
311 return -1;
312 return 1;
315 if(mFinished.load())
316 return 0;
317 mCond.wait(lock);
319 return -1;
322 void PacketQueue::pop()
324 std::unique_lock<std::mutex> lock(mMutex);
325 AVPacket *pkt = &mPackets.front();
326 mTotalSize -= pkt->size;
327 av_packet_unref(pkt);
328 mPackets.pop_front();
331 void PacketQueue::clear()
333 std::unique_lock<std::mutex> lock(mMutex);
334 std::for_each(mPackets.begin(), mPackets.end(),
335 [](AVPacket &pkt) { av_packet_unref(&pkt); }
337 mPackets.clear();
338 mTotalSize = 0;
340 void PacketQueue::finish()
342 std::unique_lock<std::mutex> lock(mMutex);
343 mFinished = true;
344 lock.unlock();
345 mCond.notify_all();
349 double AudioState::getClock()
351 double pts;
353 std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
354 /* The audio clock is the timestamp of the sample currently being heard.
355 * It's based on 4 components:
356 * 1 - The timestamp of the next sample to buffer (state->current_pts)
357 * 2 - The length of the source's buffer queue
358 * 3 - The offset OpenAL is currently at in the source (the first value
359 * from AL_SEC_OFFSET_LATENCY_SOFT)
360 * 4 - The latency between OpenAL and the DAC (the second value from
361 * AL_SEC_OFFSET_LATENCY_SOFT)
363 * Subtracting the length of the source queue from the next sample's
364 * timestamp gives the timestamp of the sample at start of the source
365 * queue. Adding the source offset to that results in the timestamp for
366 * OpenAL's current position, and subtracting the source latency from that
367 * gives the timestamp of the sample currently at the DAC.
369 pts = mCurrentPts;
370 if(mSource)
372 ALdouble offset[2];
373 ALint queue_size;
374 ALint status;
376 /* NOTE: The source state must be checked last, in case an underrun
377 * occurs and the source stops between retrieving the offset+latency
378 * and getting the state. */
379 if(has_latency_check)
381 alGetSourcedvSOFT(mSource, AL_SEC_OFFSET_LATENCY_SOFT, offset);
382 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);
384 else
386 ALint ioffset;
387 alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
388 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queue_size);
389 offset[0] = (double)ioffset / (double)mCodecCtx->sample_rate;
390 offset[1] = 0.0f;
392 alGetSourcei(mSource, AL_SOURCE_STATE, &status);
394 /* If the source is AL_STOPPED, then there was an underrun and all
395 * buffers are processed, so ignore the source queue. The audio thread
396 * will put the source into an AL_INITIAL state and clear the queue
397 * when it starts recovery. */
398 if(status != AL_STOPPED)
399 pts -= queue_size*((double)AUDIO_BUFFER_TIME/1000.0) - offset[0];
400 if(status == AL_PLAYING)
401 pts -= offset[1];
403 lock.unlock();
405 return std::max(pts, 0.0);
408 int AudioState::getSync()
410 double diff, avg_diff, ref_clock;
412 if(mMovie->mAVSyncType == AV_SYNC_AUDIO_MASTER)
413 return 0;
415 ref_clock = mMovie->getMasterClock();
416 diff = ref_clock - getClock();
418 if(!(fabs(diff) < AV_NOSYNC_THRESHOLD))
420 /* Difference is TOO big; reset diff stuff */
421 mDiff.Accum = 0.0;
422 return 0;
425 /* Accumulate the diffs */
426 mDiff.Accum = mDiff.Accum*mDiff.AvgCoeff + diff;
427 avg_diff = mDiff.Accum*(1.0 - mDiff.AvgCoeff);
428 if(fabs(avg_diff) < mDiff.Threshold)
429 return 0;
431 /* Constrain the per-update difference to avoid exceedingly large skips */
432 if(!(diff <= SAMPLE_CORRECTION_MAX_DIFF))
433 diff = SAMPLE_CORRECTION_MAX_DIFF;
434 else if(!(diff >= -SAMPLE_CORRECTION_MAX_DIFF))
435 diff = -SAMPLE_CORRECTION_MAX_DIFF;
436 return (int)(diff*mCodecCtx->sample_rate);
439 int AudioState::decodeFrame()
441 while(!mMovie->mQuit.load())
443 while(!mMovie->mQuit.load())
445 /* Get the next packet */
446 AVPacket pkt{};
447 if(mQueue.peek(&pkt, mMovie->mQuit) <= 0)
448 return -1;
450 int ret = avcodec_send_packet(mCodecCtx, &pkt);
451 if(ret != AVERROR(EAGAIN))
453 if(ret < 0)
454 std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec <<std::endl;
455 mQueue.pop();
457 av_packet_unref(&pkt);
458 if(ret == 0 || ret == AVERROR(EAGAIN))
459 break;
462 int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
463 if(ret == AVERROR(EAGAIN))
464 continue;
465 if(ret == AVERROR_EOF || ret < 0)
467 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
468 return 0;
471 if(mDecodedFrame->nb_samples <= 0)
473 av_frame_unref(mDecodedFrame);
474 continue;
477 /* If provided, update w/ pts */
478 int64_t pts = av_frame_get_best_effort_timestamp(mDecodedFrame);
479 if(pts != AV_NOPTS_VALUE)
480 mCurrentPts = av_q2d(mStream->time_base)*pts;
482 if(mDecodedFrame->nb_samples > mSamplesMax)
484 av_freep(&mSamples);
485 av_samples_alloc(
486 &mSamples, nullptr, mCodecCtx->channels,
487 mDecodedFrame->nb_samples, mDstSampleFmt, 0
489 mSamplesMax = mDecodedFrame->nb_samples;
491 /* Return the amount of sample frames converted */
492 int data_size = swr_convert(mSwresCtx, &mSamples, mDecodedFrame->nb_samples,
493 (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
496 av_frame_unref(mDecodedFrame);
497 return data_size;
500 return 0;
/* Duplicates the sample frame at `in` into `out`, `count` times. frame_size
 * must be a multiple of sizeof(T); T is chosen by the caller as the widest
 * type that evenly divides the frame for efficient copying. */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
{
    const T *sample = reinterpret_cast<const T*>(in);
    T *dst = reinterpret_cast<T*>(out);

    int type_mult = frame_size / sizeof(T);
    if(type_mult == 1)
        std::fill_n(dst, count, *sample);
    else
    {
        /* Repeat the frame's type_mult elements, count times over. */
        for(int i = 0;i < count;++i)
        {
            for(int j = 0;j < type_mult;++j)
                *(dst++) = sample[j];
        }
    }
}
530 int AudioState::readAudio(uint8_t *samples, int length)
532 int sample_skip = getSync();
533 int audio_size = 0;
535 /* Read the next chunk of data, refill the buffer, and queue it
536 * on the source */
537 length /= mFrameSize;
538 while(audio_size < length)
540 if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
542 int frame_len = decodeFrame();
543 if(frame_len <= 0) break;
545 mSamplesLen = frame_len;
546 mSamplesPos = std::min(mSamplesLen, sample_skip);
547 sample_skip -= mSamplesPos;
549 mCurrentPts += (double)mSamplesPos / (double)mCodecCtx->sample_rate;
550 continue;
553 int rem = length - audio_size;
554 if(mSamplesPos >= 0)
556 int len = mSamplesLen - mSamplesPos;
557 if(rem > len) rem = len;
558 memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
560 else
562 rem = std::min(rem, -mSamplesPos);
564 /* Add samples by copying the first sample */
565 if((mFrameSize&7) == 0)
566 sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
567 else if((mFrameSize&3) == 0)
568 sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
569 else if((mFrameSize&1) == 0)
570 sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
571 else
572 sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
575 mSamplesPos += rem;
576 mCurrentPts += (double)rem / mCodecCtx->sample_rate;
577 samples += rem*mFrameSize;
578 audio_size += rem;
581 if(audio_size < length && audio_size > 0)
583 int rem = length - audio_size;
584 std::fill_n(samples, rem*mFrameSize,
585 (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
586 mCurrentPts += (double)rem / mCodecCtx->sample_rate;
587 audio_size += rem;
590 return audio_size * mFrameSize;
594 int AudioState::handler()
596 std::unique_lock<std::recursive_mutex> lock(mSrcMutex);
597 ALenum fmt;
599 /* Find a suitable format for Alure. */
600 mDstChanLayout = 0;
601 if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
603 mDstSampleFmt = AV_SAMPLE_FMT_U8;
604 mFrameSize = 1;
605 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
606 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
607 (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
609 mDstChanLayout = mCodecCtx->channel_layout;
610 mFrameSize *= 8;
611 mFormat = fmt;
613 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
614 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
615 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
616 (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
618 mDstChanLayout = mCodecCtx->channel_layout;
619 mFrameSize *= 6;
620 mFormat = fmt;
622 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
624 mDstChanLayout = mCodecCtx->channel_layout;
625 mFrameSize *= 1;
626 mFormat = AL_FORMAT_MONO8;
628 if(!mDstChanLayout)
630 mDstChanLayout = AV_CH_LAYOUT_STEREO;
631 mFrameSize *= 2;
632 mFormat = AL_FORMAT_STEREO8;
635 if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
636 alIsExtensionPresent("AL_EXT_FLOAT32"))
638 mDstSampleFmt = AV_SAMPLE_FMT_FLT;
639 mFrameSize = 4;
640 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
641 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
642 (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
644 mDstChanLayout = mCodecCtx->channel_layout;
645 mFrameSize *= 8;
646 mFormat = fmt;
648 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
649 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
650 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
651 (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
653 mDstChanLayout = mCodecCtx->channel_layout;
654 mFrameSize *= 6;
655 mFormat = fmt;
657 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
659 mDstChanLayout = mCodecCtx->channel_layout;
660 mFrameSize *= 1;
661 mFormat = AL_FORMAT_MONO_FLOAT32;
663 if(!mDstChanLayout)
665 mDstChanLayout = AV_CH_LAYOUT_STEREO;
666 mFrameSize *= 2;
667 mFormat = AL_FORMAT_STEREO_FLOAT32;
670 if(!mDstChanLayout)
672 mDstSampleFmt = AV_SAMPLE_FMT_S16;
673 mFrameSize = 2;
674 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
675 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
676 (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
678 mDstChanLayout = mCodecCtx->channel_layout;
679 mFrameSize *= 8;
680 mFormat = fmt;
682 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
683 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
684 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
685 (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
687 mDstChanLayout = mCodecCtx->channel_layout;
688 mFrameSize *= 6;
689 mFormat = fmt;
691 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
693 mDstChanLayout = mCodecCtx->channel_layout;
694 mFrameSize *= 1;
695 mFormat = AL_FORMAT_MONO16;
697 if(!mDstChanLayout)
699 mDstChanLayout = AV_CH_LAYOUT_STEREO;
700 mFrameSize *= 2;
701 mFormat = AL_FORMAT_STEREO16;
704 ALsizei buffer_len = mCodecCtx->sample_rate * AUDIO_BUFFER_TIME / 1000 *
705 mFrameSize;
706 void *samples = av_malloc(buffer_len);
708 mSamples = NULL;
709 mSamplesMax = 0;
710 mSamplesPos = 0;
711 mSamplesLen = 0;
713 if(!(mDecodedFrame=av_frame_alloc()))
715 std::cerr<< "Failed to allocate audio frame" <<std::endl;
716 goto finish;
719 mSwresCtx = swr_alloc_set_opts(nullptr,
720 mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
721 mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
722 (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
723 mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
724 0, nullptr
726 if(!mSwresCtx || swr_init(mSwresCtx) != 0)
728 std::cerr<< "Failed to initialize audio converter" <<std::endl;
729 goto finish;
732 alGenBuffers(AUDIO_BUFFER_QUEUE_SIZE, mBuffers);
733 alGenSources(1, &mSource);
735 if(do_direct_out)
737 if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
738 std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
739 else
741 alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
742 std::cout<< "Direct out enabled" <<std::endl;
746 while(alGetError() == AL_NO_ERROR && !mMovie->mQuit.load())
748 /* First remove any processed buffers. */
749 ALint processed;
750 alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
751 if(processed > 0)
753 std::array<ALuint,AUDIO_BUFFER_QUEUE_SIZE> tmp;
754 alSourceUnqueueBuffers(mSource, processed, tmp.data());
757 /* Refill the buffer queue. */
758 ALint queued;
759 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
760 while(queued < AUDIO_BUFFER_QUEUE_SIZE)
762 int audio_size;
764 /* Read the next chunk of data, fill the buffer, and queue it on
765 * the source */
766 audio_size = readAudio(reinterpret_cast<uint8_t*>(samples), buffer_len);
767 if(audio_size <= 0) break;
769 ALuint bufid = mBuffers[mBufferIdx++];
770 mBufferIdx %= AUDIO_BUFFER_QUEUE_SIZE;
772 alBufferData(bufid, mFormat, samples, audio_size, mCodecCtx->sample_rate);
773 alSourceQueueBuffers(mSource, 1, &bufid);
774 queued++;
776 if(queued == 0)
777 break;
779 /* Check that the source is playing. */
780 ALint state;
781 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
782 if(state == AL_STOPPED)
784 /* AL_STOPPED means there was an underrun. Rewind the source to get
785 * it back into an AL_INITIAL state.
787 alSourceRewind(mSource);
788 continue;
791 lock.unlock();
793 /* (re)start the source if needed, and wait for a buffer to finish */
794 if(state != AL_PLAYING && state != AL_PAUSED)
795 alSourcePlay(mSource);
796 SDL_Delay(AUDIO_BUFFER_TIME / 3);
798 lock.lock();
801 finish:
802 alSourceRewind(mSource);
803 alSourcei(mSource, AL_BUFFER, 0);
805 av_frame_free(&mDecodedFrame);
806 swr_free(&mSwresCtx);
808 av_freep(&mSamples);
810 return 0;
814 double VideoState::getClock()
816 double delta = (av_gettime() - mCurrentPtsTime) / 1000000.0;
817 return mCurrentPts + delta;
820 Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
822 SDL_Event evt{};
823 evt.user.type = FF_REFRESH_EVENT;
824 evt.user.data1 = opaque;
825 SDL_PushEvent(&evt);
826 return 0; /* 0 means stop timer */
829 /* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
830 void VideoState::schedRefresh(int delay)
832 SDL_AddTimer(delay, sdl_refresh_timer_cb, this);
835 /* Called by VideoState::refreshTimer to display the next video frame. */
836 void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
838 Picture *vp = &mPictQ[mPictQRead];
840 if(!vp->mImage)
841 return;
843 float aspect_ratio;
844 int win_w, win_h;
845 int w, h, x, y;
847 if(mCodecCtx->sample_aspect_ratio.num == 0)
848 aspect_ratio = 0.0f;
849 else
851 aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
852 mCodecCtx->height;
854 if(aspect_ratio <= 0.0f)
855 aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;
857 SDL_GetWindowSize(screen, &win_w, &win_h);
858 h = win_h;
859 w = ((int)rint(h * aspect_ratio) + 3) & ~3;
860 if(w > win_w)
862 w = win_w;
863 h = ((int)rint(w / aspect_ratio) + 3) & ~3;
865 x = (win_w - w) / 2;
866 y = (win_h - h) / 2;
868 SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
869 SDL_Rect dst_rect{ x, y, w, h };
870 SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
871 SDL_RenderPresent(renderer);
874 /* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
875 * was created. It handles the display of the next decoded video frame (if not
876 * falling behind), and sets up the timer for the following video frame.
878 void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
880 if(!mStream)
882 if(mEOS)
884 mFinalUpdate = true;
885 std::unique_lock<std::mutex>(mPictQMutex).unlock();
886 mPictQCond.notify_all();
887 return;
889 schedRefresh(100);
890 return;
893 std::unique_lock<std::mutex> lock(mPictQMutex);
894 retry:
895 if(mPictQSize == 0)
897 if(mEOS)
898 mFinalUpdate = true;
899 else
900 schedRefresh(1);
901 lock.unlock();
902 mPictQCond.notify_all();
903 return;
906 Picture *vp = &mPictQ[mPictQRead];
907 mCurrentPts = vp->mPts;
908 mCurrentPtsTime = av_gettime();
910 /* Get delay using the frame pts and the pts from last frame. */
911 double delay = vp->mPts - mFrameLastPts;
912 if(delay <= 0 || delay >= 1.0)
914 /* If incorrect delay, use previous one. */
915 delay = mFrameLastDelay;
917 /* Save for next frame. */
918 mFrameLastDelay = delay;
919 mFrameLastPts = vp->mPts;
921 /* Update delay to sync to clock if not master source. */
922 if(mMovie->mAVSyncType != AV_SYNC_VIDEO_MASTER)
924 double ref_clock = mMovie->getMasterClock();
925 double diff = vp->mPts - ref_clock;
927 /* Skip or repeat the frame. Take delay into account. */
928 double sync_threshold = std::min(delay, AV_SYNC_THRESHOLD);
929 if(fabs(diff) < AV_NOSYNC_THRESHOLD)
931 if(diff <= -sync_threshold)
932 delay = 0;
933 else if(diff >= sync_threshold)
934 delay *= 2.0;
938 mFrameTimer += delay;
939 /* Compute the REAL delay. */
940 double actual_delay = mFrameTimer - (av_gettime() / 1000000.0);
941 if(!(actual_delay >= 0.010))
943 /* We don't have time to handle this picture, just skip to the next one. */
944 mPictQRead = (mPictQRead+1)%mPictQ.size();
945 mPictQSize--;
946 goto retry;
948 schedRefresh((int)(actual_delay*1000.0 + 0.5));
950 /* Show the picture! */
951 display(screen, renderer);
953 /* Update queue for next picture. */
954 mPictQRead = (mPictQRead+1)%mPictQ.size();
955 mPictQSize--;
956 lock.unlock();
957 mPictQCond.notify_all();
960 /* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
961 * main thread where the renderer was created.
963 void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
965 Picture *vp = &mPictQ[mPictQWrite];
966 bool fmt_updated = false;
968 /* allocate or resize the buffer! */
969 if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
971 fmt_updated = true;
972 if(vp->mImage)
973 SDL_DestroyTexture(vp->mImage);
974 vp->mImage = SDL_CreateTexture(
975 renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
976 mCodecCtx->coded_width, mCodecCtx->coded_height
978 if(!vp->mImage)
979 std::cerr<< "Failed to create YV12 texture!" <<std::endl;
980 vp->mWidth = mCodecCtx->width;
981 vp->mHeight = mCodecCtx->height;
983 if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
985 /* For the first update, set the window size to the video size. */
986 mFirstUpdate = false;
988 int w = vp->mWidth;
989 int h = vp->mHeight;
990 if(mCodecCtx->sample_aspect_ratio.den != 0)
992 double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
993 if(aspect_ratio >= 1.0)
994 w = (int)(w*aspect_ratio + 0.5);
995 else if(aspect_ratio > 0.0)
996 h = (int)(h/aspect_ratio + 0.5);
998 SDL_SetWindowSize(screen, w, h);
1002 if(vp->mImage)
1004 AVFrame *frame = mDecodedFrame;
1005 void *pixels = nullptr;
1006 int pitch = 0;
1008 if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
1009 SDL_UpdateYUVTexture(vp->mImage, nullptr,
1010 frame->data[0], frame->linesize[0],
1011 frame->data[1], frame->linesize[1],
1012 frame->data[2], frame->linesize[2]
1014 else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
1015 std::cerr<< "Failed to lock texture" <<std::endl;
1016 else
1018 // Convert the image into YUV format that SDL uses
1019 int coded_w = mCodecCtx->coded_width;
1020 int coded_h = mCodecCtx->coded_height;
1021 int w = mCodecCtx->width;
1022 int h = mCodecCtx->height;
1023 if(!mSwscaleCtx || fmt_updated)
1025 sws_freeContext(mSwscaleCtx);
1026 mSwscaleCtx = sws_getContext(
1027 w, h, mCodecCtx->pix_fmt,
1028 w, h, AV_PIX_FMT_YUV420P, 0,
1029 nullptr, nullptr, nullptr
1033 /* point pict at the queue */
1034 uint8_t *pict_data[3];
1035 pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
1036 pict_data[1] = pict_data[0] + coded_w*coded_h;
1037 pict_data[2] = pict_data[1] + coded_w*coded_h/4;
1039 int pict_linesize[3];
1040 pict_linesize[0] = pitch;
1041 pict_linesize[1] = pitch / 2;
1042 pict_linesize[2] = pitch / 2;
1044 sws_scale(mSwscaleCtx, (const uint8_t**)frame->data,
1045 frame->linesize, 0, h, pict_data, pict_linesize);
1046 SDL_UnlockTexture(vp->mImage);
1050 std::unique_lock<std::mutex> lock(mPictQMutex);
1051 vp->mUpdated = true;
1052 lock.unlock();
1053 mPictQCond.notify_one();
1056 int VideoState::queuePicture(double pts)
1058 /* Wait until we have space for a new pic */
1059 std::unique_lock<std::mutex> lock(mPictQMutex);
1060 while(mPictQSize >= mPictQ.size() && !mMovie->mQuit.load())
1061 mPictQCond.wait(lock);
1062 lock.unlock();
1064 if(mMovie->mQuit.load())
1065 return -1;
1067 Picture *vp = &mPictQ[mPictQWrite];
1069 /* We have to create/update the picture in the main thread */
1070 vp->mUpdated = false;
1071 SDL_Event evt{};
1072 evt.user.type = FF_UPDATE_EVENT;
1073 evt.user.data1 = this;
1074 SDL_PushEvent(&evt);
1076 /* Wait until the picture is updated. */
1077 lock.lock();
1078 while(!vp->mUpdated && !mMovie->mQuit.load())
1079 mPictQCond.wait(lock);
1080 if(mMovie->mQuit.load())
1081 return -1;
1082 vp->mPts = pts;
1084 mPictQWrite = (mPictQWrite+1)%mPictQ.size();
1085 mPictQSize++;
1086 lock.unlock();
1088 return 0;
1091 double VideoState::synchronize(double pts)
1093 double frame_delay;
1095 if(pts == 0.0) /* if we aren't given a pts, set it to the clock */
1096 pts = mClock;
1097 else /* if we have pts, set video clock to it */
1098 mClock = pts;
1100 /* update the video clock */
1101 frame_delay = av_q2d(mCodecCtx->time_base);
1102 /* if we are repeating a frame, adjust clock accordingly */
1103 frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
1104 mClock += frame_delay;
1105 return pts;
1108 int VideoState::handler()
1110 mDecodedFrame = av_frame_alloc();
1111 while(!mMovie->mQuit)
1113 while(!mMovie->mQuit)
1115 AVPacket packet{};
1116 if(mQueue.peek(&packet, mMovie->mQuit) <= 0)
1117 goto finish;
1119 int ret = avcodec_send_packet(mCodecCtx, &packet);
1120 if(ret != AVERROR(EAGAIN))
1122 if(ret < 0)
1123 std::cerr<< "Failed to send encoded packet: 0x"<<std::hex<<ret<<std::dec <<std::endl;
1124 mQueue.pop();
1126 av_packet_unref(&packet);
1127 if(ret == 0 || ret == AVERROR(EAGAIN))
1128 break;
1131 /* Decode video frame */
1132 int ret = avcodec_receive_frame(mCodecCtx, mDecodedFrame);
1133 if(ret == AVERROR(EAGAIN))
1134 continue;
1135 if(ret < 0)
1137 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
1138 break;
1141 double pts = synchronize(
1142 av_q2d(mStream->time_base) * av_frame_get_best_effort_timestamp(mDecodedFrame)
1144 if(queuePicture(pts) < 0)
1145 break;
1146 av_frame_unref(mDecodedFrame);
1148 finish:
1149 mEOS = true;
1150 av_frame_free(&mDecodedFrame);
1152 std::unique_lock<std::mutex> lock(mPictQMutex);
1153 if(mMovie->mQuit)
1155 mPictQRead = 0;
1156 mPictQWrite = 0;
1157 mPictQSize = 0;
1159 while(!mFinalUpdate)
1160 mPictQCond.wait(lock);
1162 return 0;
1166 int MovieState::decode_interrupt_cb(void *ctx)
1168 return reinterpret_cast<MovieState*>(ctx)->mQuit;
1171 bool MovieState::prepare()
1173 mFormatCtx = avformat_alloc_context();
1174 mFormatCtx->interrupt_callback.callback = decode_interrupt_cb;
1175 mFormatCtx->interrupt_callback.opaque = this;
1176 if(avio_open2(&mFormatCtx->pb, mFilename.c_str(), AVIO_FLAG_READ,
1177 &mFormatCtx->interrupt_callback, nullptr))
1179 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1180 return false;
1183 /* Open movie file */
1184 if(avformat_open_input(&mFormatCtx, mFilename.c_str(), nullptr, nullptr) != 0)
1186 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1187 return false;
1190 /* Retrieve stream information */
1191 if(avformat_find_stream_info(mFormatCtx, nullptr) < 0)
1193 std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
1194 return false;
1197 mVideo.schedRefresh(40);
1199 mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
1200 return true;
1203 void MovieState::setTitle(SDL_Window *window)
1205 auto pos1 = mFilename.rfind('/');
1206 auto pos2 = mFilename.rfind('\\');
1207 auto fpos = ((pos1 == std::string::npos) ? pos2 :
1208 (pos2 == std::string::npos) ? pos1 :
1209 std::max(pos1, pos2)) + 1;
1210 SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1213 double MovieState::getClock()
1215 return (av_gettime()-mExternalClockBase) / 1000000.0;
1218 double MovieState::getMasterClock()
1220 if(mAVSyncType == AV_SYNC_VIDEO_MASTER)
1221 return mVideo.getClock();
1222 if(mAVSyncType == AV_SYNC_AUDIO_MASTER)
1223 return mAudio.getClock();
1224 return getClock();
1227 int MovieState::streamComponentOpen(int stream_index)
1229 if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
1230 return -1;
1232 /* Get a pointer to the codec context for the stream, and open the
1233 * associated codec.
1235 AVCodecContext *avctx = avcodec_alloc_context3(nullptr);
1236 if(!avctx) return -1;
1238 if(avcodec_parameters_to_context(avctx, mFormatCtx->streams[stream_index]->codecpar))
1240 avcodec_free_context(&avctx);
1241 return -1;
1244 AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
1245 if(!codec || avcodec_open2(avctx, codec, nullptr) < 0)
1247 std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
1248 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
1249 avcodec_free_context(&avctx);
1250 return -1;
1253 /* Initialize and start the media type handler */
1254 switch(avctx->codec_type)
1256 case AVMEDIA_TYPE_AUDIO:
1257 mAudioStream = stream_index;
1258 mAudio.mStream = mFormatCtx->streams[stream_index];
1259 mAudio.mCodecCtx = avctx;
1261 /* Averaging filter for audio sync */
1262 mAudio.mDiff.AvgCoeff = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1263 /* Correct audio only if larger error than this */
1264 mAudio.mDiff.Threshold = 0.050/* 50 ms */;
1266 mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
1267 break;
1269 case AVMEDIA_TYPE_VIDEO:
1270 mVideoStream = stream_index;
1271 mVideo.mStream = mFormatCtx->streams[stream_index];
1272 mVideo.mCodecCtx = avctx;
1274 mVideo.mCurrentPtsTime = av_gettime();
1275 mVideo.mFrameTimer = (double)mVideo.mCurrentPtsTime / 1000000.0;
1276 mVideo.mFrameLastDelay = 40e-3;
1278 mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
1279 break;
1281 default:
1282 avcodec_free_context(&avctx);
1283 break;
1286 return 0;
1289 int MovieState::parse_handler()
1291 int video_index = -1;
1292 int audio_index = -1;
1294 mVideoStream = -1;
1295 mAudioStream = -1;
1297 /* Dump information about file onto standard error */
1298 av_dump_format(mFormatCtx, 0, mFilename.c_str(), 0);
1300 /* Find the first video and audio streams */
1301 for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
1303 if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
1304 video_index = i;
1305 else if(mFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
1306 audio_index = i;
1308 /* Start the external clock in 50ms, to give the audio and video
1309 * components time to start without needing to skip ahead.
1311 mExternalClockBase = av_gettime() + 50000;
1312 if(audio_index >= 0)
1313 streamComponentOpen(audio_index);
1314 if(video_index >= 0)
1315 streamComponentOpen(video_index);
1317 if(mVideoStream < 0 && mAudioStream < 0)
1319 std::cerr<< mFilename<<": could not open codecs" <<std::endl;
1320 mQuit = true;
1323 /* Main packet handling loop */
1324 while(!mQuit.load())
1326 if(mAudio.mQueue.mTotalSize + mVideo.mQueue.mTotalSize >= MAX_QUEUE_SIZE)
1328 std::this_thread::sleep_for(std::chrono::milliseconds(10));
1329 continue;
1332 AVPacket packet;
1333 if(av_read_frame(mFormatCtx, &packet) < 0)
1334 break;
1336 /* Copy the packet in the queue it's meant for. */
1337 if(packet.stream_index == mVideoStream)
1338 mVideo.mQueue.put(&packet);
1339 else if(packet.stream_index == mAudioStream)
1340 mAudio.mQueue.put(&packet);
1341 av_packet_unref(&packet);
1343 mVideo.mQueue.finish();
1344 mAudio.mQueue.finish();
1346 /* all done - wait for it */
1347 if(mVideoThread.joinable())
1348 mVideoThread.join();
1349 if(mAudioThread.joinable())
1350 mAudioThread.join();
1352 mVideo.mEOS = true;
1353 std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
1354 while(!mVideo.mFinalUpdate)
1355 mVideo.mPictQCond.wait(lock);
1356 lock.unlock();
1358 SDL_Event evt{};
1359 evt.user.type = FF_MOVIE_DONE_EVENT;
1360 SDL_PushEvent(&evt);
1362 return 0;
1365 } // namespace
1368 int main(int argc, char *argv[])
1370 std::unique_ptr<MovieState> movState;
1372 if(argc < 2)
1374 std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
1375 return 1;
1377 /* Register all formats and codecs */
1378 av_register_all();
1379 /* Initialize networking protocols */
1380 avformat_network_init();
1382 if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
1384 std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
1385 return 1;
1388 /* Make a window to put our video */
1389 SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
1390 if(!screen)
1392 std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
1393 return 1;
1395 /* Make a renderer to handle the texture image surface and rendering. */
1396 SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
1397 if(renderer)
1399 SDL_RendererInfo rinf{};
1400 bool ok = false;
1402 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1403 * software renderer. */
1404 if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1406 for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
1407 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
1409 if(!ok)
1411 std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
1412 SDL_DestroyRenderer(renderer);
1413 renderer = nullptr;
1416 if(!renderer)
1417 renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_SOFTWARE);
1418 if(!renderer)
1420 std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
1421 return 1;
1423 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1424 SDL_RenderFillRect(renderer, nullptr);
1425 SDL_RenderPresent(renderer);
1427 /* Open an audio device */
1428 int fileidx = 1;
1429 ALCdevice *device = [argc,argv,&fileidx]() -> ALCdevice*
1431 ALCdevice *dev = NULL;
1432 if(argc > 3 && strcmp(argv[1], "-device") == 0)
1434 fileidx = 3;
1435 dev = alcOpenDevice(argv[2]);
1436 if(dev) return dev;
1437 std::cerr<< "Failed to open \""<<argv[2]<<"\" - trying default" <<std::endl;
1439 return alcOpenDevice(nullptr);
1440 }();
1441 ALCcontext *context = alcCreateContext(device, nullptr);
1442 if(!context || alcMakeContextCurrent(context) == ALC_FALSE)
1444 std::cerr<< "Failed to set up audio device" <<std::endl;
1445 if(context)
1446 alcDestroyContext(context);
1447 return 1;
1450 const ALCchar *name = nullptr;
1451 if(alcIsExtensionPresent(device, "ALC_ENUMERATE_ALL_EXT"))
1452 name = alcGetString(device, ALC_ALL_DEVICES_SPECIFIER);
1453 if(!name || alcGetError(device) != AL_NO_ERROR)
1454 name = alcGetString(device, ALC_DEVICE_SPECIFIER);
1455 std::cout<< "Opened \""<<name<<"\"" <<std::endl;
1457 if(fileidx < argc && strcmp(argv[fileidx], "-direct") == 0)
1459 ++fileidx;
1460 do_direct_out = true;
1463 while(fileidx < argc && !movState)
1465 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1466 if(!movState->prepare()) movState = nullptr;
1468 if(!movState)
1470 std::cerr<< "Could not start a video" <<std::endl;
1471 return 1;
1473 movState->setTitle(screen);
1475 /* Default to going to the next movie at the end of one. */
1476 enum class EomAction {
1477 Next, Quit
1478 } eom_action = EomAction::Next;
1479 SDL_Event event;
1480 while(SDL_WaitEvent(&event) == 1)
1482 switch(event.type)
1484 case SDL_KEYDOWN:
1485 switch(event.key.keysym.sym)
1487 case SDLK_ESCAPE:
1488 movState->mQuit = true;
1489 eom_action = EomAction::Quit;
1490 break;
1492 case SDLK_n:
1493 movState->mQuit = true;
1494 eom_action = EomAction::Next;
1495 break;
1497 default:
1498 break;
1500 break;
1502 case SDL_WINDOWEVENT:
1503 switch(event.window.event)
1505 case SDL_WINDOWEVENT_RESIZED:
1506 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1507 SDL_RenderFillRect(renderer, nullptr);
1508 break;
1510 default:
1511 break;
1513 break;
1515 case SDL_QUIT:
1516 movState->mQuit = true;
1517 eom_action = EomAction::Quit;
1518 break;
1520 case FF_UPDATE_EVENT:
1521 reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
1522 screen, renderer
1524 break;
1526 case FF_REFRESH_EVENT:
1527 reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
1528 screen, renderer
1530 break;
1532 case FF_MOVIE_DONE_EVENT:
1533 if(eom_action != EomAction::Quit)
1535 movState = nullptr;
1536 while(fileidx < argc && !movState)
1538 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1539 if(!movState->prepare()) movState = nullptr;
1541 if(movState)
1543 movState->setTitle(screen);
1544 break;
1548 /* Nothing more to play. Shut everything down and quit. */
1549 movState = nullptr;
1551 alcMakeContextCurrent(nullptr);
1552 alcDestroyContext(context);
1553 alcCloseDevice(device);
1555 SDL_DestroyRenderer(renderer);
1556 renderer = nullptr;
1557 SDL_DestroyWindow(screen);
1558 screen = nullptr;
1560 SDL_Quit();
1561 exit(0);
1563 default:
1564 break;
1568 std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
1569 return 1;