Finalize ALC_SOFT_device_clock
[openal-soft.git] / examples / alffplay.cpp
blob b57de5e48f69635094ad11bfe1eb402def48a9fa
1 /*
2 * An example showing how to play a stream sync'd to video, using ffmpeg.
4 * Requires C++11.
5 */
7 #include <condition_variable>
8 #include <functional>
9 #include <algorithm>
10 #include <iostream>
11 #include <iomanip>
12 #include <cstring>
13 #include <limits>
14 #include <thread>
15 #include <chrono>
16 #include <atomic>
17 #include <vector>
18 #include <mutex>
19 #include <deque>
20 #include <array>
22 extern "C" {
23 #include "libavcodec/avcodec.h"
24 #include "libavformat/avformat.h"
25 #include "libavformat/avio.h"
26 #include "libavutil/time.h"
27 #include "libavutil/pixfmt.h"
28 #include "libavutil/avstring.h"
29 #include "libavutil/channel_layout.h"
30 #include "libswscale/swscale.h"
31 #include "libswresample/swresample.h"
34 #include "SDL.h"
36 #include "AL/alc.h"
37 #include "AL/al.h"
38 #include "AL/alext.h"
40 extern "C" {
41 #ifndef ALC_SOFT_device_clock
42 #define ALC_SOFT_device_clock 1
43 typedef int64_t ALCint64SOFT;
44 typedef uint64_t ALCuint64SOFT;
45 #define ALC_DEVICE_CLOCK_SOFT 0x1600
46 #define ALC_DEVICE_LATENCY_SOFT 0x1601
47 #define ALC_DEVICE_CLOCK_LATENCY_SOFT 0x1602
48 #define AL_SAMPLE_OFFSET_CLOCK_SOFT 0x1202
49 #define AL_SEC_OFFSET_CLOCK_SOFT 0x1203
50 typedef void (ALC_APIENTRY*LPALCGETINTEGER64VSOFT)(ALCdevice *device, ALCenum pname, ALsizei size, ALCint64SOFT *values);
51 #endif
52 } // extern "C"
54 namespace {
/* Convenience aliases for the std::chrono duration types used throughout. */
56 using nanoseconds = std::chrono::nanoseconds;
57 using microseconds = std::chrono::microseconds;
58 using milliseconds = std::chrono::milliseconds;
59 using seconds = std::chrono::seconds;
60 using seconds_d64 = std::chrono::duration<double>;
62 const std::string AppName("alffplay");
/* Runtime option and AL/ALC extension function pointers (loaded at startup elsewhere). */
64 bool EnableDirectOut = false;
65 LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
66 LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
/* If the clocks diverge by more than this, give up trying to sync (see getSync/refreshTimer). */
68 const seconds AVNoSyncThreshold(10);
/* Minimum real delay required to show a frame instead of skipping it (see refreshTimer). */
70 const milliseconds VideoSyncThreshold(10);
71 #define VIDEO_PICTURE_QUEUE_SIZE 16
/* Average audio drift below this is left uncorrected (see AudioState::getSync). */
73 const seconds_d64 AudioSyncThreshold(0.03);
/* Largest audio correction applied in one update (see AudioState::getSync). */
74 const milliseconds AudioSampleCorrectionMax(50);
75 /* Averaging filter coefficient for audio sync. */
76 #define AUDIO_DIFF_AVG_NB 20
77 const double AudioAvgFilterCoeff = std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB);
78 /* Per-buffer size, in time */
79 const milliseconds AudioBufferTime(20);
80 /* Buffer total size, in time (should be divisible by the buffer time) */
81 const milliseconds AudioBufferTotalTime(800);
83 #define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
/* Custom SDL user-event codes posted to the main-thread event loop. */
85 enum {
86 FF_UPDATE_EVENT = SDL_USEREVENT,
87 FF_REFRESH_EVENT,
88 FF_MOVIE_DONE_EVENT
/* Which clock drives A/V synchronization. */
91 enum class SyncMaster {
92 Audio,
93 Video,
94 External,
96 Default = External
/* Current time from ffmpeg's av_gettime(), as a chrono duration. */
100 inline microseconds get_avtime()
101 { return microseconds(av_gettime()); }
103 /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
104 struct AVIOContextDeleter {
105 void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
107 using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;
109 struct AVFormatCtxDeleter {
110 void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
112 using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;
114 struct AVCodecCtxDeleter {
115 void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
117 using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;
119 struct AVFrameDeleter {
120 void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
122 using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;
124 struct SwrContextDeleter {
125 void operator()(SwrContext *ptr) { swr_free(&ptr); }
127 using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;
129 struct SwsContextDeleter {
130 void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
132 using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
/* FIFO of referenced AVPackets, tracking the total byte size of queued data.
 * NOTE(review): no internal locking visible here — callers presumably
 * serialize access externally; confirm against the (unseen) call sites. */
135 class PacketQueue {
136 std::deque<AVPacket> mPackets;
137 size_t mTotalSize{0};
139 public:
140 ~PacketQueue() { clear(); }
142 bool empty() const noexcept { return mPackets.empty(); }
143 size_t totalSize() const noexcept { return mTotalSize; }
/* Append a new reference to pkt; on av_packet_ref failure the entry is dropped. */
145 void put(const AVPacket *pkt)
147 mPackets.push_back(AVPacket{});
148 if(av_packet_ref(&mPackets.back(), pkt) != 0)
149 mPackets.pop_back();
150 else
151 mTotalSize += mPackets.back().size;
/* Peek at the oldest packet; precondition: queue is not empty. */
154 AVPacket *front() noexcept
155 { return &mPackets.front(); }
/* Unreference and remove the oldest packet, updating the byte total. */
157 void pop()
159 AVPacket *pkt = &mPackets.front();
160 mTotalSize -= pkt->size;
161 av_packet_unref(pkt);
162 mPackets.pop_front();
/* Unreference and drop every queued packet. */
165 void clear()
167 for(AVPacket &pkt : mPackets)
168 av_packet_unref(&pkt);
169 mPackets.clear();
170 mTotalSize = 0;
175 struct MovieState;
/* Per-stream audio state: decoding, format conversion, and OpenAL playback. */
177 struct AudioState {
178 MovieState &mMovie;
180 AVStream *mStream{nullptr};
181 AVCodecCtxPtr mCodecCtx;
183 std::mutex mQueueMtx;
184 std::condition_variable mQueueCond;
186 /* Used for clock difference average computation */
187 seconds_d64 mClockDiffAvg{0};
189 /* Time of the next sample to be buffered */
190 nanoseconds mCurrentPts{0};
192 /* Device clock time that the stream started at. */
193 nanoseconds mDeviceStartTime{nanoseconds::min()};
195 /* Decompressed sample frame, and swresample context for conversion */
196 AVFramePtr mDecodedFrame;
197 SwrContextPtr mSwresCtx;
199 /* Conversion format, for what gets fed to OpenAL */
200 int mDstChanLayout{0};
201 AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};
203 /* Storage of converted samples */
204 uint8_t *mSamples{nullptr};
205 int mSamplesLen{0}; /* In samples */
206 int mSamplesPos{0};
207 int mSamplesMax{0};
209 /* OpenAL format */
210 ALenum mFormat{AL_NONE};
211 ALsizei mFrameSize{0};
/* mSrcMutex guards the OpenAL source/buffer queue state below. */
213 std::mutex mSrcMutex;
214 ALuint mSource{0};
215 std::vector<ALuint> mBuffers;
216 ALsizei mBufferIdx{0};
218 AudioState(MovieState &movie) : mMovie(movie)
/* Releases the OpenAL source/buffers and the converted-sample storage. */
220 ~AudioState()
222 if(mSource)
223 alDeleteSources(1, &mSource);
224 if(!mBuffers.empty())
225 alDeleteBuffers(mBuffers.size(), mBuffers.data());
227 av_freep(&mSamples);
230 nanoseconds getClockNoLock();
/* Locked wrapper around getClockNoLock(). */
231 nanoseconds getClock()
233 std::lock_guard<std::mutex> lock(mSrcMutex);
234 return getClockNoLock();
237 bool isBufferFilled();
238 void startPlayback();
240 int getSync();
241 int decodeFrame();
242 int readAudio(uint8_t *samples, int length);
244 int handler();
/* Per-stream video state: decoding, picture queue, and SDL display. */
247 struct VideoState {
248 MovieState &mMovie;
250 AVStream *mStream{nullptr};
251 AVCodecCtxPtr mCodecCtx;
253 std::mutex mQueueMtx;
254 std::condition_variable mQueueCond;
/* Video clock bookkeeping for frame scheduling (see refreshTimer). */
256 nanoseconds mClock{0};
257 nanoseconds mFrameTimer{0};
258 nanoseconds mFrameLastPts{0};
259 nanoseconds mFrameLastDelay{0};
260 nanoseconds mCurrentPts{0};
261 /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
262 microseconds mCurrentPtsTime{0};
264 /* Decompressed video frame, and swscale context for conversion */
265 AVFramePtr mDecodedFrame;
266 SwsContextPtr mSwscaleCtx;
/* One slot of the picture ring buffer: an SDL texture plus its pts. */
268 struct Picture {
269 SDL_Texture *mImage{nullptr};
270 int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
271 std::atomic<bool> mUpdated{false};
272 nanoseconds mPts{0};
274 ~Picture()
276 if(mImage)
277 SDL_DestroyTexture(mImage);
278 mImage = nullptr;
/* Fixed-size picture ring buffer with read/write cursors, guarded by mPictQMutex. */
281 std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
282 size_t mPictQSize{0}, mPictQRead{0}, mPictQWrite{0};
283 std::mutex mPictQMutex;
284 std::condition_variable mPictQCond;
285 bool mFirstUpdate{true};
286 std::atomic<bool> mEOS{false};
287 std::atomic<bool> mFinalUpdate{false};
289 VideoState(MovieState &movie) : mMovie(movie) { }
291 nanoseconds getClock();
292 bool isBufferFilled();
294 static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
295 void schedRefresh(milliseconds delay);
296 void display(SDL_Window *screen, SDL_Renderer *renderer);
297 void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
298 void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
299 int queuePicture(nanoseconds pts);
300 int handler();
/* Top-level playback state: demuxing context, the audio/video sub-states,
 * and the threads that drive them. */
303 struct MovieState {
304 AVIOContextPtr mIOContext;
305 AVFormatCtxPtr mFormatCtx;
307 SyncMaster mAVSyncType{SyncMaster::Default};
309 microseconds mClockBase{0};
310 std::atomic<bool> mPlaying{false};
/* Handshake between the demuxer and the decoder threads. */
312 std::mutex mSendMtx;
313 std::condition_variable mSendCond;
314 /* NOTE: false/clear = need data, true/set = no data needed */
315 std::atomic_flag mSendDataGood;
317 std::atomic<bool> mQuit{false};
319 AudioState mAudio;
320 VideoState mVideo;
322 std::thread mParseThread;
323 std::thread mAudioThread;
324 std::thread mVideoThread;
326 std::string mFilename;
328 MovieState(std::string fname)
329 : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
/* Signals quit and joins the parse thread (which presumably joins the others;
 * confirm in parse_handler, not visible here). */
331 ~MovieState()
333 mQuit = true;
334 if(mParseThread.joinable())
335 mParseThread.join();
338 static int decode_interrupt_cb(void *ctx);
339 bool prepare();
340 void setTitle(SDL_Window *window);
342 nanoseconds getClock();
344 nanoseconds getMasterClock();
346 nanoseconds getDuration();
348 int streamComponentOpen(int stream_index);
349 int parse_handler();
/* Returns the audio playback position (the timestamp of the sample currently
 * being heard). Uses the ALC device clock when available, otherwise derives
 * it from the source's queue/offset/latency. Caller is expected to hold
 * mSrcMutex (see the getClock() wrapper). */
353 nanoseconds AudioState::getClockNoLock()
355 // The audio clock is the timestamp of the sample currently being heard.
356 if(alcGetInteger64vSOFT)
358 // If device start time = min, we aren't playing yet.
359 if(mDeviceStartTime == nanoseconds::min())
360 return nanoseconds::zero();
362 // Get the current device clock time and latency.
363 auto device = alcGetContextsDevice(alcGetCurrentContext());
364 ALCint64SOFT devtimes[2] = {0,0};
365 alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
366 auto latency = nanoseconds(devtimes[1]);
367 auto device_time = nanoseconds(devtimes[0]);
369 // The clock is simply the current device time relative to the recorded
370 // start time. We can also subtract the latency to get more a accurate
371 // position of where the audio device actually is in the output stream.
372 return device_time - mDeviceStartTime - latency;
375 /* The source-based clock is based on 4 components:
376 * 1 - The timestamp of the next sample to buffer (mCurrentPts)
377 * 2 - The length of the source's buffer queue
378 * (AudioBufferTime*AL_BUFFERS_QUEUED)
379 * 3 - The offset OpenAL is currently at in the source (the first value
380 * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
381 * 4 - The latency between OpenAL and the DAC (the second value from
382 * AL_SAMPLE_OFFSET_LATENCY_SOFT)
384 * Subtracting the length of the source queue from the next sample's
385 * timestamp gives the timestamp of the sample at the start of the source
386 * queue. Adding the source offset to that results in the timestamp for the
387 * sample at OpenAL's current position, and subtracting the source latency
388 * from that gives the timestamp of the sample currently at the DAC.
390 nanoseconds pts = mCurrentPts;
391 if(mSource)
393 ALint64SOFT offset[2];
394 ALint queued;
395 ALint status;
397 /* NOTE: The source state must be checked last, in case an underrun
398 * occurs and the source stops between retrieving the offset+latency
399 * and getting the state. */
400 if(alGetSourcei64vSOFT)
401 alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
402 else
404 ALint ioffset;
405 alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
// Fallback: promote the integer sample offset to 32.32 fixed-point, zero latency.
406 offset[0] = (ALint64SOFT)ioffset << 32;
407 offset[1] = 0;
409 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
410 alGetSourcei(mSource, AL_SOURCE_STATE, &status);
412 /* If the source is AL_STOPPED, then there was an underrun and all
413 * buffers are processed, so ignore the source queue. The audio thread
414 * will put the source into an AL_INITIAL state and clear the queue
415 * when it starts recovery. */
416 if(status != AL_STOPPED)
418 using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
420 pts -= AudioBufferTime*queued;
421 pts += std::chrono::duration_cast<nanoseconds>(
422 fixed32(offset[0] / mCodecCtx->sample_rate)
425 /* Don't offset by the latency if the source isn't playing. */
426 if(status == AL_PLAYING)
427 pts -= nanoseconds(offset[1]);
// Never report a negative time.
430 return std::max(pts, nanoseconds::zero());
/* True once the OpenAL source exists, which (per the invariant below)
 * implies the buffer queue has been filled. */
433 bool AudioState::isBufferFilled()
435 /* All of OpenAL's buffer queueing happens under the mSrcMutex lock, as
436 * does the source gen. So when we're able to grab the lock and the source
437 * is valid, the queue must be full.
439 std::lock_guard<std::mutex> lock(mSrcMutex);
440 return mSource != 0;
/* Starts the source playing and, when the device-clock extension is
 * available, records mDeviceStartTime so getClockNoLock() can report the
 * stream position from the device clock. */
443 void AudioState::startPlayback()
445 alSourcePlay(mSource);
446 if(alcGetInteger64vSOFT)
448 using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
450 // Subtract the total buffer queue time from the current pts to get the
451 // pts of the start of the queue.
452 nanoseconds startpts = mCurrentPts - AudioBufferTotalTime;
453 int64_t srctimes[2]={0,0};
454 alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
455 auto device_time = nanoseconds(srctimes[1]);
456 auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32(srctimes[0])) /
457 mCodecCtx->sample_rate;
459 // The mixer may have ticked and incremented the device time and sample
460 // offset, so subtract the source offset from the device time to get
461 // the device time the source started at. Also subtract startpts to get
462 // the device time the stream would have started at to reach where it
463 // is now.
464 mDeviceStartTime = device_time - src_offset - startpts;
/* Computes how many sample frames the audio should be adjusted by to match
 * the master clock (0 when audio itself is the master, or when the averaged
 * drift is within tolerance). The result is consumed by readAudio(), where
 * a positive value skips samples and a negative value repeats them. */
468 int AudioState::getSync()
470 if(mMovie.mAVSyncType == SyncMaster::Audio)
471 return 0;
473 auto ref_clock = mMovie.getMasterClock();
474 auto diff = ref_clock - getClockNoLock();
476 if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
478 /* Difference is TOO big; reset accumulated average */
479 mClockDiffAvg = seconds_d64::zero();
480 return 0;
483 /* Accumulate the diffs */
484 mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
485 auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
486 if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
487 return 0;
489 /* Constrain the per-update difference to avoid exceedingly large skips */
490 diff = std::min<nanoseconds>(std::max<nanoseconds>(diff, -AudioSampleCorrectionMax),
491 AudioSampleCorrectionMax);
// Convert the (clamped) time difference into a whole number of sample frames.
492 return (int)std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count();
/* Decodes the next audio frame, updates mCurrentPts from its timestamp, and
 * converts it into mSamples via swresample. Returns the number of converted
 * sample frames, or 0 on EOF/quit/decode failure. */
495 int AudioState::decodeFrame()
497 while(!mMovie.mQuit.load(std::memory_order_relaxed))
499 std::unique_lock<std::mutex> lock(mQueueMtx);
500 int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
501 if(ret == AVERROR(EAGAIN))
// Decoder is starved: flag the demuxer for more data and wait.
503 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
504 std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
505 mMovie.mSendCond.notify_one();
506 do {
507 mQueueCond.wait(lock);
508 ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
509 } while(ret == AVERROR(EAGAIN));
511 lock.unlock();
512 if(ret == AVERROR_EOF) break;
513 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
514 mMovie.mSendCond.notify_one();
515 if(ret < 0)
517 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
518 return 0;
// Skip empty frames.
521 if(mDecodedFrame->nb_samples <= 0)
523 av_frame_unref(mDecodedFrame.get());
524 continue;
527 /* If provided, update w/ pts */
528 if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
529 mCurrentPts = std::chrono::duration_cast<nanoseconds>(
530 seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
// Grow the conversion buffer when a frame is larger than any seen so far.
533 if(mDecodedFrame->nb_samples > mSamplesMax)
535 av_freep(&mSamples)
540 mSamplesMax = mDecodedFrame->nb_samples;
542 /* Return the amount of sample frames converted */
543 int data_size = swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
544 (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
547 av_frame_unref(mDecodedFrame.get());
548 return data_size;
551 return 0;
554 /* Duplicates the sample at in to out, count times. The frame size is a
555 * multiple of the template type size.
557 template<typename T>
558 static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
560 const T *sample = reinterpret_cast<const T*>(in);
561 T *dst = reinterpret_cast<T*>(out);
// Fast path: one T per frame, so a plain fill works.
562 if(frame_size == sizeof(T))
563 std::fill_n(dst, count, *sample);
564 else
566 /* NOTE: frame_size is a multiple of sizeof(T). */
567 int type_mult = frame_size / sizeof(T);
568 int i = 0;
// Cycle through the frame's T-sized pieces, repeating the whole frame count times.
569 std::generate_n(dst, count*type_mult,
570 [sample,type_mult,&i]() -> T
572 T ret = sample[i];
573 i = (i+1)%type_mult;
574 return ret;
/* Fills 'samples' with up to 'length' bytes of converted audio, applying the
 * sync correction from getSync(): a positive correction skips decoded
 * samples, a negative one duplicates the first sample (mSamplesPos < 0).
 * Returns the number of bytes written. */
581 int AudioState::readAudio(uint8_t *samples, int length)
583 int sample_skip = getSync();
584 int audio_size = 0;
586 /* Read the next chunk of data, refill the buffer, and queue it
587 * on the source */
588 length /= mFrameSize;
589 while(audio_size < length)
591 if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
593 int frame_len = decodeFrame();
594 if(frame_len <= 0) break;
596 mSamplesLen = frame_len;
597 mSamplesPos = std::min(mSamplesLen, sample_skip);
598 sample_skip -= mSamplesPos;
600 // Adjust the device start time and current pts by the amount we're
601 // skipping/duplicating, so that the clock remains correct for the
602 // current stream position.
603 auto skip = nanoseconds(seconds(mSamplesPos)) / mCodecCtx->sample_rate;
604 mDeviceStartTime -= skip;
605 mCurrentPts += skip;
606 continue;
609 int rem = length - audio_size;
610 if(mSamplesPos >= 0)
// Normal copy of decoded samples.
612 int len = mSamplesLen - mSamplesPos;
613 if(rem > len) rem = len;
614 memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
616 else
618 rem = std::min(rem, -mSamplesPos);
620 /* Add samples by copying the first sample */
// Pick the widest element type that evenly divides the frame size.
621 if((mFrameSize&7) == 0)
622 sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
623 else if((mFrameSize&3) == 0)
624 sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
625 else if((mFrameSize&1) == 0)
626 sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
627 else
628 sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
631 mSamplesPos += rem;
632 mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
633 samples += rem*mFrameSize;
634 audio_size += rem;
// If the stream ended mid-buffer, pad the remainder with silence.
637 if(audio_size < length && audio_size > 0)
639 int rem = length - audio_size;
640 std::fill_n(samples, rem*mFrameSize,
641 (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
642 mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
643 audio_size += rem;
646 return audio_size * mFrameSize;
/* Audio thread entry point: picks an OpenAL output format matching the
 * codec's sample format/channel layout, sets up swresample, then loops
 * decoding audio into AL buffers queued on the source until EOF or quit. */
650 int AudioState::handler()
652 std::unique_lock<std::mutex> lock(mSrcMutex);
653 ALenum fmt;
655 /* Find a suitable format for OpenAL. */
656 mDstChanLayout = 0;
/* 8-bit unsigned output for U8/U8P input. */
657 if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
659 mDstSampleFmt = AV_SAMPLE_FMT_U8;
660 mFrameSize = 1;
661 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
662 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
663 (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
665 mDstChanLayout = mCodecCtx->channel_layout;
666 mFrameSize *= 8;
667 mFormat = fmt;
669 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
670 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
671 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
672 (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
674 mDstChanLayout = mCodecCtx->channel_layout;
675 mFrameSize *= 6;
676 mFormat = fmt;
678 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
680 mDstChanLayout = mCodecCtx->channel_layout;
681 mFrameSize *= 1;
682 mFormat = AL_FORMAT_MONO8;
/* Fall back to stereo downmix when no layout matched. */
684 if(!mDstChanLayout)
686 mDstChanLayout = AV_CH_LAYOUT_STEREO;
687 mFrameSize *= 2;
688 mFormat = AL_FORMAT_STEREO8;
/* 32-bit float output for FLT/FLTP input, when AL_EXT_FLOAT32 is available. */
691 if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
692 alIsExtensionPresent("AL_EXT_FLOAT32"))
694 mDstSampleFmt = AV_SAMPLE_FMT_FLT;
695 mFrameSize = 4;
696 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
697 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
698 (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
700 mDstChanLayout = mCodecCtx->channel_layout;
701 mFrameSize *= 8;
702 mFormat = fmt;
704 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
705 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
706 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
707 (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
709 mDstChanLayout = mCodecCtx->channel_layout;
710 mFrameSize *= 6;
711 mFormat = fmt;
713 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
715 mDstChanLayout = mCodecCtx->channel_layout;
716 mFrameSize *= 1;
717 mFormat = AL_FORMAT_MONO_FLOAT32;
719 if(!mDstChanLayout)
721 mDstChanLayout = AV_CH_LAYOUT_STEREO;
722 mFrameSize *= 2;
723 mFormat = AL_FORMAT_STEREO_FLOAT32;
/* Default: 16-bit signed output for everything else. */
726 if(!mDstChanLayout)
728 mDstSampleFmt = AV_SAMPLE_FMT_S16;
729 mFrameSize = 2;
730 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
731 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
732 (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
734 mDstChanLayout = mCodecCtx->channel_layout;
735 mFrameSize *= 8;
736 mFormat = fmt;
738 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
739 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
740 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
741 (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
743 mDstChanLayout = mCodecCtx->channel_layout;
744 mFrameSize *= 6;
745 mFormat = fmt;
747 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
749 mDstChanLayout = mCodecCtx->channel_layout;
750 mFrameSize *= 1;
751 mFormat = AL_FORMAT_MONO16;
753 if(!mDstChanLayout)
755 mDstChanLayout = AV_CH_LAYOUT_STEREO;
756 mFrameSize *= 2;
757 mFormat = AL_FORMAT_STEREO16;
/* One AL buffer's worth of bytes: sample_rate * AudioBufferTime * frame size. */
760 ALsizei buffer_len = std::chrono::duration_cast<std::chrono::duration<int>>(
761 mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize;
762 void *samples = av_malloc(buffer_len);
764 mSamples = NULL;
765 mSamplesMax = 0;
766 mSamplesPos = 0;
767 mSamplesLen = 0;
769 mDecodedFrame.reset(av_frame_alloc());
770 if(!mDecodedFrame)
772 std::cerr<< "Failed to allocate audio frame" <<std::endl;
773 goto finish;
776 mSwresCtx.reset(swr_alloc_set_opts(nullptr,
777 mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
778 mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
779 (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
780 mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
781 0, nullptr
783 if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
785 std::cerr<< "Failed to initialize audio converter" <<std::endl;
786 goto finish;
/* Create the AL buffers and source. */
789 mBuffers.assign(AudioBufferTotalTime / AudioBufferTime, 0);
790 alGenBuffers(mBuffers.size(), mBuffers.data());
791 alGenSources(1, &mSource);
793 if(EnableDirectOut)
794 alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
796 while(alGetError() == AL_NO_ERROR && !mMovie.mQuit.load(std::memory_order_relaxed))
798 /* First remove any processed buffers. */
799 ALint processed;
800 alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
801 while(processed > 0)
803 std::array<ALuint,4> bids;
804 alSourceUnqueueBuffers(mSource, std::min<ALsizei>(bids.size(), processed),
805 bids.data());
806 processed -= std::min<ALsizei>(bids.size(), processed);
809 /* Refill the buffer queue. */
810 ALint queued;
811 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
812 while((ALuint)queued < mBuffers.size())
814 int audio_size;
816 /* Read the next chunk of data, fill the buffer, and queue it on
817 * the source */
818 audio_size = readAudio(reinterpret_cast<uint8_t*>(samples), buffer_len);
819 if(audio_size <= 0) break;
821 ALuint bufid = mBuffers[mBufferIdx++];
822 mBufferIdx %= mBuffers.size();
824 alBufferData(bufid, mFormat, samples, audio_size, mCodecCtx->sample_rate);
825 alSourceQueueBuffers(mSource, 1, &bufid);
826 queued++;
/* Nothing queued at all means the stream is done. */
828 if(queued == 0)
829 break;
831 /* Check that the source is playing. */
832 ALint state;
833 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
834 if(state == AL_STOPPED)
836 /* AL_STOPPED means there was an underrun. Clear the buffer queue
837 * since this likely means we're late, and rewind the source to get
838 * it back into an AL_INITIAL state.
840 alSourceRewind(mSource);
841 alSourcei(mSource, AL_BUFFER, 0);
842 continue;
845 /* (re)start the source if needed, and wait for a buffer to finish */
846 if(state != AL_PLAYING && state != AL_PAUSED &&
847 mMovie.mPlaying.load(std::memory_order_relaxed))
848 startPlayback();
/* Sleep with the lock released so other threads can read the clock. */
850 lock.unlock();
851 SDL_Delay((AudioBufferTime/3).count());
852 lock.lock();
855 alSourceRewind(mSource);
856 alSourcei(mSource, AL_BUFFER, 0);
858 finish:
859 av_freep(&samples);
861 return 0;
/* Video clock: last frame's pts plus the wall time elapsed since it was set. */
865 nanoseconds VideoState::getClock()
867 /* NOTE: This returns incorrect times while not playing. */
868 auto delta = get_avtime() - mCurrentPtsTime;
869 return mCurrentPts + delta;
/* True when the picture ring buffer has no free slots. */
872 bool VideoState::isBufferFilled()
874 std::unique_lock<std::mutex> lock(mPictQMutex);
875 return mPictQSize >= mPictQ.size();
/* SDL timer callback: posts FF_REFRESH_EVENT (with the VideoState in data1)
 * to the main thread, then cancels the timer by returning 0. */
878 Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
880 SDL_Event evt{};
881 evt.user.type = FF_REFRESH_EVENT;
882 evt.user.data1 = opaque;
883 SDL_PushEvent(&evt);
884 return 0; /* 0 means stop timer */
887 /* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
888 void VideoState::schedRefresh(milliseconds delay)
890 SDL_AddTimer(delay.count(), sdl_refresh_timer_cb, this);
893 /* Called by VideoState::refreshTimer to display the next video frame. */
894 void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
896 Picture *vp = &mPictQ[mPictQRead];
898 if(!vp->mImage)
899 return;
901 float aspect_ratio;
902 int win_w, win_h;
903 int w, h, x, y;
/* Derive the display aspect ratio from the codec's sample aspect ratio,
 * falling back to the raw width/height ratio. */
905 if(mCodecCtx->sample_aspect_ratio.num == 0)
906 aspect_ratio = 0.0f;
907 else
909 aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
910 mCodecCtx->height;
912 if(aspect_ratio <= 0.0f)
913 aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;
/* Letterbox: fit the frame in the window, rounding sizes to multiples of 4. */
915 SDL_GetWindowSize(screen, &win_w, &win_h);
916 h = win_h;
917 w = ((int)rint(h * aspect_ratio) + 3) & ~3;
918 if(w > win_w)
920 w = win_w;
921 h = ((int)rint(w / aspect_ratio) + 3) & ~3;
923 x = (win_w - w) / 2;
924 y = (win_h - h) / 2;
926 SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
927 SDL_Rect dst_rect{ x, y, w, h };
928 SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
929 SDL_RenderPresent(renderer);
932 /* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
933 * was created. It handles the display of the next decoded video frame (if not
934 * falling behind), and sets up the timer for the following video frame.
936 void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
/* No video stream: either finish up on EOS or poll again later. */
938 if(!mStream)
940 if(mEOS)
942 mFinalUpdate = true;
943 std::unique_lock<std::mutex>(mPictQMutex).unlock();
944 mPictQCond.notify_all();
945 return;
947 schedRefresh(milliseconds(100));
948 return;
950 if(!mMovie.mPlaying.load(std::memory_order_relaxed))
952 schedRefresh(milliseconds(1));
953 return;
956 std::unique_lock<std::mutex> lock(mPictQMutex);
957 retry:
958 if(mPictQSize == 0)
960 if(mEOS)
961 mFinalUpdate = true;
962 else
963 schedRefresh(milliseconds(1));
964 lock.unlock();
965 mPictQCond.notify_all();
966 return;
969 Picture *vp = &mPictQ[mPictQRead];
970 mCurrentPts = vp->mPts;
971 mCurrentPtsTime = get_avtime();
973 /* Get delay using the frame pts and the pts from last frame. */
974 auto delay = vp->mPts - mFrameLastPts;
975 if(delay <= seconds::zero() || delay >= seconds(1))
977 /* If incorrect delay, use previous one. */
978 delay = mFrameLastDelay;
980 /* Save for next frame. */
981 mFrameLastDelay = delay;
982 mFrameLastPts = vp->mPts;
984 /* Update delay to sync to clock if not master source. */
985 if(mMovie.mAVSyncType != SyncMaster::Video)
987 auto ref_clock = mMovie.getMasterClock();
988 auto diff = vp->mPts - ref_clock;
990 /* Skip or repeat the frame. Take delay into account. */
991 auto sync_threshold = std::min<nanoseconds>(delay, VideoSyncThreshold);
992 if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
994 if(diff <= -sync_threshold)
995 delay = nanoseconds::zero();
996 else if(diff >= sync_threshold)
997 delay *= 2;
1001 mFrameTimer += delay;
1002 /* Compute the REAL delay. */
1003 auto actual_delay = mFrameTimer - get_avtime();
1004 if(!(actual_delay >= VideoSyncThreshold))
1006 /* We don't have time to handle this picture, just skip to the next one. */
1007 mPictQRead = (mPictQRead+1)%mPictQ.size();
1008 mPictQSize--;
1009 goto retry;
1011 schedRefresh(std::chrono::duration_cast<milliseconds>(actual_delay));
1013 /* Show the picture! */
1014 display(screen, renderer);
1016 /* Update queue for next picture. */
1017 mPictQRead = (mPictQRead+1)%mPictQ.size();
1018 mPictQSize--;
1019 lock.unlock();
/* Wake queuePicture(), which may be waiting for a free slot. */
1020 mPictQCond.notify_all();
1023 /* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
1024 * main thread where the renderer was created.
1026 void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
1028 Picture *vp = &mPictQ[mPictQWrite];
1029 bool fmt_updated = false;
1031 /* allocate or resize the buffer! */
1032 if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
1034 fmt_updated = true;
1035 if(vp->mImage)
1036 SDL_DestroyTexture(vp->mImage);
/* Texture uses the coded size; vp->mWidth/mHeight record the logical size. */
1037 vp->mImage = SDL_CreateTexture(
1038 renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
1039 mCodecCtx->coded_width, mCodecCtx->coded_height
1041 if(!vp->mImage)
1042 std::cerr<< "Failed to create YV12 texture!" <<std::endl;
1043 vp->mWidth = mCodecCtx->width;
1044 vp->mHeight = mCodecCtx->height;
1046 if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
1048 /* For the first update, set the window size to the video size. */
1049 mFirstUpdate = false;
1051 int w = vp->mWidth;
1052 int h = vp->mHeight;
/* Apply the sample aspect ratio by stretching the larger dimension. */
1053 if(mCodecCtx->sample_aspect_ratio.den != 0)
1055 double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
1056 if(aspect_ratio >= 1.0)
1057 w = (int)(w*aspect_ratio + 0.5);
1058 else if(aspect_ratio > 0.0)
1059 h = (int)(h/aspect_ratio + 0.5);
1061 SDL_SetWindowSize(screen, w, h);
1065 if(vp->mImage)
1067 AVFrame *frame = mDecodedFrame.get();
1068 void *pixels = nullptr;
1069 int pitch = 0;
/* YUV420P frames upload directly; other pixel formats are converted
 * through swscale into the locked texture. */
1071 if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
1072 SDL_UpdateYUVTexture(vp->mImage, nullptr,
1073 frame->data[0], frame->linesize[0],
1074 frame->data[1], frame->linesize[1],
1075 frame->data[2], frame->linesize[2]
1077 else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
1078 std::cerr<< "Failed to lock texture" <<std::endl;
1079 else
1081 // Convert the image into YUV format that SDL uses
1082 int coded_w = mCodecCtx->coded_width;
1083 int coded_h = mCodecCtx->coded_height;
1084 int w = mCodecCtx->width;
1085 int h = mCodecCtx->height;
1086 if(!mSwscaleCtx || fmt_updated)
1088 mSwscaleCtx.reset(sws_getContext(
1089 w, h, mCodecCtx->pix_fmt,
1090 w, h, AV_PIX_FMT_YUV420P, 0,
1091 nullptr, nullptr, nullptr
1095 /* point pict at the queue */
1096 uint8_t *pict_data[3];
1097 pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
1098 pict_data[1] = pict_data[0] + coded_w*coded_h;
1099 pict_data[2] = pict_data[1] + coded_w*coded_h/4;
1101 int pict_linesize[3];
1102 pict_linesize[0] = pitch;
1103 pict_linesize[1] = pitch / 2;
1104 pict_linesize[2] = pitch / 2;
1106 sws_scale(mSwscaleCtx.get(), (const uint8_t**)frame->data,
1107 frame->linesize, 0, h, pict_data, pict_linesize);
1108 SDL_UnlockTexture(vp->mImage);
/* Mark the slot ready and wake queuePicture(), which waits on mUpdated. */
1112 vp->mUpdated.store(true, std::memory_order_release);
1113 std::unique_lock<std::mutex>(mPictQMutex).unlock();
1114 mPictQCond.notify_one();
/* Called from the video thread: claims the next write slot in the picture
 * queue, asks the main thread (via FF_UPDATE_EVENT) to upload the decoded
 * frame into its texture, waits for that to finish, then publishes the slot
 * with the given pts. Returns 0 on success, -1 on quit. */
1117 int VideoState::queuePicture(nanoseconds pts)
1119 /* Wait until we have space for a new pic */
1120 std::unique_lock<std::mutex> lock(mPictQMutex);
1121 while(mPictQSize >= mPictQ.size() && !mMovie.mQuit.load(std::memory_order_relaxed))
1122 mPictQCond.wait(lock);
1123 lock.unlock();
1125 if(mMovie.mQuit.load(std::memory_order_relaxed))
1126 return -1;
1128 Picture *vp = &mPictQ[mPictQWrite];
1130 /* We have to create/update the picture in the main thread */
1131 vp->mUpdated.store(false, std::memory_order_relaxed);
1132 SDL_Event evt{};
1133 evt.user.type = FF_UPDATE_EVENT;
1134 evt.user.data1 = this;
1135 SDL_PushEvent(&evt);
1137 /* Wait until the picture is updated. */
1138 lock.lock();
1139 while(!vp->mUpdated.load(std::memory_order_relaxed))
1141 if(mMovie.mQuit.load(std::memory_order_relaxed))
1142 return -1;
1143 mPictQCond.wait(lock);
1145 if(mMovie.mQuit.load(std::memory_order_relaxed))
1146 return -1;
1147 vp->mPts = pts;
1149 mPictQWrite = (mPictQWrite+1)%mPictQ.size();
1150 mPictQSize++;
1151 lock.unlock();
1153 return 0;
1156 int VideoState::handler()
1158 mDecodedFrame.reset(av_frame_alloc());
1159 while(!mMovie.mQuit.load(std::memory_order_relaxed))
1161 std::unique_lock<std::mutex> lock(mQueueMtx);
1162 /* Decode video frame */
1163 int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
1164 if(ret == AVERROR(EAGAIN))
1166 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
1167 std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
1168 mMovie.mSendCond.notify_one();
1169 do {
1170 mQueueCond.wait(lock);
1171 ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
1172 } while(ret == AVERROR(EAGAIN));
1174 lock.unlock();
1175 if(ret == AVERROR_EOF) break;
1176 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
1177 mMovie.mSendCond.notify_one();
1178 if(ret < 0)
1180 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
1181 continue;
1184 /* Get the PTS for this frame. */
1185 nanoseconds pts;
1186 if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
1187 mClock = std::chrono::duration_cast<nanoseconds>(
1188 seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
1190 pts = mClock;
1192 /* Update the video clock to the next expected PTS. */
1193 auto frame_delay = av_q2d(mCodecCtx->time_base);
1194 frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
1195 mClock += std::chrono::duration_cast<nanoseconds>(seconds_d64(frame_delay));
1197 if(queuePicture(pts) < 0)
1198 break;
1199 av_frame_unref(mDecodedFrame.get());
1201 mEOS = true;
1203 std::unique_lock<std::mutex> lock(mPictQMutex);
1204 if(mMovie.mQuit.load(std::memory_order_relaxed))
1206 mPictQRead = 0;
1207 mPictQWrite = 0;
1208 mPictQSize = 0;
1210 while(!mFinalUpdate)
1211 mPictQCond.wait(lock);
1213 return 0;
1217 int MovieState::decode_interrupt_cb(void *ctx)
1219 return reinterpret_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
1222 bool MovieState::prepare()
1224 AVIOContext *avioctx = nullptr;
1225 AVIOInterruptCB intcb = { decode_interrupt_cb, this };
1226 if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
1228 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1229 return false;
1231 mIOContext.reset(avioctx);
1233 /* Open movie file. If avformat_open_input fails it will automatically free
1234 * this context, so don't set it onto a smart pointer yet.
1236 AVFormatContext *fmtctx = avformat_alloc_context();
1237 fmtctx->pb = mIOContext.get();
1238 fmtctx->interrupt_callback = intcb;
1239 if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
1241 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1242 return false;
1244 mFormatCtx.reset(fmtctx);
1246 /* Retrieve stream information */
1247 if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
1249 std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
1250 return false;
1253 mVideo.schedRefresh(milliseconds(40));
1255 mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
1256 return true;
1259 void MovieState::setTitle(SDL_Window *window)
1261 auto pos1 = mFilename.rfind('/');
1262 auto pos2 = mFilename.rfind('\\');
1263 auto fpos = ((pos1 == std::string::npos) ? pos2 :
1264 (pos2 == std::string::npos) ? pos1 :
1265 std::max(pos1, pos2)) + 1;
1266 SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1269 nanoseconds MovieState::getClock()
1271 if(!mPlaying.load(std::memory_order_relaxed))
1272 return nanoseconds::zero();
1273 return get_avtime() - mClockBase;
1276 nanoseconds MovieState::getMasterClock()
1278 if(mAVSyncType == SyncMaster::Video)
1279 return mVideo.getClock();
1280 if(mAVSyncType == SyncMaster::Audio)
1281 return mAudio.getClock();
1282 return getClock();
1285 nanoseconds MovieState::getDuration()
1286 { return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
1288 int MovieState::streamComponentOpen(int stream_index)
1290 if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
1291 return -1;
1293 /* Get a pointer to the codec context for the stream, and open the
1294 * associated codec.
1296 AVCodecCtxPtr avctx(avcodec_alloc_context3(nullptr));
1297 if(!avctx) return -1;
1299 if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
1300 return -1;
1302 AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
1303 if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
1305 std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
1306 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
1307 return -1;
1310 /* Initialize and start the media type handler */
1311 switch(avctx->codec_type)
1313 case AVMEDIA_TYPE_AUDIO:
1314 mAudio.mStream = mFormatCtx->streams[stream_index];
1315 mAudio.mCodecCtx = std::move(avctx);
1317 mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
1318 break;
1320 case AVMEDIA_TYPE_VIDEO:
1321 mVideo.mStream = mFormatCtx->streams[stream_index];
1322 mVideo.mCodecCtx = std::move(avctx);
1324 mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
1325 break;
1327 default:
1328 return -1;
1331 return stream_index;
1334 int MovieState::parse_handler()
1336 int video_index = -1;
1337 int audio_index = -1;
1339 /* Dump information about file onto standard error */
1340 av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);
1342 /* Find the first video and audio streams */
1343 for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
1345 auto codecpar = mFormatCtx->streams[i]->codecpar;
1346 if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
1347 video_index = streamComponentOpen(i);
1348 else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
1349 audio_index = streamComponentOpen(i);
1352 if(video_index < 0 && audio_index < 0)
1354 std::cerr<< mFilename<<": could not open codecs" <<std::endl;
1355 mQuit = true;
1358 PacketQueue audio_queue, video_queue;
1359 bool input_finished = false;
1361 /* Main packet reading/dispatching loop */
1362 while(!mQuit.load(std::memory_order_relaxed) && !input_finished)
1364 AVPacket packet;
1365 if(av_read_frame(mFormatCtx.get(), &packet) < 0)
1366 input_finished = true;
1367 else
1369 /* Copy the packet into the queue it's meant for. */
1370 if(packet.stream_index == video_index)
1371 video_queue.put(&packet);
1372 else if(packet.stream_index == audio_index)
1373 audio_queue.put(&packet);
1374 av_packet_unref(&packet);
1377 do {
1378 /* Send whatever queued packets we have. */
1379 if(!audio_queue.empty())
1381 std::unique_lock<std::mutex> lock(mAudio.mQueueMtx);
1382 int ret;
1383 do {
1384 ret = avcodec_send_packet(mAudio.mCodecCtx.get(), audio_queue.front());
1385 if(ret != AVERROR(EAGAIN)) audio_queue.pop();
1386 } while(ret != AVERROR(EAGAIN) && !audio_queue.empty());
1387 lock.unlock();
1388 mAudio.mQueueCond.notify_one();
1390 if(!video_queue.empty())
1392 std::unique_lock<std::mutex> lock(mVideo.mQueueMtx);
1393 int ret;
1394 do {
1395 ret = avcodec_send_packet(mVideo.mCodecCtx.get(), video_queue.front());
1396 if(ret != AVERROR(EAGAIN)) video_queue.pop();
1397 } while(ret != AVERROR(EAGAIN) && !video_queue.empty());
1398 lock.unlock();
1399 mVideo.mQueueCond.notify_one();
1401 /* If the queues are completely empty, or it's not full and there's
1402 * more input to read, go get more.
1404 size_t queue_size = audio_queue.totalSize() + video_queue.totalSize();
1405 if(queue_size == 0 || (queue_size < MAX_QUEUE_SIZE && !input_finished))
1406 break;
1408 if(!mPlaying.load(std::memory_order_relaxed))
1410 if((!mAudio.mCodecCtx || mAudio.isBufferFilled()) &&
1411 (!mVideo.mCodecCtx || mVideo.isBufferFilled()))
1413 /* Set the base time 50ms ahead of the current av time. */
1414 mClockBase = get_avtime() + milliseconds(50);
1415 mVideo.mCurrentPtsTime = mClockBase;
1416 mVideo.mFrameTimer = mVideo.mCurrentPtsTime;
1417 mAudio.startPlayback();
1418 mPlaying.store(std::memory_order_release);
1421 /* Nothing to send or get for now, wait a bit and try again. */
1422 { std::unique_lock<std::mutex> lock(mSendMtx);
1423 if(mSendDataGood.test_and_set(std::memory_order_relaxed))
1424 mSendCond.wait_for(lock, milliseconds(10));
1426 } while(!mQuit.load(std::memory_order_relaxed));
1428 /* Pass a null packet to finish the send buffers (the receive functions
1429 * will get AVERROR_EOF when emptied).
1431 if(mVideo.mCodecCtx)
1433 { std::lock_guard<std::mutex> lock(mVideo.mQueueMtx);
1434 avcodec_send_packet(mVideo.mCodecCtx.get(), nullptr);
1436 mVideo.mQueueCond.notify_one();
1438 if(mAudio.mCodecCtx)
1440 { std::lock_guard<std::mutex> lock(mAudio.mQueueMtx);
1441 avcodec_send_packet(mAudio.mCodecCtx.get(), nullptr);
1443 mAudio.mQueueCond.notify_one();
1445 video_queue.clear();
1446 audio_queue.clear();
1448 /* all done - wait for it */
1449 if(mVideoThread.joinable())
1450 mVideoThread.join();
1451 if(mAudioThread.joinable())
1452 mAudioThread.join();
1454 mVideo.mEOS = true;
1455 std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
1456 while(!mVideo.mFinalUpdate)
1457 mVideo.mPictQCond.wait(lock);
1458 lock.unlock();
1460 SDL_Event evt{};
1461 evt.user.type = FF_MOVIE_DONE_EVENT;
1462 SDL_PushEvent(&evt);
1464 return 0;
1468 // Helper class+method to print the time with human-readable formatting.
1469 struct PrettyTime {
1470 seconds mTime;
1472 inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
1474 using hours = std::chrono::hours;
1475 using minutes = std::chrono::minutes;
1476 using std::chrono::duration_cast;
1478 seconds t = rhs.mTime;
1479 if(t.count() < 0)
1481 os << '-';
1482 t *= -1;
1485 // Only handle up to hour formatting
1486 if(t >= hours(1))
1487 os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
1488 << (duration_cast<minutes>(t).count() % 60) << 'm';
1489 else
1490 os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
1491 os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
1492 << std::setfill(' ');
1493 return os;
1496 } // namespace
1499 int main(int argc, char *argv[])
1501 std::unique_ptr<MovieState> movState;
1503 if(argc < 2)
1505 std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
1506 return 1;
1508 /* Register all formats and codecs */
1509 av_register_all();
1510 /* Initialize networking protocols */
1511 avformat_network_init();
1513 if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
1515 std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
1516 return 1;
1519 /* Make a window to put our video */
1520 SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
1521 if(!screen)
1523 std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
1524 return 1;
1526 /* Make a renderer to handle the texture image surface and rendering. */
1527 Uint32 render_flags = SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC;
1528 SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, render_flags);
1529 if(renderer)
1531 SDL_RendererInfo rinf{};
1532 bool ok = false;
1534 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1535 * software renderer. */
1536 if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1538 for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
1539 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
1541 if(!ok)
1543 std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
1544 SDL_DestroyRenderer(renderer);
1545 renderer = nullptr;
1548 if(!renderer)
1550 render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
1551 renderer = SDL_CreateRenderer(screen, -1, render_flags);
1553 if(!renderer)
1555 std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
1556 return 1;
1558 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1559 SDL_RenderFillRect(renderer, nullptr);
1560 SDL_RenderPresent(renderer);
1562 /* Open an audio device */
1563 int fileidx = 1;
1564 ALCdevice *device = [argc,argv,&fileidx]() -> ALCdevice*
1566 ALCdevice *dev = NULL;
1567 if(argc > 3 && strcmp(argv[1], "-device") == 0)
1569 fileidx = 3;
1570 dev = alcOpenDevice(argv[2]);
1571 if(dev) return dev;
1572 std::cerr<< "Failed to open \""<<argv[2]<<"\" - trying default" <<std::endl;
1574 return alcOpenDevice(nullptr);
1575 }();
1576 ALCcontext *context = alcCreateContext(device, nullptr);
1577 if(!context || alcMakeContextCurrent(context) == ALC_FALSE)
1579 std::cerr<< "Failed to set up audio device" <<std::endl;
1580 if(context)
1581 alcDestroyContext(context);
1582 return 1;
1585 const ALCchar *name = nullptr;
1586 if(alcIsExtensionPresent(device, "ALC_ENUMERATE_ALL_EXT"))
1587 name = alcGetString(device, ALC_ALL_DEVICES_SPECIFIER);
1588 if(!name || alcGetError(device) != AL_NO_ERROR)
1589 name = alcGetString(device, ALC_DEVICE_SPECIFIER);
1590 std::cout<< "Opened \""<<name<<"\"" <<std::endl;
1592 if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
1594 std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
1595 alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
1596 alcGetProcAddress(device, "alcGetInteger64vSOFT")
1600 if(alIsExtensionPresent("AL_SOFT_source_latency"))
1602 std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
1603 alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
1604 alGetProcAddress("alGetSourcei64vSOFT")
1608 if(fileidx < argc && strcmp(argv[fileidx], "-direct") == 0)
1610 ++fileidx;
1611 if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
1612 std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
1613 else
1615 std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
1616 EnableDirectOut = true;
1620 while(fileidx < argc && !movState)
1622 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1623 if(!movState->prepare()) movState = nullptr;
1625 if(!movState)
1627 std::cerr<< "Could not start a video" <<std::endl;
1628 return 1;
1630 movState->setTitle(screen);
1632 /* Default to going to the next movie at the end of one. */
1633 enum class EomAction {
1634 Next, Quit
1635 } eom_action = EomAction::Next;
1636 seconds last_time(-1);
1637 SDL_Event event;
1638 while(1)
1640 int have_evt = SDL_WaitEventTimeout(&event, 10);
1642 auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
1643 if(cur_time != last_time)
1645 auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
1646 std::cout<< "\r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
1647 last_time = cur_time;
1649 if(!have_evt) continue;
1651 switch(event.type)
1653 case SDL_KEYDOWN:
1654 switch(event.key.keysym.sym)
1656 case SDLK_ESCAPE:
1657 movState->mQuit = true;
1658 eom_action = EomAction::Quit;
1659 break;
1661 case SDLK_n:
1662 movState->mQuit = true;
1663 eom_action = EomAction::Next;
1664 break;
1666 default:
1667 break;
1669 break;
1671 case SDL_WINDOWEVENT:
1672 switch(event.window.event)
1674 case SDL_WINDOWEVENT_RESIZED:
1675 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1676 SDL_RenderFillRect(renderer, nullptr);
1677 break;
1679 default:
1680 break;
1682 break;
1684 case SDL_QUIT:
1685 movState->mQuit = true;
1686 eom_action = EomAction::Quit;
1687 break;
1689 case FF_UPDATE_EVENT:
1690 reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
1691 screen, renderer
1693 break;
1695 case FF_REFRESH_EVENT:
1696 reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
1697 screen, renderer
1699 break;
1701 case FF_MOVIE_DONE_EVENT:
1702 std::cout<<'\n';
1703 last_time = seconds(-1);
1704 if(eom_action != EomAction::Quit)
1706 movState = nullptr;
1707 while(fileidx < argc && !movState)
1709 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1710 if(!movState->prepare()) movState = nullptr;
1712 if(movState)
1714 movState->setTitle(screen);
1715 break;
1719 /* Nothing more to play. Shut everything down and quit. */
1720 movState = nullptr;
1722 alcMakeContextCurrent(nullptr);
1723 alcDestroyContext(context);
1724 alcCloseDevice(device);
1726 SDL_DestroyRenderer(renderer);
1727 renderer = nullptr;
1728 SDL_DestroyWindow(screen);
1729 screen = nullptr;
1731 SDL_Quit();
1732 exit(0);
1734 default:
1735 break;
1739 std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
1740 return 1;