Add a specific function for truncating float-to-int conversions
[openal-soft.git] / examples / alffplay.cpp
blob3bba154e1474af296aa56ae72810e68f158c951f
1 /*
2 * An example showing how to play a stream sync'd to video, using ffmpeg.
4 * Requires C++11.
5 */
7 #include <condition_variable>
8 #include <functional>
9 #include <algorithm>
10 #include <iostream>
11 #include <iomanip>
12 #include <cstring>
13 #include <limits>
14 #include <thread>
15 #include <chrono>
16 #include <atomic>
17 #include <vector>
18 #include <mutex>
19 #include <deque>
20 #include <array>
21 #include <cmath>
22 #include <string>
24 extern "C" {
25 #include "libavcodec/avcodec.h"
26 #include "libavformat/avformat.h"
27 #include "libavformat/avio.h"
28 #include "libavutil/time.h"
29 #include "libavutil/pixfmt.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/channel_layout.h"
32 #include "libswscale/swscale.h"
33 #include "libswresample/swresample.h"
36 #include "SDL.h"
38 #include "AL/alc.h"
39 #include "AL/al.h"
40 #include "AL/alext.h"
extern "C" {
/* Fallback declarations for the AL_SOFT_map_buffer extension, so this example
 * builds against AL headers that don't yet provide them. */
#ifndef AL_SOFT_map_buffer
#define AL_SOFT_map_buffer 1
typedef unsigned int ALbitfieldSOFT;
#define AL_MAP_READ_BIT_SOFT 0x00000001
#define AL_MAP_WRITE_BIT_SOFT 0x00000002
#define AL_MAP_PERSISTENT_BIT_SOFT 0x00000004
#define AL_PRESERVE_DATA_BIT_SOFT 0x00000008
typedef void (AL_APIENTRY*LPALBUFFERSTORAGESOFT)(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
typedef void* (AL_APIENTRY*LPALMAPBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length, ALbitfieldSOFT access);
typedef void (AL_APIENTRY*LPALUNMAPBUFFERSOFT)(ALuint buffer);
typedef void (AL_APIENTRY*LPALFLUSHMAPPEDBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length);
#endif

/* Fallback declarations for the AL_SOFT_events extension (asynchronous event
 * notifications: buffer completion, source state changes, disconnects, etc). */
#ifndef AL_SOFT_events
#define AL_SOFT_events 1
#define AL_EVENT_CALLBACK_FUNCTION_SOFT 0x1220
#define AL_EVENT_CALLBACK_USER_PARAM_SOFT 0x1221
#define AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT 0x1222
#define AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT 0x1223
#define AL_EVENT_TYPE_ERROR_SOFT 0x1224
#define AL_EVENT_TYPE_PERFORMANCE_SOFT 0x1225
#define AL_EVENT_TYPE_DEPRECATED_SOFT 0x1226
#define AL_EVENT_TYPE_DISCONNECTED_SOFT 0x1227
typedef void (AL_APIENTRY*ALEVENTPROCSOFT)(ALenum eventType, ALuint object, ALuint param,
                                           ALsizei length, const ALchar *message,
                                           void *userParam);
typedef void (AL_APIENTRY*LPALEVENTCONTROLSOFT)(ALsizei count, const ALenum *types, ALboolean enable);
typedef void (AL_APIENTRY*LPALEVENTCALLBACKSOFT)(ALEVENTPROCSOFT callback, void *userParam);
typedef void* (AL_APIENTRY*LPALGETPOINTERSOFT)(ALenum pname);
typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values);
#endif
} /* extern "C" */
namespace {

/* Shorthand aliases for the chrono durations used throughout. */
using nanoseconds = std::chrono::nanoseconds;
using microseconds = std::chrono::microseconds;
using milliseconds = std::chrono::milliseconds;
using seconds = std::chrono::seconds;
using seconds_d64 = std::chrono::duration<double>;

const std::string AppName("alffplay");

/* Set from the command line; routes audio channels directly to outputs. */
bool EnableDirectOut = false;
/* Extension function pointers, loaded at startup (null if unsupported). */
LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;

LPALBUFFERSTORAGESOFT alBufferStorageSOFT;
LPALMAPBUFFERSOFT alMapBufferSOFT;
LPALUNMAPBUFFERSOFT alUnmapBufferSOFT;

LPALEVENTCONTROLSOFT alEventControlSOFT;
LPALEVENTCALLBACKSOFT alEventCallbackSOFT;

/* If the A/V clocks differ by more than this, give up resyncing. */
const seconds AVNoSyncThreshold(10);

const milliseconds VideoSyncThreshold(10);
#define VIDEO_PICTURE_QUEUE_SIZE 16

const seconds_d64 AudioSyncThreshold(0.03);
const milliseconds AudioSampleCorrectionMax(50);
/* Averaging filter coefficient for audio sync. */
#define AUDIO_DIFF_AVG_NB 20
const double AudioAvgFilterCoeff = std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB);
/* Per-buffer size, in time */
const milliseconds AudioBufferTime(20);
/* Buffer total size, in time (should be divisible by the buffer time) */
const milliseconds AudioBufferTotalTime(800);

#define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
/* Custom SDL user-event codes used to drive the main loop. */
enum {
    FF_UPDATE_EVENT = SDL_USEREVENT,
    FF_REFRESH_EVENT,
    FF_MOVIE_DONE_EVENT
};

/* Which clock drives A/V synchronization. */
enum class SyncMaster {
    Audio,
    Video,
    External,

    Default = External
};
129 inline microseconds get_avtime()
130 { return microseconds(av_gettime()); }
/* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
struct AVIOContextDeleter {
    /* avio_closep flushes, closes, and nulls the I/O context. */
    void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
};
using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;

struct AVFormatCtxDeleter {
    void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
};
using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;

struct AVCodecCtxDeleter {
    void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
};
using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;

struct AVFrameDeleter {
    void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
};
using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;

struct SwrContextDeleter {
    void operator()(SwrContext *ptr) { swr_free(&ptr); }
};
using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;

struct SwsContextDeleter {
    /* sws_freeContext takes the pointer by value (no double-pointer form). */
    void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
};
using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
164 class PacketQueue {
165 std::deque<AVPacket> mPackets;
166 size_t mTotalSize{0};
168 public:
169 ~PacketQueue() { clear(); }
171 bool empty() const noexcept { return mPackets.empty(); }
172 size_t totalSize() const noexcept { return mTotalSize; }
174 void put(const AVPacket *pkt)
176 mPackets.push_back(AVPacket{});
177 if(av_packet_ref(&mPackets.back(), pkt) != 0)
178 mPackets.pop_back();
179 else
180 mTotalSize += mPackets.back().size;
183 AVPacket *front() noexcept
184 { return &mPackets.front(); }
186 void pop()
188 AVPacket *pkt = &mPackets.front();
189 mTotalSize -= pkt->size;
190 av_packet_unref(pkt);
191 mPackets.pop_front();
194 void clear()
196 for(AVPacket &pkt : mPackets)
197 av_packet_unref(&pkt);
198 mPackets.clear();
199 mTotalSize = 0;
204 struct MovieState;
/* Per-movie audio decoding/playback state. Decodes packets, converts them to
 * an OpenAL-friendly format via swresample, and feeds an AL source. */
struct AudioState {
    /* The movie this stream belongs to (not owned). */
    MovieState &mMovie;

    AVStream *mStream{nullptr};
    AVCodecCtxPtr mCodecCtx;

    /* Guards decoder access between this thread and the parse thread. */
    std::mutex mQueueMtx;
    std::condition_variable mQueueCond;

    /* Used for clock difference average computation */
    seconds_d64 mClockDiffAvg{0};

    /* Time of the next sample to be buffered */
    nanoseconds mCurrentPts{0};

    /* Device clock time that the stream started at. */
    nanoseconds mDeviceStartTime{nanoseconds::min()};

    /* Decompressed sample frame, and swresample context for conversion */
    AVFramePtr mDecodedFrame;
    SwrContextPtr mSwresCtx;

    /* Conversion format, for what gets fed to OpenAL */
    int mDstChanLayout{0};
    AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};

    /* Storage of converted samples */
    uint8_t *mSamples{nullptr};
    int mSamplesLen{0}; /* In samples */
    int mSamplesPos{0};
    int mSamplesMax{0};

    /* OpenAL format */
    ALenum mFormat{AL_NONE};
    ALsizei mFrameSize{0};

    /* Guards the AL source and buffer queue state below. */
    std::mutex mSrcMutex;
    std::condition_variable mSrcCond;
    /* Cleared by the AL event callback when the device disconnects. */
    std::atomic_flag mConnected;
    ALuint mSource{0};
    std::vector<ALuint> mBuffers;
    ALsizei mBufferIdx{0};

    AudioState(MovieState &movie) : mMovie(movie)
    { mConnected.test_and_set(std::memory_order_relaxed); }
    ~AudioState()
    {
        if(mSource)
            alDeleteSources(1, &mSource);
        if(!mBuffers.empty())
            alDeleteBuffers(mBuffers.size(), mBuffers.data());

        av_freep(&mSamples);
    }

    /* AL_SOFT_events callback; see definition for event handling details. */
    static void AL_APIENTRY EventCallback(ALenum eventType, ALuint object, ALuint param,
                                          ALsizei length, const ALchar *message,
                                          void *userParam);

    /* Current playback time; *NoLock assumes mSrcMutex is already held. */
    nanoseconds getClockNoLock();
    nanoseconds getClock()
    {
        std::lock_guard<std::mutex> lock(mSrcMutex);
        return getClockNoLock();
    }

    bool isBufferFilled();
    void startPlayback();

    /* Sample-count correction needed to sync to the master clock. */
    int getSync();
    int decodeFrame();
    bool readAudio(uint8_t *samples, int length);

    /* Audio thread entry point. */
    int handler();
};
/* Per-movie video decoding/display state. Decoded frames are converted with
 * swscale into SDL textures queued in a fixed-size ring buffer. */
struct VideoState {
    /* The movie this stream belongs to (not owned). */
    MovieState &mMovie;

    AVStream *mStream{nullptr};
    AVCodecCtxPtr mCodecCtx;

    /* Guards decoder access between this thread and the parse thread. */
    std::mutex mQueueMtx;
    std::condition_variable mQueueCond;

    nanoseconds mClock{0};
    nanoseconds mFrameTimer{0};
    nanoseconds mFrameLastPts{0};
    nanoseconds mFrameLastDelay{0};
    nanoseconds mCurrentPts{0};
    /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
    microseconds mCurrentPtsTime{0};

    /* Decompressed video frame, and swscale context for conversion */
    AVFramePtr mDecodedFrame;
    SwsContextPtr mSwscaleCtx;

    /* One slot of the picture ring buffer. */
    struct Picture {
        SDL_Texture *mImage{nullptr};
        int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
        std::atomic<bool> mUpdated{false};
        nanoseconds mPts{0};

        ~Picture()
        {
            if(mImage)
                SDL_DestroyTexture(mImage);
            mImage = nullptr;
        }
    };
    /* Ring buffer of decoded pictures plus read/write cursors. */
    std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
    size_t mPictQSize{0}, mPictQRead{0}, mPictQWrite{0};
    std::mutex mPictQMutex;
    std::condition_variable mPictQCond;
    bool mFirstUpdate{true};
    std::atomic<bool> mEOS{false};
    std::atomic<bool> mFinalUpdate{false};

    VideoState(MovieState &movie) : mMovie(movie) { }

    nanoseconds getClock();
    bool isBufferFilled();

    /* SDL timer callback that posts FF_REFRESH_EVENT to the main thread. */
    static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
    void schedRefresh(milliseconds delay);
    void display(SDL_Window *screen, SDL_Renderer *renderer);
    void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
    void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
    int queuePicture(nanoseconds pts);
    /* Video thread entry point. */
    int handler();
};
/* Top-level playback state: demuxer context, the audio and video streams,
 * and the threads that service them. */
struct MovieState {
    AVIOContextPtr mIOContext;
    AVFormatCtxPtr mFormatCtx;

    SyncMaster mAVSyncType{SyncMaster::Default};

    /* External-clock base time, used by the external sync master. */
    microseconds mClockBase{0};
    std::atomic<bool> mPlaying{false};

    /* Signals the parse thread when a decoder needs more packets. */
    std::mutex mSendMtx;
    std::condition_variable mSendCond;
    /* NOTE: false/clear = need data, true/set = no data needed */
    std::atomic_flag mSendDataGood;

    std::atomic<bool> mQuit{false};

    AudioState mAudio;
    VideoState mVideo;

    std::thread mParseThread;
    std::thread mAudioThread;
    std::thread mVideoThread;

    std::string mFilename;

    MovieState(std::string fname)
      : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
    { }
    ~MovieState()
    {
        /* Only the parse thread is joined here; it is responsible for joining
         * the audio/video threads before it exits. */
        mQuit = true;
        if(mParseThread.joinable())
            mParseThread.join();
    }

    /* ffmpeg I/O interrupt callback; returns non-zero to abort blocking I/O. */
    static int decode_interrupt_cb(void *ctx);
    bool prepare();
    void setTitle(SDL_Window *window);

    nanoseconds getClock();

    nanoseconds getMasterClock();

    nanoseconds getDuration();

    int streamComponentOpen(int stream_index);
    /* Demux/parse thread entry point. */
    int parse_handler();
};
/* Returns the current audio playback time. Caller must hold mSrcMutex.
 * Prefers the device clock (ALC_SOFT_device_clock) when available, otherwise
 * derives the time from the source's queue and offset. */
nanoseconds AudioState::getClockNoLock()
{
    // The audio clock is the timestamp of the sample currently being heard.
    if(alcGetInteger64vSOFT)
    {
        // If device start time = min, we aren't playing yet.
        if(mDeviceStartTime == nanoseconds::min())
            return nanoseconds::zero();

        // Get the current device clock time and latency.
        auto device = alcGetContextsDevice(alcGetCurrentContext());
        ALCint64SOFT devtimes[2] = {0,0};
        alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
        auto latency = nanoseconds(devtimes[1]);
        auto device_time = nanoseconds(devtimes[0]);

        // The clock is simply the current device time relative to the recorded
        // start time. We can also subtract the latency to get a more accurate
        // position of where the audio device actually is in the output stream.
        return device_time - mDeviceStartTime - latency;
    }

    /* The source-based clock is based on 4 components:
     * 1 - The timestamp of the next sample to buffer (mCurrentPts)
     * 2 - The length of the source's buffer queue
     *     (AudioBufferTime*AL_BUFFERS_QUEUED)
     * 3 - The offset OpenAL is currently at in the source (the first value
     *     from AL_SAMPLE_OFFSET_LATENCY_SOFT)
     * 4 - The latency between OpenAL and the DAC (the second value from
     *     AL_SAMPLE_OFFSET_LATENCY_SOFT)
     *
     * Subtracting the length of the source queue from the next sample's
     * timestamp gives the timestamp of the sample at the start of the source
     * queue. Adding the source offset to that results in the timestamp for the
     * sample at OpenAL's current position, and subtracting the source latency
     * from that gives the timestamp of the sample currently at the DAC.
     */
    nanoseconds pts = mCurrentPts;
    if(mSource)
    {
        ALint64SOFT offset[2];
        ALint queued;
        ALint status;

        /* NOTE: The source state must be checked last, in case an underrun
         * occurs and the source stops between retrieving the offset+latency
         * and getting the state. */
        if(alGetSourcei64vSOFT)
            alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
        else
        {
            /* Fallback: whole-sample offset in 32.32 fixed point, no latency. */
            ALint ioffset;
            alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
            offset[0] = (ALint64SOFT)ioffset << 32;
            offset[1] = 0;
        }
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        alGetSourcei(mSource, AL_SOURCE_STATE, &status);

        /* If the source is AL_STOPPED, then there was an underrun and all
         * buffers are processed, so ignore the source queue. The audio thread
         * will put the source into an AL_INITIAL state and clear the queue
         * when it starts recovery. */
        if(status != AL_STOPPED)
        {
            /* 32.32 fixed-point seconds, matching AL_SAMPLE_OFFSET_LATENCY_SOFT. */
            using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;

            pts -= AudioBufferTime*queued;
            pts += std::chrono::duration_cast<nanoseconds>(
                fixed32(offset[0] / mCodecCtx->sample_rate)
            );
        }
        /* Don't offset by the latency if the source isn't playing. */
        if(status == AL_PLAYING)
            pts -= nanoseconds(offset[1]);
    }

    /* Clamp to zero in case latency pushes the result negative at startup. */
    return std::max(pts, nanoseconds::zero());
}
/* Returns true once the source exists, which implies its queue was filled. */
bool AudioState::isBufferFilled()
{
    /* All of OpenAL's buffer queueing happens under the mSrcMutex lock, as
     * does the source gen. So when we're able to grab the lock and the source
     * is valid, the queue must be full.
     */
    std::lock_guard<std::mutex> lock(mSrcMutex);
    return mSource != 0;
}
/* Starts the AL source playing and, when the device clock is available,
 * records the device time the stream effectively started at so the device
 * clock can be used for sync. Caller must hold mSrcMutex. */
void AudioState::startPlayback()
{
    alSourcePlay(mSource);
    if(alcGetInteger64vSOFT)
    {
        /* 32.32 fixed-point seconds, as returned by AL_SAMPLE_OFFSET_CLOCK_SOFT. */
        using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;

        // Subtract the total buffer queue time from the current pts to get the
        // pts of the start of the queue.
        nanoseconds startpts = mCurrentPts - AudioBufferTotalTime;
        int64_t srctimes[2]={0,0};
        alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
        auto device_time = nanoseconds(srctimes[1]);
        auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32(srctimes[0])) /
                          mCodecCtx->sample_rate;

        // The mixer may have ticked and incremented the device time and sample
        // offset, so subtract the source offset from the device time to get
        // the device time the source started at. Also subtract startpts to get
        // the device time the stream would have started at to reach where it
        // is now.
        mDeviceStartTime = device_time - src_offset - startpts;
    }
}
/* Returns the number of sample frames to skip (positive) or duplicate
 * (negative) to bring the audio clock toward the master clock. Returns 0
 * when audio itself is the sync master or the drift is negligible. */
int AudioState::getSync()
{
    if(mMovie.mAVSyncType == SyncMaster::Audio)
        return 0;

    auto ref_clock = mMovie.getMasterClock();
    auto diff = ref_clock - getClockNoLock();

    if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
    {
        /* Difference is TOO big; reset accumulated average */
        mClockDiffAvg = seconds_d64::zero();
        return 0;
    }

    /* Accumulate the diffs */
    mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
    auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
    /* NOTE(review): the dead zone is asymmetric (+threshold/2 vs -threshold),
     * so corrections kick in sooner when audio runs ahead — confirm this is
     * intentional. */
    if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
        return 0;

    /* Constrain the per-update difference to avoid exceedingly large skips */
    diff = std::min<nanoseconds>(std::max<nanoseconds>(diff, -AudioSampleCorrectionMax),
                                 AudioSampleCorrectionMax);
    return (int)std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count();
}
/* Decodes the next audio frame and converts it into mSamples via swresample.
 * Returns the number of converted sample frames, or 0 on EOF/error/quit. */
int AudioState::decodeFrame()
{
    while(!mMovie.mQuit.load(std::memory_order_relaxed))
    {
        std::unique_lock<std::mutex> lock(mQueueMtx);
        int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
        if(ret == AVERROR(EAGAIN))
        {
            /* Decoder is starved; flag the parse thread to send more packets
             * (lock/unlock of mSendMtx orders the flag clear with its wait),
             * then wait until a frame becomes available. */
            mMovie.mSendDataGood.clear(std::memory_order_relaxed);
            std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
            mMovie.mSendCond.notify_one();
            do {
                mQueueCond.wait(lock);
                ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
            } while(ret == AVERROR(EAGAIN));
        }
        lock.unlock();
        if(ret == AVERROR_EOF) break;
        /* Keep the parse thread fed even after a successful receive. */
        mMovie.mSendDataGood.clear(std::memory_order_relaxed);
        mMovie.mSendCond.notify_one();
        if(ret < 0)
        {
            std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
            return 0;
        }

        if(mDecodedFrame->nb_samples <= 0)
        {
            av_frame_unref(mDecodedFrame.get());
            continue;
        }

        /* If provided, update w/ pts */
        if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
            mCurrentPts = std::chrono::duration_cast<nanoseconds>(
                seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
            );

        /* Grow the conversion buffer if this frame is the largest yet. */
        if(mDecodedFrame->nb_samples > mSamplesMax)
        {
            av_freep(&mSamples);
            av_samples_alloc(
                &mSamples, nullptr, mCodecCtx->channels,
                mDecodedFrame->nb_samples, mDstSampleFmt, 0
            );
            mSamplesMax = mDecodedFrame->nb_samples;
        }
        /* Return the amount of sample frames converted */
        int data_size = swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
            (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
        );

        av_frame_unref(mDecodedFrame.get());
        return data_size;
    }

    return 0;
}
/* Duplicates the sample frame at `in` into `out`, `count` times. frame_size
 * (bytes) must be a multiple of sizeof(T); copies are done in units of T.
 */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
{
    const T *src = reinterpret_cast<const T*>(in);
    T *dst = reinterpret_cast<T*>(out);

    /* A frame that is exactly one T wide is a plain fill. */
    if(frame_size == sizeof(T))
    {
        std::fill_n(dst, count, *src);
        return;
    }

    /* Otherwise repeat the frame's T-sized chunks, count times over. */
    const int type_mult = frame_size / static_cast<int>(sizeof(T));
    for(int rep = 0;rep < count;++rep)
    {
        for(int j = 0;j < type_mult;++j)
            *(dst++) = src[j];
    }
}
/* Fills `samples` with `length` bytes of converted audio, skipping or
 * duplicating sample frames as getSync() dictates, and padding with silence
 * on EOF. Returns false only if no audio at all could be read. */
bool AudioState::readAudio(uint8_t *samples, int length)
{
    int sample_skip = getSync();
    int audio_size = 0;

    /* Read the next chunk of data, refill the buffer, and queue it
     * on the source */
    length /= mFrameSize;
    while(audio_size < length)
    {
        if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
        {
            int frame_len = decodeFrame();
            if(frame_len <= 0) break;

            mSamplesLen = frame_len;
            /* A negative sample_skip leaves mSamplesPos negative, which the
             * copy loop below treats as "duplicate the first sample". */
            mSamplesPos = std::min(mSamplesLen, sample_skip);
            sample_skip -= mSamplesPos;

            // Adjust the device start time and current pts by the amount we're
            // skipping/duplicating, so that the clock remains correct for the
            // current stream position.
            auto skip = nanoseconds(seconds(mSamplesPos)) / mCodecCtx->sample_rate;
            mDeviceStartTime -= skip;
            mCurrentPts += skip;
            continue;
        }

        int rem = length - audio_size;
        if(mSamplesPos >= 0)
        {
            /* Normal case: copy straight out of the converted buffer. */
            int len = mSamplesLen - mSamplesPos;
            if(rem > len) rem = len;
            memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
        }
        else
        {
            rem = std::min(rem, -mSamplesPos);

            /* Add samples by copying the first sample */
            if((mFrameSize&7) == 0)
                sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&3) == 0)
                sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&1) == 0)
                sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
            else
                sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
        }

        mSamplesPos += rem;
        mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
        samples += rem*mFrameSize;
        audio_size += rem;
    }
    if(audio_size <= 0)
        return false;

    if(audio_size < length)
    {
        /* Pad the remainder with silence (0x80 is silence for unsigned 8-bit). */
        int rem = length - audio_size;
        std::fill_n(samples, rem*mFrameSize,
                    (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
        mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
        audio_size += rem;
    }
    return true;
}
/* AL_SOFT_events callback. Runs on the AL implementation's thread: wakes the
 * audio thread on buffer completion, logs other events, and flags the state
 * as disconnected when the device is lost. */
void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALuint param,
                                           ALsizei length, const ALchar *message,
                                           void *userParam)
{
    AudioState *self = reinterpret_cast<AudioState*>(userParam);

    if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
    {
        /* Temporarily lock the source mutex to ensure it's not between
         * checking the processed count and going to sleep.
         */
        std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
        self->mSrcCond.notify_one();
        return;
    }

    std::cout<< "---- AL Event on AudioState "<<self<<" ----\nEvent: ";
    switch(eventType)
    {
        case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
        case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
        case AL_EVENT_TYPE_ERROR_SOFT: std::cout<< "API error"; break;
        case AL_EVENT_TYPE_PERFORMANCE_SOFT: std::cout<< "Performance"; break;
        case AL_EVENT_TYPE_DEPRECATED_SOFT: std::cout<< "Deprecated"; break;
        case AL_EVENT_TYPE_DISCONNECTED_SOFT: std::cout<< "Disconnected"; break;
        default: std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<
                            std::dec<<std::setw(0)<<std::setfill(' '); break;
    }
    std::cout<< "\n"
        "Object ID: "<<object<<'\n'<<
        "Parameter: "<<param<<'\n'<<
        "Message: "<<std::string(message, length)<<"\n----"<<
        std::endl;

    if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
    {
        /* Clear the connected flag under the source lock, then wake the audio
         * thread so it notices the disconnect. */
        { std::lock_guard<std::mutex> lock(self->mSrcMutex);
          self->mConnected.clear(std::memory_order_release);
        }
        std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
        self->mSrcCond.notify_one();
    }
}
/* Audio thread entry point: picks an OpenAL format matching the codec's
 * sample format/channel layout, sets up swresample and the AL source/buffers
 * (mapped persistently when AL_SOFT_map_buffer is available), then loops
 * decoding and (re)queueing buffers until EOF, quit, or disconnect. */
int AudioState::handler()
{
    const std::array<ALenum,6> types{{
        AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
        AL_EVENT_TYPE_ERROR_SOFT, AL_EVENT_TYPE_PERFORMANCE_SOFT, AL_EVENT_TYPE_DEPRECATED_SOFT,
        AL_EVENT_TYPE_DISCONNECTED_SOFT
    }};
    std::unique_lock<std::mutex> lock(mSrcMutex);
    milliseconds sleep_time = AudioBufferTime / 3;
    ALenum fmt;

    /* With event notifications we can sleep for a whole queue's worth of time
     * and rely on the buffer-completed callback to wake us earlier. */
    if(alEventControlSOFT)
    {
        alEventControlSOFT(types.size(), types.data(), AL_TRUE);
        alEventCallbackSOFT(EventCallback, this);
        sleep_time = AudioBufferTotalTime;
    }

    /* Find a suitable format for OpenAL. */
    mDstChanLayout = 0;
    if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
    {
        /* Unsigned 8-bit output. */
        mDstSampleFmt = AV_SAMPLE_FMT_U8;
        mFrameSize = 1;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 8;
            mFormat = fmt;
        }
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 6;
            mFormat = fmt;
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO8;
        }
        if(!mDstChanLayout)
        {
            /* Fall back to downmixing to stereo. */
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = AL_FORMAT_STEREO8;
        }
    }
    if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
       alIsExtensionPresent("AL_EXT_FLOAT32"))
    {
        /* 32-bit float output, when the AL supports it. */
        mDstSampleFmt = AV_SAMPLE_FMT_FLT;
        mFrameSize = 4;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 8;
            mFormat = fmt;
        }
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 6;
            mFormat = fmt;
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO_FLOAT32;
        }
        if(!mDstChanLayout)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = AL_FORMAT_STEREO_FLOAT32;
        }
    }
    if(!mDstChanLayout)
    {
        /* Default: signed 16-bit output. */
        mDstSampleFmt = AV_SAMPLE_FMT_S16;
        mFrameSize = 2;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 8;
            mFormat = fmt;
        }
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 6;
            mFormat = fmt;
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO16;
        }
        if(!mDstChanLayout)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = AL_FORMAT_STEREO16;
        }
    }
    void *samples = nullptr;
    /* Bytes per AL buffer: AudioBufferTime worth of sample frames. */
    ALsizei buffer_len = std::chrono::duration_cast<std::chrono::duration<int>>(
        mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize;

    mSamples = NULL;
    mSamplesMax = 0;
    mSamplesPos = 0;
    mSamplesLen = 0;

    mDecodedFrame.reset(av_frame_alloc());
    if(!mDecodedFrame)
    {
        std::cerr<< "Failed to allocate audio frame" <<std::endl;
        goto finish;
    }

    mSwresCtx.reset(swr_alloc_set_opts(nullptr,
        mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
        mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
            (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
        mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
        0, nullptr
    ));
    if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
    {
        std::cerr<< "Failed to initialize audio converter" <<std::endl;
        goto finish;
    }

    mBuffers.assign(AudioBufferTotalTime / AudioBufferTime, 0);
    alGenBuffers(mBuffers.size(), mBuffers.data());
    alGenSources(1, &mSource);

    if(EnableDirectOut)
        alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);

    if(alGetError() != AL_NO_ERROR)
        goto finish;

    /* Without AL_SOFT_map_buffer we need a staging buffer for alBufferData;
     * with it, we write converted samples directly into the mapped storage. */
    if(!alBufferStorageSOFT)
        samples = av_malloc(buffer_len);
    else
    {
        for(ALuint bufid : mBuffers)
            alBufferStorageSOFT(bufid, mFormat, nullptr, buffer_len, mCodecCtx->sample_rate,
                                AL_MAP_WRITE_BIT_SOFT);
        if(alGetError() != AL_NO_ERROR)
        {
            fprintf(stderr, "Failed to use mapped buffers\n");
            samples = av_malloc(buffer_len);
        }
    }

    while(alGetError() == AL_NO_ERROR && !mMovie.mQuit.load(std::memory_order_relaxed) &&
          mConnected.test_and_set(std::memory_order_relaxed))
    {
        /* First remove any processed buffers. */
        ALint processed;
        alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
        while(processed > 0)
        {
            std::array<ALuint,4> bids;
            alSourceUnqueueBuffers(mSource, std::min<ALsizei>(bids.size(), processed),
                                   bids.data());
            processed -= std::min<ALsizei>(bids.size(), processed);
        }

        /* Refill the buffer queue. */
        ALint queued;
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        while((ALuint)queued < mBuffers.size())
        {
            ALuint bufid = mBuffers[mBufferIdx];

            /* Write either into the staging buffer or the mapped AL buffer. */
            uint8_t *ptr = reinterpret_cast<uint8_t*>(
                samples ? samples : alMapBufferSOFT(bufid, 0, buffer_len, AL_MAP_WRITE_BIT_SOFT)
            );
            if(!ptr) break;

            /* Read the next chunk of data, filling the buffer, and queue it on
             * the source */
            bool got_audio = readAudio(ptr, buffer_len);
            if(!samples) alUnmapBufferSOFT(bufid);
            if(!got_audio) break;

            if(samples)
                alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate);

            alSourceQueueBuffers(mSource, 1, &bufid);
            mBufferIdx = (mBufferIdx+1) % mBuffers.size();
            ++queued;
        }
        if(queued == 0)
            break;

        /* Check that the source is playing. */
        ALint state;
        alGetSourcei(mSource, AL_SOURCE_STATE, &state);
        if(state == AL_STOPPED)
        {
            /* AL_STOPPED means there was an underrun. Clear the buffer queue
             * since this likely means we're late, and rewind the source to get
             * it back into an AL_INITIAL state.
             */
            alSourceRewind(mSource);
            alSourcei(mSource, AL_BUFFER, 0);
            continue;
        }

        /* (re)start the source if needed, and wait for a buffer to finish */
        if(state != AL_PLAYING && state != AL_PAUSED &&
           mMovie.mPlaying.load(std::memory_order_relaxed))
            startPlayback();

        mSrcCond.wait_for(lock, sleep_time);
    }

    alSourceRewind(mSource);
    alSourcei(mSource, AL_BUFFER, 0);

finish:
    av_freep(&samples);

    if(alEventControlSOFT)
    {
        alEventControlSOFT(types.size(), types.data(), AL_FALSE);
        alEventCallbackSOFT(nullptr, nullptr);
    }

    return 0;
}
986 nanoseconds VideoState::getClock()
988 /* NOTE: This returns incorrect times while not playing. */
989 auto delta = get_avtime() - mCurrentPtsTime;
990 return mCurrentPts + delta;
993 bool VideoState::isBufferFilled()
995 std::unique_lock<std::mutex> lock(mPictQMutex);
996 return mPictQSize >= mPictQ.size();
/* SDL timer callback: posts an FF_REFRESH_EVENT (with the VideoState in
 * data1) so refreshTimer runs on the main thread. */
Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
{
    SDL_Event evt{};
    evt.user.type = FF_REFRESH_EVENT;
    evt.user.data1 = opaque;
    SDL_PushEvent(&evt);
    return 0; /* 0 means stop timer */
}
/* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
void VideoState::schedRefresh(milliseconds delay)
{
    SDL_AddTimer(delay.count(), sdl_refresh_timer_cb, this);
}
/* Called by VideoState::refreshTimer to display the next video frame. */
void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
{
    Picture *vp = &mPictQ[mPictQRead];

    if(!vp->mImage)
        return;

    float aspect_ratio;
    int win_w, win_h;
    int w, h, x, y;

    /* Display aspect from the sample (pixel) aspect ratio, if one is set. */
    if(mCodecCtx->sample_aspect_ratio.num == 0)
        aspect_ratio = 0.0f;
    else
    {
        aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
                       mCodecCtx->height;
    }
    if(aspect_ratio <= 0.0f)
        aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;

    /* Letterbox: fit the frame to the window, rounding sizes to a multiple
     * of 4, and center it. */
    SDL_GetWindowSize(screen, &win_w, &win_h);
    h = win_h;
    w = ((int)rint(h * aspect_ratio) + 3) & ~3;
    if(w > win_w)
    {
        w = win_w;
        h = ((int)rint(w / aspect_ratio) + 3) & ~3;
    }
    x = (win_w - w) / 2;
    y = (win_h - h) / 2;

    SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
    SDL_Rect dst_rect{ x, y, w, h };
    SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
    SDL_RenderPresent(renderer);
}
/* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
 * was created. It handles the display of the next decoded video frame (if not
 * falling behind), and sets up the timer for the following video frame.
 */
void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
{
    if(!mStream)
    {
        /* No video stream: either finish up (signal final update) or keep
         * polling slowly until one appears or EOS is reached. */
        if(mEOS)
        {
            mFinalUpdate = true;
            std::unique_lock<std::mutex>(mPictQMutex).unlock();
            mPictQCond.notify_all();
            return;
        }
        schedRefresh(milliseconds(100));
        return;
    }
    if(!mMovie.mPlaying.load(std::memory_order_relaxed))
    {
        schedRefresh(milliseconds(1));
        return;
    }

    std::unique_lock<std::mutex> lock(mPictQMutex);
retry:
    if(mPictQSize == 0)
    {
        /* Nothing decoded yet; retry shortly (or finish on EOS). */
        if(mEOS)
            mFinalUpdate = true;
        else
            schedRefresh(milliseconds(1));
        lock.unlock();
        mPictQCond.notify_all();
        return;
    }

    Picture *vp = &mPictQ[mPictQRead];
    mCurrentPts = vp->mPts;
    mCurrentPtsTime = get_avtime();

    /* Get delay using the frame pts and the pts from last frame. */
    auto delay = vp->mPts - mFrameLastPts;
    if(delay <= seconds::zero() || delay >= seconds(1))
    {
        /* If incorrect delay, use previous one. */
        delay = mFrameLastDelay;
    }
    /* Save for next frame. */
    mFrameLastDelay = delay;
    mFrameLastPts = vp->mPts;

    /* Update delay to sync to clock if not master source. */
    if(mMovie.mAVSyncType != SyncMaster::Video)
    {
        auto ref_clock = mMovie.getMasterClock();
        auto diff = vp->mPts - ref_clock;

        /* Skip or repeat the frame. Take delay into account. */
        auto sync_threshold = std::min<nanoseconds>(delay, VideoSyncThreshold);
        /* NOTE(review): this guard applies the skip/repeat adjustment only
         * when |diff| >= AVNoSyncThreshold; ffplay applies it when |diff| is
         * WITHIN the no-sync threshold — confirm the negation is intended. */
        if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
        {
            if(diff <= -sync_threshold)
                delay = nanoseconds::zero();
            else if(diff >= sync_threshold)
                delay *= 2;
        }
    }

    mFrameTimer += delay;
    /* Compute the REAL delay. */
    auto actual_delay = mFrameTimer - get_avtime();
    if(!(actual_delay >= VideoSyncThreshold))
    {
        /* We don't have time to handle this picture, just skip to the next one. */
        mPictQRead = (mPictQRead+1)%mPictQ.size();
        mPictQSize--;
        goto retry;
    }
    schedRefresh(std::chrono::duration_cast<milliseconds>(actual_delay));

    /* Show the picture! */
    display(screen, renderer);

    /* Update queue for next picture. */
    mPictQRead = (mPictQRead+1)%mPictQ.size();
    mPictQSize--;
    lock.unlock();
    mPictQCond.notify_all();
}
1144 /* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
1145 * main thread where the renderer was created.
1147 void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
1149 Picture *vp = &mPictQ[mPictQWrite];
1150 bool fmt_updated = false;
1152 /* allocate or resize the buffer! */
1153 if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
1155 fmt_updated = true;
1156 if(vp->mImage)
1157 SDL_DestroyTexture(vp->mImage);
1158 vp->mImage = SDL_CreateTexture(
1159 renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
1160 mCodecCtx->coded_width, mCodecCtx->coded_height
1162 if(!vp->mImage)
1163 std::cerr<< "Failed to create YV12 texture!" <<std::endl;
1164 vp->mWidth = mCodecCtx->width;
1165 vp->mHeight = mCodecCtx->height;
1167 if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
1169 /* For the first update, set the window size to the video size. */
1170 mFirstUpdate = false;
1172 int w = vp->mWidth;
1173 int h = vp->mHeight;
1174 if(mCodecCtx->sample_aspect_ratio.den != 0)
1176 double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
1177 if(aspect_ratio >= 1.0)
1178 w = (int)(w*aspect_ratio + 0.5);
1179 else if(aspect_ratio > 0.0)
1180 h = (int)(h/aspect_ratio + 0.5);
1182 SDL_SetWindowSize(screen, w, h);
1186 if(vp->mImage)
1188 AVFrame *frame = mDecodedFrame.get();
1189 void *pixels = nullptr;
1190 int pitch = 0;
1192 if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
1193 SDL_UpdateYUVTexture(vp->mImage, nullptr,
1194 frame->data[0], frame->linesize[0],
1195 frame->data[1], frame->linesize[1],
1196 frame->data[2], frame->linesize[2]
1198 else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
1199 std::cerr<< "Failed to lock texture" <<std::endl;
1200 else
1202 // Convert the image into YUV format that SDL uses
1203 int coded_w = mCodecCtx->coded_width;
1204 int coded_h = mCodecCtx->coded_height;
1205 int w = mCodecCtx->width;
1206 int h = mCodecCtx->height;
1207 if(!mSwscaleCtx || fmt_updated)
1209 mSwscaleCtx.reset(sws_getContext(
1210 w, h, mCodecCtx->pix_fmt,
1211 w, h, AV_PIX_FMT_YUV420P, 0,
1212 nullptr, nullptr, nullptr
1216 /* point pict at the queue */
1217 uint8_t *pict_data[3];
1218 pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
1219 pict_data[1] = pict_data[0] + coded_w*coded_h;
1220 pict_data[2] = pict_data[1] + coded_w*coded_h/4;
1222 int pict_linesize[3];
1223 pict_linesize[0] = pitch;
1224 pict_linesize[1] = pitch / 2;
1225 pict_linesize[2] = pitch / 2;
1227 sws_scale(mSwscaleCtx.get(), (const uint8_t**)frame->data,
1228 frame->linesize, 0, h, pict_data, pict_linesize);
1229 SDL_UnlockTexture(vp->mImage);
1233 vp->mUpdated.store(true, std::memory_order_release);
1234 std::unique_lock<std::mutex>(mPictQMutex).unlock();
1235 mPictQCond.notify_one();
1238 int VideoState::queuePicture(nanoseconds pts)
1240 /* Wait until we have space for a new pic */
1241 std::unique_lock<std::mutex> lock(mPictQMutex);
1242 while(mPictQSize >= mPictQ.size() && !mMovie.mQuit.load(std::memory_order_relaxed))
1243 mPictQCond.wait(lock);
1244 lock.unlock();
1246 if(mMovie.mQuit.load(std::memory_order_relaxed))
1247 return -1;
1249 Picture *vp = &mPictQ[mPictQWrite];
1251 /* We have to create/update the picture in the main thread */
1252 vp->mUpdated.store(false, std::memory_order_relaxed);
1253 SDL_Event evt{};
1254 evt.user.type = FF_UPDATE_EVENT;
1255 evt.user.data1 = this;
1256 SDL_PushEvent(&evt);
1258 /* Wait until the picture is updated. */
1259 lock.lock();
1260 while(!vp->mUpdated.load(std::memory_order_relaxed))
1262 if(mMovie.mQuit.load(std::memory_order_relaxed))
1263 return -1;
1264 mPictQCond.wait(lock);
1266 if(mMovie.mQuit.load(std::memory_order_relaxed))
1267 return -1;
1268 vp->mPts = pts;
1270 mPictQWrite = (mPictQWrite+1)%mPictQ.size();
1271 mPictQSize++;
1272 lock.unlock();
1274 return 0;
1277 int VideoState::handler()
1279 mDecodedFrame.reset(av_frame_alloc());
1280 while(!mMovie.mQuit.load(std::memory_order_relaxed))
1282 std::unique_lock<std::mutex> lock(mQueueMtx);
1283 /* Decode video frame */
1284 int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
1285 if(ret == AVERROR(EAGAIN))
1287 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
1288 std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
1289 mMovie.mSendCond.notify_one();
1290 do {
1291 mQueueCond.wait(lock);
1292 ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
1293 } while(ret == AVERROR(EAGAIN));
1295 lock.unlock();
1296 if(ret == AVERROR_EOF) break;
1297 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
1298 mMovie.mSendCond.notify_one();
1299 if(ret < 0)
1301 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
1302 continue;
1305 /* Get the PTS for this frame. */
1306 nanoseconds pts;
1307 if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
1308 mClock = std::chrono::duration_cast<nanoseconds>(
1309 seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
1311 pts = mClock;
1313 /* Update the video clock to the next expected PTS. */
1314 auto frame_delay = av_q2d(mCodecCtx->time_base);
1315 frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
1316 mClock += std::chrono::duration_cast<nanoseconds>(seconds_d64(frame_delay));
1318 if(queuePicture(pts) < 0)
1319 break;
1320 av_frame_unref(mDecodedFrame.get());
1322 mEOS = true;
1324 std::unique_lock<std::mutex> lock(mPictQMutex);
1325 if(mMovie.mQuit.load(std::memory_order_relaxed))
1327 mPictQRead = 0;
1328 mPictQWrite = 0;
1329 mPictQSize = 0;
1331 while(!mFinalUpdate)
1332 mPictQCond.wait(lock);
1334 return 0;
1338 int MovieState::decode_interrupt_cb(void *ctx)
1340 return reinterpret_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
1343 bool MovieState::prepare()
1345 AVIOContext *avioctx = nullptr;
1346 AVIOInterruptCB intcb = { decode_interrupt_cb, this };
1347 if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
1349 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1350 return false;
1352 mIOContext.reset(avioctx);
1354 /* Open movie file. If avformat_open_input fails it will automatically free
1355 * this context, so don't set it onto a smart pointer yet.
1357 AVFormatContext *fmtctx = avformat_alloc_context();
1358 fmtctx->pb = mIOContext.get();
1359 fmtctx->interrupt_callback = intcb;
1360 if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
1362 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1363 return false;
1365 mFormatCtx.reset(fmtctx);
1367 /* Retrieve stream information */
1368 if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
1370 std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
1371 return false;
1374 mVideo.schedRefresh(milliseconds(40));
1376 mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
1377 return true;
1380 void MovieState::setTitle(SDL_Window *window)
1382 auto pos1 = mFilename.rfind('/');
1383 auto pos2 = mFilename.rfind('\\');
1384 auto fpos = ((pos1 == std::string::npos) ? pos2 :
1385 (pos2 == std::string::npos) ? pos1 :
1386 std::max(pos1, pos2)) + 1;
1387 SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1390 nanoseconds MovieState::getClock()
1392 if(!mPlaying.load(std::memory_order_relaxed))
1393 return nanoseconds::zero();
1394 return get_avtime() - mClockBase;
1397 nanoseconds MovieState::getMasterClock()
1399 if(mAVSyncType == SyncMaster::Video)
1400 return mVideo.getClock();
1401 if(mAVSyncType == SyncMaster::Audio)
1402 return mAudio.getClock();
1403 return getClock();
1406 nanoseconds MovieState::getDuration()
1407 { return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
1409 int MovieState::streamComponentOpen(int stream_index)
1411 if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
1412 return -1;
1414 /* Get a pointer to the codec context for the stream, and open the
1415 * associated codec.
1417 AVCodecCtxPtr avctx(avcodec_alloc_context3(nullptr));
1418 if(!avctx) return -1;
1420 if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
1421 return -1;
1423 AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
1424 if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
1426 std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
1427 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
1428 return -1;
1431 /* Initialize and start the media type handler */
1432 switch(avctx->codec_type)
1434 case AVMEDIA_TYPE_AUDIO:
1435 mAudio.mStream = mFormatCtx->streams[stream_index];
1436 mAudio.mCodecCtx = std::move(avctx);
1438 mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
1439 break;
1441 case AVMEDIA_TYPE_VIDEO:
1442 mVideo.mStream = mFormatCtx->streams[stream_index];
1443 mVideo.mCodecCtx = std::move(avctx);
1445 mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
1446 break;
1448 default:
1449 return -1;
1452 return stream_index;
1455 int MovieState::parse_handler()
1457 int video_index = -1;
1458 int audio_index = -1;
1460 /* Dump information about file onto standard error */
1461 av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);
1463 /* Find the first video and audio streams */
1464 for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
1466 auto codecpar = mFormatCtx->streams[i]->codecpar;
1467 if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
1468 video_index = streamComponentOpen(i);
1469 else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
1470 audio_index = streamComponentOpen(i);
1473 if(video_index < 0 && audio_index < 0)
1475 std::cerr<< mFilename<<": could not open codecs" <<std::endl;
1476 mQuit = true;
1479 PacketQueue audio_queue, video_queue;
1480 bool input_finished = false;
1482 /* Main packet reading/dispatching loop */
1483 while(!mQuit.load(std::memory_order_relaxed) && !input_finished)
1485 AVPacket packet;
1486 if(av_read_frame(mFormatCtx.get(), &packet) < 0)
1487 input_finished = true;
1488 else
1490 /* Copy the packet into the queue it's meant for. */
1491 if(packet.stream_index == video_index)
1492 video_queue.put(&packet);
1493 else if(packet.stream_index == audio_index)
1494 audio_queue.put(&packet);
1495 av_packet_unref(&packet);
1498 do {
1499 /* Send whatever queued packets we have. */
1500 if(!audio_queue.empty())
1502 std::unique_lock<std::mutex> lock(mAudio.mQueueMtx);
1503 int ret;
1504 do {
1505 ret = avcodec_send_packet(mAudio.mCodecCtx.get(), audio_queue.front());
1506 if(ret != AVERROR(EAGAIN)) audio_queue.pop();
1507 } while(ret != AVERROR(EAGAIN) && !audio_queue.empty());
1508 lock.unlock();
1509 mAudio.mQueueCond.notify_one();
1511 if(!video_queue.empty())
1513 std::unique_lock<std::mutex> lock(mVideo.mQueueMtx);
1514 int ret;
1515 do {
1516 ret = avcodec_send_packet(mVideo.mCodecCtx.get(), video_queue.front());
1517 if(ret != AVERROR(EAGAIN)) video_queue.pop();
1518 } while(ret != AVERROR(EAGAIN) && !video_queue.empty());
1519 lock.unlock();
1520 mVideo.mQueueCond.notify_one();
1522 /* If the queues are completely empty, or it's not full and there's
1523 * more input to read, go get more.
1525 size_t queue_size = audio_queue.totalSize() + video_queue.totalSize();
1526 if(queue_size == 0 || (queue_size < MAX_QUEUE_SIZE && !input_finished))
1527 break;
1529 if(!mPlaying.load(std::memory_order_relaxed))
1531 if((!mAudio.mCodecCtx || mAudio.isBufferFilled()) &&
1532 (!mVideo.mCodecCtx || mVideo.isBufferFilled()))
1534 /* Set the base time 50ms ahead of the current av time. */
1535 mClockBase = get_avtime() + milliseconds(50);
1536 mVideo.mCurrentPtsTime = mClockBase;
1537 mVideo.mFrameTimer = mVideo.mCurrentPtsTime;
1538 mAudio.startPlayback();
1539 mPlaying.store(std::memory_order_release);
1542 /* Nothing to send or get for now, wait a bit and try again. */
1543 { std::unique_lock<std::mutex> lock(mSendMtx);
1544 if(mSendDataGood.test_and_set(std::memory_order_relaxed))
1545 mSendCond.wait_for(lock, milliseconds(10));
1547 } while(!mQuit.load(std::memory_order_relaxed));
1549 /* Pass a null packet to finish the send buffers (the receive functions
1550 * will get AVERROR_EOF when emptied).
1552 if(mVideo.mCodecCtx)
1554 { std::lock_guard<std::mutex> lock(mVideo.mQueueMtx);
1555 avcodec_send_packet(mVideo.mCodecCtx.get(), nullptr);
1557 mVideo.mQueueCond.notify_one();
1559 if(mAudio.mCodecCtx)
1561 { std::lock_guard<std::mutex> lock(mAudio.mQueueMtx);
1562 avcodec_send_packet(mAudio.mCodecCtx.get(), nullptr);
1564 mAudio.mQueueCond.notify_one();
1566 video_queue.clear();
1567 audio_queue.clear();
1569 /* all done - wait for it */
1570 if(mVideoThread.joinable())
1571 mVideoThread.join();
1572 if(mAudioThread.joinable())
1573 mAudioThread.join();
1575 mVideo.mEOS = true;
1576 std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
1577 while(!mVideo.mFinalUpdate)
1578 mVideo.mPictQCond.wait(lock);
1579 lock.unlock();
1581 SDL_Event evt{};
1582 evt.user.type = FF_MOVIE_DONE_EVENT;
1583 SDL_PushEvent(&evt);
1585 return 0;
1589 // Helper class+method to print the time with human-readable formatting.
1590 struct PrettyTime {
1591 seconds mTime;
1593 inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
1595 using hours = std::chrono::hours;
1596 using minutes = std::chrono::minutes;
1597 using std::chrono::duration_cast;
1599 seconds t = rhs.mTime;
1600 if(t.count() < 0)
1602 os << '-';
1603 t *= -1;
1606 // Only handle up to hour formatting
1607 if(t >= hours(1))
1608 os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
1609 << (duration_cast<minutes>(t).count() % 60) << 'm';
1610 else
1611 os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
1612 os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
1613 << std::setfill(' ');
1614 return os;
1617 } // namespace
1620 int main(int argc, char *argv[])
1622 std::unique_ptr<MovieState> movState;
1624 if(argc < 2)
1626 std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
1627 return 1;
1629 /* Register all formats and codecs */
1630 av_register_all();
1631 /* Initialize networking protocols */
1632 avformat_network_init();
1634 if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
1636 std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
1637 return 1;
1640 /* Make a window to put our video */
1641 SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
1642 if(!screen)
1644 std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
1645 return 1;
1647 /* Make a renderer to handle the texture image surface and rendering. */
1648 Uint32 render_flags = SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC;
1649 SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, render_flags);
1650 if(renderer)
1652 SDL_RendererInfo rinf{};
1653 bool ok = false;
1655 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1656 * software renderer. */
1657 if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1659 for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
1660 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
1662 if(!ok)
1664 std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
1665 SDL_DestroyRenderer(renderer);
1666 renderer = nullptr;
1669 if(!renderer)
1671 render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
1672 renderer = SDL_CreateRenderer(screen, -1, render_flags);
1674 if(!renderer)
1676 std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
1677 return 1;
1679 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1680 SDL_RenderFillRect(renderer, nullptr);
1681 SDL_RenderPresent(renderer);
1683 /* Open an audio device */
1684 int fileidx = 1;
1685 ALCdevice *device = [argc,argv,&fileidx]() -> ALCdevice*
1687 ALCdevice *dev = NULL;
1688 if(argc > 3 && strcmp(argv[1], "-device") == 0)
1690 fileidx = 3;
1691 dev = alcOpenDevice(argv[2]);
1692 if(dev) return dev;
1693 std::cerr<< "Failed to open \""<<argv[2]<<"\" - trying default" <<std::endl;
1695 return alcOpenDevice(nullptr);
1696 }();
1697 ALCcontext *context = alcCreateContext(device, nullptr);
1698 if(!context || alcMakeContextCurrent(context) == ALC_FALSE)
1700 std::cerr<< "Failed to set up audio device" <<std::endl;
1701 if(context)
1702 alcDestroyContext(context);
1703 return 1;
1706 const ALCchar *name = nullptr;
1707 if(alcIsExtensionPresent(device, "ALC_ENUMERATE_ALL_EXT"))
1708 name = alcGetString(device, ALC_ALL_DEVICES_SPECIFIER);
1709 if(!name || alcGetError(device) != AL_NO_ERROR)
1710 name = alcGetString(device, ALC_DEVICE_SPECIFIER);
1711 std::cout<< "Opened \""<<name<<"\"" <<std::endl;
1713 if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
1715 std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
1716 alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
1717 alcGetProcAddress(device, "alcGetInteger64vSOFT")
1721 if(alIsExtensionPresent("AL_SOFT_source_latency"))
1723 std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
1724 alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
1725 alGetProcAddress("alGetSourcei64vSOFT")
1728 if(alIsExtensionPresent("AL_SOFTX_map_buffer"))
1730 std::cout<< "Found AL_SOFT_map_buffer" <<std::endl;
1731 alBufferStorageSOFT = reinterpret_cast<LPALBUFFERSTORAGESOFT>(
1732 alGetProcAddress("alBufferStorageSOFT"));
1733 alMapBufferSOFT = reinterpret_cast<LPALMAPBUFFERSOFT>(
1734 alGetProcAddress("alMapBufferSOFT"));
1735 alUnmapBufferSOFT = reinterpret_cast<LPALUNMAPBUFFERSOFT>(
1736 alGetProcAddress("alUnmapBufferSOFT"));
1738 if(alIsExtensionPresent("AL_SOFTX_events"))
1740 std::cout<< "Found AL_SOFT_events" <<std::endl;
1741 alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
1742 alGetProcAddress("alEventControlSOFT"));
1743 alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
1744 alGetProcAddress("alEventCallbackSOFT"));
1747 if(fileidx < argc && strcmp(argv[fileidx], "-direct") == 0)
1749 ++fileidx;
1750 if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
1751 std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
1752 else
1754 std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
1755 EnableDirectOut = true;
1759 while(fileidx < argc && !movState)
1761 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1762 if(!movState->prepare()) movState = nullptr;
1764 if(!movState)
1766 std::cerr<< "Could not start a video" <<std::endl;
1767 return 1;
1769 movState->setTitle(screen);
1771 /* Default to going to the next movie at the end of one. */
1772 enum class EomAction {
1773 Next, Quit
1774 } eom_action = EomAction::Next;
1775 seconds last_time(-1);
1776 SDL_Event event;
1777 while(1)
1779 int have_evt = SDL_WaitEventTimeout(&event, 10);
1781 auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
1782 if(cur_time != last_time)
1784 auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
1785 std::cout<< "\r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
1786 last_time = cur_time;
1788 if(!have_evt) continue;
1790 switch(event.type)
1792 case SDL_KEYDOWN:
1793 switch(event.key.keysym.sym)
1795 case SDLK_ESCAPE:
1796 movState->mQuit = true;
1797 eom_action = EomAction::Quit;
1798 break;
1800 case SDLK_n:
1801 movState->mQuit = true;
1802 eom_action = EomAction::Next;
1803 break;
1805 default:
1806 break;
1808 break;
1810 case SDL_WINDOWEVENT:
1811 switch(event.window.event)
1813 case SDL_WINDOWEVENT_RESIZED:
1814 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1815 SDL_RenderFillRect(renderer, nullptr);
1816 break;
1818 default:
1819 break;
1821 break;
1823 case SDL_QUIT:
1824 movState->mQuit = true;
1825 eom_action = EomAction::Quit;
1826 break;
1828 case FF_UPDATE_EVENT:
1829 reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
1830 screen, renderer
1832 break;
1834 case FF_REFRESH_EVENT:
1835 reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
1836 screen, renderer
1838 break;
1840 case FF_MOVIE_DONE_EVENT:
1841 std::cout<<'\n';
1842 last_time = seconds(-1);
1843 if(eom_action != EomAction::Quit)
1845 movState = nullptr;
1846 while(fileidx < argc && !movState)
1848 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1849 if(!movState->prepare()) movState = nullptr;
1851 if(movState)
1853 movState->setTitle(screen);
1854 break;
1858 /* Nothing more to play. Shut everything down and quit. */
1859 movState = nullptr;
1861 alcMakeContextCurrent(nullptr);
1862 alcDestroyContext(context);
1863 alcCloseDevice(device);
1865 SDL_DestroyRenderer(renderer);
1866 renderer = nullptr;
1867 SDL_DestroyWindow(screen);
1868 screen = nullptr;
1870 SDL_Quit();
1871 exit(0);
1873 default:
1874 break;
1878 std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
1879 return 1;