Try to improve alffplay underrun device time adjustment
[openal-soft.git] / examples / alffplay.cpp
blob771e5ded727c9ab5bff09305a0b554eeaf965d51
1 /*
2 * An example showing how to play a stream sync'd to video, using ffmpeg.
4 * Requires C++11.
5 */
7 #include <condition_variable>
8 #include <functional>
9 #include <algorithm>
10 #include <iostream>
11 #include <iomanip>
12 #include <cstring>
13 #include <limits>
14 #include <thread>
15 #include <chrono>
16 #include <atomic>
17 #include <vector>
18 #include <mutex>
19 #include <deque>
20 #include <array>
21 #include <cmath>
22 #include <string>
24 extern "C" {
25 #include "libavcodec/avcodec.h"
26 #include "libavformat/avformat.h"
27 #include "libavformat/avio.h"
28 #include "libavutil/time.h"
29 #include "libavutil/pixfmt.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/channel_layout.h"
32 #include "libswscale/swscale.h"
33 #include "libswresample/swresample.h"
36 #include "SDL.h"
38 #include "AL/alc.h"
39 #include "AL/al.h"
40 #include "AL/alext.h"
42 #include "common/alhelpers.h"
44 extern "C" {
45 /* Undefine this to disable use of experimental extensions. Don't use for
46  * production code! Interfaces and behavior may change prior to being
47  * finalized. */
49 #define ALLOW_EXPERIMENTAL_EXTS
51 #ifdef ALLOW_EXPERIMENTAL_EXTS
52 #ifndef AL_SOFT_map_buffer
53 #define AL_SOFT_map_buffer 1
54 typedef unsigned int ALbitfieldSOFT;
55 #define AL_MAP_READ_BIT_SOFT 0x00000001
56 #define AL_MAP_WRITE_BIT_SOFT 0x00000002
57 #define AL_MAP_PERSISTENT_BIT_SOFT 0x00000004
58 #define AL_PRESERVE_DATA_BIT_SOFT 0x00000008
59 typedef void (AL_APIENTRY*LPALBUFFERSTORAGESOFT)(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
60 typedef void* (AL_APIENTRY*LPALMAPBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length, ALbitfieldSOFT access);
61 typedef void (AL_APIENTRY*LPALUNMAPBUFFERSOFT)(ALuint buffer);
62 typedef void (AL_APIENTRY*LPALFLUSHMAPPEDBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length);
63 #endif
65 #ifndef AL_SOFT_events
66 #define AL_SOFT_events 1
67 #define AL_EVENT_CALLBACK_FUNCTION_SOFT 0x1220
68 #define AL_EVENT_CALLBACK_USER_PARAM_SOFT 0x1221
69 #define AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT 0x1222
70 #define AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT 0x1223
71 #define AL_EVENT_TYPE_ERROR_SOFT 0x1224
72 #define AL_EVENT_TYPE_PERFORMANCE_SOFT 0x1225
73 #define AL_EVENT_TYPE_DEPRECATED_SOFT 0x1226
74 #define AL_EVENT_TYPE_DISCONNECTED_SOFT 0x1227
75 typedef void (AL_APIENTRY*ALEVENTPROCSOFT)(ALenum eventType, ALuint object, ALuint param,
76 ALsizei length, const ALchar *message,
77 void *userParam);
78 typedef void (AL_APIENTRY*LPALEVENTCONTROLSOFT)(ALsizei count, const ALenum *types, ALboolean enable);
79 typedef void (AL_APIENTRY*LPALEVENTCALLBACKSOFT)(ALEVENTPROCSOFT callback, void *userParam);
80 typedef void* (AL_APIENTRY*LPALGETPOINTERSOFT)(ALenum pname);
81 typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values);
82 #endif
83 #endif /* ALLOW_EXPERIMENTAL_EXTS */
86 namespace {
88 #ifndef M_PI
89 #define M_PI (3.14159265358979323846)
90 #endif
/* Shorthand aliases for the std::chrono duration types used throughout. */
92 using nanoseconds = std::chrono::nanoseconds;
93 using microseconds = std::chrono::microseconds;
94 using milliseconds = std::chrono::milliseconds;
95 using seconds = std::chrono::seconds;
96 using seconds_d64 = std::chrono::duration<double>;
98 const std::string AppName("alffplay");
/* Playback options, toggled from the command line. */
100 bool EnableDirectOut = false;
101 bool EnableWideStereo = false;
/* Extension function pointers, loaded at runtime when the extensions are
 * available (remain null otherwise). */
102 LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
103 LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
105 #ifdef AL_SOFT_map_buffer
106 LPALBUFFERSTORAGESOFT alBufferStorageSOFT;
107 LPALMAPBUFFERSOFT alMapBufferSOFT;
108 LPALUNMAPBUFFERSOFT alUnmapBufferSOFT;
109 #endif
111 #ifdef AL_SOFT_events
112 LPALEVENTCONTROLSOFT alEventControlSOFT;
113 LPALEVENTCALLBACKSOFT alEventCallbackSOFT;
114 #endif
/* If the clock difference gets this large, gradual resync is abandoned and
 * the averaged difference is reset (see AudioState::getSync). */
116 const seconds AVNoSyncThreshold(10);
/* How far video may drift from the master clock before frames are
 * skipped or repeated. */
118 const milliseconds VideoSyncThreshold(10);
119 #define VIDEO_PICTURE_QUEUE_SIZE 16
/* Audio drift tolerance, and the most samples corrected per update. */
121 const seconds_d64 AudioSyncThreshold(0.03);
122 const milliseconds AudioSampleCorrectionMax(50);
123 /* Averaging filter coefficient for audio sync. */
124 #define AUDIO_DIFF_AVG_NB 20
125 const double AudioAvgFilterCoeff = std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB);
126 /* Per-buffer size, in time */
127 const milliseconds AudioBufferTime(20);
128 /* Buffer total size, in time (should be divisible by the buffer time) */
129 const milliseconds AudioBufferTotalTime(800);
131 #define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
/* Custom SDL user-event codes used to drive the main loop. */
133 enum {
134 FF_UPDATE_EVENT = SDL_USEREVENT,
135 FF_REFRESH_EVENT,
136 FF_MOVIE_DONE_EVENT
/* Selects which clock acts as the master for A/V synchronization. */
139 enum class SyncMaster {
140 Audio,
141 Video,
142 External,
144 Default = External
148 inline microseconds get_avtime()
149 { return microseconds(av_gettime()); }
151 /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
/* Each deleter simply forwards to the matching ffmpeg/swresample/swscale
 * free function, so the unique_ptr aliases below give RAII ownership of the
 * C-level contexts. */
152 struct AVIOContextDeleter {
153 void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
155 using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;
157 struct AVFormatCtxDeleter {
158 void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
160 using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;
162 struct AVCodecCtxDeleter {
163 void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
165 using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;
167 struct AVFrameDeleter {
168 void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
170 using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;
172 struct SwrContextDeleter {
173 void operator()(SwrContext *ptr) { swr_free(&ptr); }
175 using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;
177 struct SwsContextDeleter {
178 void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
180 using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
183 class PacketQueue {
184 std::deque<AVPacket> mPackets;
185 size_t mTotalSize{0};
187 public:
188 ~PacketQueue() { clear(); }
190 bool empty() const noexcept { return mPackets.empty(); }
191 size_t totalSize() const noexcept { return mTotalSize; }
193 void put(const AVPacket *pkt)
195 mPackets.push_back(AVPacket{});
196 if(av_packet_ref(&mPackets.back(), pkt) != 0)
197 mPackets.pop_back();
198 else
199 mTotalSize += mPackets.back().size;
202 AVPacket *front() noexcept
203 { return &mPackets.front(); }
205 void pop()
207 AVPacket *pkt = &mPackets.front();
208 mTotalSize -= pkt->size;
209 av_packet_unref(pkt);
210 mPackets.pop_front();
213 void clear()
215 for(AVPacket &pkt : mPackets)
216 av_packet_unref(&pkt);
217 mPackets.clear();
218 mTotalSize = 0;
223 struct MovieState;
/* Per-stream state for audio decoding and OpenAL playback. */
225 struct AudioState {
226 MovieState &mMovie;
/* The demuxed audio stream and its decoder context. */
228 AVStream *mStream{nullptr};
229 AVCodecCtxPtr mCodecCtx;
/* Protects the packet queue shared with the parser thread. */
231 std::mutex mQueueMtx;
232 std::condition_variable mQueueCond;
234 /* Used for clock difference average computation */
235 seconds_d64 mClockDiffAvg{0};
237 /* Time of the next sample to be buffered */
238 nanoseconds mCurrentPts{0};
240 /* Device clock time that the stream started at. */
241 nanoseconds mDeviceStartTime{nanoseconds::min()};
243 /* Decompressed sample frame, and swresample context for conversion */
244 AVFramePtr mDecodedFrame;
245 SwrContextPtr mSwresCtx;
247 /* Conversion format, for what gets fed to OpenAL */
248 int mDstChanLayout{0};
249 AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};
251 /* Storage of converted samples */
252 uint8_t *mSamples{nullptr};
253 int mSamplesLen{0}; /* In samples */
254 int mSamplesPos{0};
255 int mSamplesMax{0};
257 /* OpenAL format */
258 ALenum mFormat{AL_NONE};
259 ALsizei mFrameSize{0};
/* Protects the source/buffer queue; mConnected is cleared when the device
 * is reported disconnected (see EventCallback). */
261 std::mutex mSrcMutex;
262 std::condition_variable mSrcCond;
263 std::atomic_flag mConnected;
264 ALuint mSource{0};
265 std::vector<ALuint> mBuffers;
266 ALsizei mBufferIdx{0};
268 AudioState(MovieState &movie) : mMovie(movie)
269 { mConnected.test_and_set(std::memory_order_relaxed); }
/* Releases the OpenAL source/buffers and the converted-sample storage. */
270 ~AudioState()
272 if(mSource)
273 alDeleteSources(1, &mSource);
274 if(!mBuffers.empty())
275 alDeleteBuffers(mBuffers.size(), mBuffers.data());
277 av_freep(&mSamples)
280 #ifdef AL_SOFT_events
281 static void AL_APIENTRY EventCallback(ALenum eventType, ALuint object, ALuint param,
282 ALsizei length, const ALchar *message,
283 void *userParam);
284 #endif
/* Clock queries; getClock() is the locked wrapper for getClockNoLock(). */
286 nanoseconds getClockNoLock();
287 nanoseconds getClock()
289 std::lock_guard<std::mutex> lock(mSrcMutex);
290 return getClockNoLock();
293 bool isBufferFilled();
294 void startPlayback();
296 int getSync();
297 int decodeFrame();
298 bool readAudio(uint8_t *samples, int length);
/* Audio thread entry point. */
300 int handler();
/* Per-stream state for video decoding and SDL display. */
303 struct VideoState {
304 MovieState &mMovie;
/* The demuxed video stream and its decoder context. */
306 AVStream *mStream{nullptr};
307 AVCodecCtxPtr mCodecCtx;
/* Protects the packet queue shared with the parser thread. */
309 std::mutex mQueueMtx;
310 std::condition_variable mQueueCond;
/* Frame timing state used by the refresh/display logic. */
312 nanoseconds mClock{0};
313 nanoseconds mFrameTimer{0};
314 nanoseconds mFrameLastPts{0};
315 nanoseconds mFrameLastDelay{0};
316 nanoseconds mCurrentPts{0};
317 /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
318 microseconds mCurrentPtsTime{0};
320 /* Decompressed video frame, and swscale context for conversion */
321 AVFramePtr mDecodedFrame;
322 SwsContextPtr mSwscaleCtx;
/* One entry of the decoded-picture ring buffer. */
324 struct Picture {
325 SDL_Texture *mImage{nullptr};
326 int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
327 std::atomic<bool> mUpdated{false};
328 nanoseconds mPts{0};
330 ~Picture()
332 if(mImage)
333 SDL_DestroyTexture(mImage);
334 mImage = nullptr;
/* Fixed-size picture queue with separate read/write cursors, guarded by
 * mPictQMutex/mPictQCond. */
337 std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
338 size_t mPictQSize{0}, mPictQRead{0}, mPictQWrite{0};
339 std::mutex mPictQMutex;
340 std::condition_variable mPictQCond;
341 bool mFirstUpdate{true};
342 std::atomic<bool> mEOS{false};
343 std::atomic<bool> mFinalUpdate{false};
345 VideoState(MovieState &movie) : mMovie(movie) { }
347 nanoseconds getClock();
348 bool isBufferFilled();
/* SDL timer callback + helpers that run the display pipeline. */
350 static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
351 void schedRefresh(milliseconds delay);
352 void display(SDL_Window *screen, SDL_Renderer *renderer);
353 void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
354 void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
355 int queuePicture(nanoseconds pts);
/* Video thread entry point. */
356 int handler();
/* Top-level state for one open movie: demuxer contexts, the audio/video
 * stream states, and the worker threads. */
359 struct MovieState {
360 AVIOContextPtr mIOContext;
361 AVFormatCtxPtr mFormatCtx;
/* Which clock drives A/V sync for this movie. */
363 SyncMaster mAVSyncType{SyncMaster::Default};
/* Base time used by the external clock; set when playback starts. */
365 microseconds mClockBase{0};
366 std::atomic<bool> mPlaying{false};
/* Handshake between the parser thread (producer) and the decoders. */
368 std::mutex mSendMtx;
369 std::condition_variable mSendCond;
370 /* NOTE: false/clear = need data, true/set = no data needed */
371 std::atomic_flag mSendDataGood;
373 std::atomic<bool> mQuit{false};
375 AudioState mAudio;
376 VideoState mVideo;
378 std::thread mParseThread;
379 std::thread mAudioThread;
380 std::thread mVideoThread;
382 std::string mFilename;
384 MovieState(std::string fname)
385 : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
/* Signals quit and waits for the parser thread (which owns the other
 * worker threads) to finish. */
387 ~MovieState()
389 mQuit = true;
390 if(mParseThread.joinable())
391 mParseThread.join();
/* ffmpeg I/O interrupt callback; checks mQuit. */
394 static int decode_interrupt_cb(void *ctx);
395 bool prepare();
396 void setTitle(SDL_Window *window);
398 nanoseconds getClock();
400 nanoseconds getMasterClock();
402 nanoseconds getDuration();
404 int streamComponentOpen(int stream_index);
/* Parser thread entry point. */
405 int parse_handler();
/* Returns the current audio playback time. Caller must hold mSrcMutex
 * (getClock() is the locking wrapper). */
409 nanoseconds AudioState::getClockNoLock()
411 // The audio clock is the timestamp of the sample currently being heard.
412 if(alcGetInteger64vSOFT)
414 // If device start time = min, we aren't playing yet.
415 if(mDeviceStartTime == nanoseconds::min())
416 return nanoseconds::zero();
418 // Get the current device clock time and latency.
419 auto device = alcGetContextsDevice(alcGetCurrentContext());
420 ALCint64SOFT devtimes[2] = {0,0};
421 alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
422 auto latency = nanoseconds(devtimes[1]);
423 auto device_time = nanoseconds(devtimes[0]);
425 // The clock is simply the current device time relative to the recorded
426 // start time. We can also subtract the latency to get more a accurate
427 // position of where the audio device actually is in the output stream.
428 return device_time - mDeviceStartTime - latency;
431 /* The source-based clock is based on 4 components:
432 * 1 - The timestamp of the next sample to buffer (mCurrentPts)
433 * 2 - The length of the source's buffer queue
434 * (AudioBufferTime*AL_BUFFERS_QUEUED)
435 * 3 - The offset OpenAL is currently at in the source (the first value
436 * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
437 * 4 - The latency between OpenAL and the DAC (the second value from
438 * AL_SAMPLE_OFFSET_LATENCY_SOFT)
440 * Subtracting the length of the source queue from the next sample's
441 * timestamp gives the timestamp of the sample at the start of the source
442 * queue. Adding the source offset to that results in the timestamp for the
443 * sample at OpenAL's current position, and subtracting the source latency
444 * from that gives the timestamp of the sample currently at the DAC.
446 nanoseconds pts = mCurrentPts;
447 if(mSource)
/* offset[0] holds the 32.32 fixed-point sample offset, offset[1] the
 * latency in nanoseconds; both branches below fill them. */
449 ALint64SOFT offset[2];
450 ALint queued;
451 ALint status;
453 /* NOTE: The source state must be checked last, in case an underrun
454 * occurs and the source stops between retrieving the offset+latency
455 * and getting the state. */
456 if(alGetSourcei64vSOFT)
457 alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
458 else
/* Fallback: integer sample offset promoted to 32.32, no latency info. */
460 ALint ioffset;
461 alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
462 offset[0] = (ALint64SOFT)ioffset << 32;
463 offset[1] = 0;
465 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
466 alGetSourcei(mSource, AL_SOURCE_STATE, &status);
468 /* If the source is AL_STOPPED, then there was an underrun and all
469 * buffers are processed, so ignore the source queue. The audio thread
470 * will put the source into an AL_INITIAL state and clear the queue
471 * when it starts recovery. */
472 if(status != AL_STOPPED)
474 using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
476 pts -= AudioBufferTime*queued;
477 pts += std::chrono::duration_cast<nanoseconds>(
478 fixed32(offset[0] / mCodecCtx->sample_rate)
481 /* Don't offset by the latency if the source isn't playing. */
482 if(status == AL_PLAYING)
483 pts -= nanoseconds(offset[1]);
/* Clamp so the latency adjustment can't report a negative clock. */
486 return std::max(pts, nanoseconds::zero());
489 bool AudioState::isBufferFilled()
491 /* All of OpenAL's buffer queueing happens under the mSrcMutex lock, as
492 * does the source gen. So when we're able to grab the lock and the source
493 * is valid, the queue must be full.
495 std::lock_guard<std::mutex> lock(mSrcMutex);
496 return mSource != 0;
499 void AudioState::startPlayback()
501 alSourcePlay(mSource);
502 if(alcGetInteger64vSOFT)
504 using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
506 // Subtract the total buffer queue time from the current pts to get the
507 // pts of the start of the queue.
508 nanoseconds startpts = mCurrentPts - AudioBufferTotalTime;
509 int64_t srctimes[2]={0,0};
510 alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
511 auto device_time = nanoseconds(srctimes[1]);
512 auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32(srctimes[0])) /
513 mCodecCtx->sample_rate;
515 // The mixer may have ticked and incremented the device time and sample
516 // offset, so subtract the source offset from the device time to get
517 // the device time the source started at. Also subtract startpts to get
518 // the device time the stream would have started at to reach where it
519 // is now.
520 mDeviceStartTime = device_time - src_offset - startpts;
524 int AudioState::getSync()
526 if(mMovie.mAVSyncType == SyncMaster::Audio)
527 return 0;
529 auto ref_clock = mMovie.getMasterClock();
530 auto diff = ref_clock - getClockNoLock();
532 if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
534 /* Difference is TOO big; reset accumulated average */
535 mClockDiffAvg = seconds_d64::zero();
536 return 0;
539 /* Accumulate the diffs */
540 mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
541 auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
542 if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
543 return 0;
545 /* Constrain the per-update difference to avoid exceedingly large skips */
546 diff = std::min<nanoseconds>(std::max<nanoseconds>(diff, -AudioSampleCorrectionMax),
547 AudioSampleCorrectionMax);
548 return (int)std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count();
/* Decodes the next audio frame and converts it into mSamples, returning the
 * number of converted sample frames, or 0 at EOF/error/quit. */
551 int AudioState::decodeFrame()
553 while(!mMovie.mQuit.load(std::memory_order_relaxed))
555 std::unique_lock<std::mutex> lock(mQueueMtx);
556 int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
557 if(ret == AVERROR(EAGAIN))
/* Decoder needs more input: flag the parser for data, then wait on the
 * queue condition until a frame comes out. The temporary lock/unlock of
 * mSendMtx ensures the parser isn't between checking the flag and
 * sleeping when we notify. */
559 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
560 std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
561 mMovie.mSendCond.notify_one();
562 do {
563 mQueueCond.wait(lock);
564 ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
565 } while(ret == AVERROR(EAGAIN));
567 lock.unlock();
568 if(ret == AVERROR_EOF) break;
/* Wake the parser again in case it can queue more input now. */
569 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
570 mMovie.mSendCond.notify_one();
571 if(ret < 0)
573 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
574 return 0;
/* Skip empty frames. */
577 if(mDecodedFrame->nb_samples <= 0)
579 av_frame_unref(mDecodedFrame.get());
580 continue;
583 /* If provided, update w/ pts */
584 if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
585 mCurrentPts = std::chrono::duration_cast<nanoseconds>(
586 seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
/* Grow the conversion buffer if this frame is larger than any before. */
589 if(mDecodedFrame->nb_samples > mSamplesMax)
591 av_freep(&mSamples);
592 av_samples_alloc(
593 &mSamples, nullptr, mCodecCtx->channels,
594 mDecodedFrame->nb_samples, mDstSampleFmt, 0
596 mSamplesMax = mDecodedFrame->nb_samples;
598 /* Return the amount of sample frames converted */
599 int data_size = swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
600 (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
603 av_frame_unref(mDecodedFrame.get());
604 return data_size;
607 return 0;
/* Fills `out` with `count` copies of the single sample frame at `in`. The
 * frame size must be a (positive) multiple of sizeof(T).
 */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
{
    const T *sample = reinterpret_cast<const T*>(in);
    T *dst = reinterpret_cast<T*>(out);

    if(frame_size == sizeof(T))
    {
        /* The whole frame is a single T, so a plain fill works. */
        std::fill_n(dst, count, *sample);
        return;
    }

    /* Otherwise, copy the frame's sizeof(T)-sized pieces in a cycle. */
    const int type_mult = frame_size / sizeof(T);
    for(int i = 0;i < count*type_mult;++i)
        dst[i] = sample[i%type_mult];
}
/* Fills `samples` with `length` bytes of converted audio, skipping or
 * duplicating sample frames per getSync() to stay synced to the master
 * clock. Returns false when no audio at all could be read. */
637 bool AudioState::readAudio(uint8_t *samples, int length)
639 int sample_skip = getSync();
640 int audio_size = 0;
642 /* Read the next chunk of data, refill the buffer, and queue it
643 * on the source */
644 length /= mFrameSize;
645 while(audio_size < length)
647 if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
649 int frame_len = decodeFrame();
650 if(frame_len <= 0) break;
/* A negative sample_skip makes mSamplesPos negative, marking that many
 * frames to duplicate; a positive one skips frames. */
652 mSamplesLen = frame_len;
653 mSamplesPos = std::min(mSamplesLen, sample_skip);
654 sample_skip -= mSamplesPos;
656 // Adjust the device start time and current pts by the amount we're
657 // skipping/duplicating, so that the clock remains correct for the
658 // current stream position.
659 auto skip = nanoseconds(seconds(mSamplesPos)) / mCodecCtx->sample_rate;
660 mDeviceStartTime -= skip;
661 mCurrentPts += skip;
662 continue;
665 int rem = length - audio_size;
666 if(mSamplesPos >= 0)
/* Normal copy from the converted-sample buffer. */
668 int len = mSamplesLen - mSamplesPos;
669 if(rem > len) rem = len;
670 memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
672 else
674 rem = std::min(rem, -mSamplesPos);
676 /* Add samples by copying the first sample */
/* Pick the widest type that evenly divides the frame size. */
677 if((mFrameSize&7) == 0)
678 sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
679 else if((mFrameSize&3) == 0)
680 sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
681 else if((mFrameSize&1) == 0)
682 sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
683 else
684 sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
687 mSamplesPos += rem;
688 mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
689 samples += rem*mFrameSize;
690 audio_size += rem;
692 if(audio_size <= 0)
693 return false;
/* Pad a partial buffer with silence (0x80 is the zero level for U8). */
695 if(audio_size < length)
697 int rem = length - audio_size;
698 std::fill_n(samples, rem*mFrameSize,
699 (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
700 mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
701 audio_size += rem;
703 return true;
707 #ifdef AL_SOFT_events
/* OpenAL event callback; runs on the driver's event thread. Buffer
 * completions just wake the audio thread; other events are logged, and a
 * disconnect clears mConnected so the handler loop exits. */
708 void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALuint param,
709 ALsizei length, const ALchar *message,
710 void *userParam)
712 AudioState *self = reinterpret_cast<AudioState*>(userParam);
714 if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
716 /* Temporarily lock the source mutex to ensure it's not between
717 * checking the processed count and going to sleep.
719 std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
720 self->mSrcCond.notify_one();
721 return;
724 std::cout<< "\n---- AL Event on AudioState "<<self<<" ----\nEvent: ";
725 switch(eventType)
727 case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
728 case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
729 case AL_EVENT_TYPE_ERROR_SOFT: std::cout<< "API error"; break;
730 case AL_EVENT_TYPE_PERFORMANCE_SOFT: std::cout<< "Performance"; break;
731 case AL_EVENT_TYPE_DEPRECATED_SOFT: std::cout<< "Deprecated"; break;
732 case AL_EVENT_TYPE_DISCONNECTED_SOFT: std::cout<< "Disconnected"; break;
733 default: std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<
734 std::dec<<std::setw(0)<<std::setfill(' '); break;
736 std::cout<< "\n"
737 "Object ID: "<<object<<"\n"
738 "Parameter: "<<param<<"\n"
739 "Message: "<<std::string(message, length)<<"\n----"<<
740 std::endl;
742 if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
/* Clear the connected flag under the lock, then wake the audio thread so
 * it notices the disconnect. */
744 { std::lock_guard<std::mutex> lock(self->mSrcMutex);
745 self->mConnected.clear(std::memory_order_release);
747 std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
748 self->mSrcCond.notify_one();
751 #endif
/* Audio playback thread: picks an OpenAL output format matching the codec,
 * sets up the source, conversion context and buffer queue, then decodes and
 * queues audio until EOF, an AL error, quit, or device disconnect. */
753 int AudioState::handler()
755 std::unique_lock<std::mutex> lock(mSrcMutex);
756 milliseconds sleep_time = AudioBufferTime / 3;
757 ALenum fmt;
759 #ifdef AL_SOFT_events
760 const std::array<ALenum,6> evt_types{{
761 AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
762 AL_EVENT_TYPE_ERROR_SOFT, AL_EVENT_TYPE_PERFORMANCE_SOFT, AL_EVENT_TYPE_DEPRECATED_SOFT,
763 AL_EVENT_TYPE_DISCONNECTED_SOFT
/* With event notifications, the buffer-completed callback wakes this
 * thread, so the fallback polling interval can be much longer. */
765 if(alEventControlSOFT)
767 alEventControlSOFT(evt_types.size(), evt_types.data(), AL_TRUE);
768 alEventCallbackSOFT(EventCallback, this);
769 sleep_time = AudioBufferTotalTime;
771 #endif
773 /* Find a suitable format for OpenAL. */
/* 8-bit unsigned output path. */
774 mDstChanLayout = 0;
775 if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
777 mDstSampleFmt = AV_SAMPLE_FMT_U8;
778 mFrameSize = 1;
779 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
780 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
781 (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
783 mDstChanLayout = mCodecCtx->channel_layout;
784 mFrameSize *= 8;
785 mFormat = fmt;
787 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
788 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
789 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
790 (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
792 mDstChanLayout = mCodecCtx->channel_layout;
793 mFrameSize *= 6;
794 mFormat = fmt;
796 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
798 mDstChanLayout = mCodecCtx->channel_layout;
799 mFrameSize *= 1;
800 mFormat = AL_FORMAT_MONO8;
802 if(!mDstChanLayout)
804 mDstChanLayout = AV_CH_LAYOUT_STEREO;
805 mFrameSize *= 2;
806 mFormat = AL_FORMAT_STEREO8;
/* 32-bit float output path (needs AL_EXT_FLOAT32). */
809 if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
810 alIsExtensionPresent("AL_EXT_FLOAT32"))
812 mDstSampleFmt = AV_SAMPLE_FMT_FLT;
813 mFrameSize = 4;
814 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
815 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
816 (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
818 mDstChanLayout = mCodecCtx->channel_layout;
819 mFrameSize *= 8;
820 mFormat = fmt;
822 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
823 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
824 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
825 (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
827 mDstChanLayout = mCodecCtx->channel_layout;
828 mFrameSize *= 6;
829 mFormat = fmt;
831 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
833 mDstChanLayout = mCodecCtx->channel_layout;
834 mFrameSize *= 1;
835 mFormat = AL_FORMAT_MONO_FLOAT32;
837 if(!mDstChanLayout)
839 mDstChanLayout = AV_CH_LAYOUT_STEREO;
840 mFrameSize *= 2;
841 mFormat = AL_FORMAT_STEREO_FLOAT32;
/* 16-bit signed fallback, always available. */
844 if(!mDstChanLayout)
846 mDstSampleFmt = AV_SAMPLE_FMT_S16;
847 mFrameSize = 2;
848 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
849 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
850 (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
852 mDstChanLayout = mCodecCtx->channel_layout;
853 mFrameSize *= 8;
854 mFormat = fmt;
856 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
857 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
858 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
859 (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
861 mDstChanLayout = mCodecCtx->channel_layout;
862 mFrameSize *= 6;
863 mFormat = fmt;
865 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
867 mDstChanLayout = mCodecCtx->channel_layout;
868 mFrameSize *= 1;
869 mFormat = AL_FORMAT_MONO16;
871 if(!mDstChanLayout)
873 mDstChanLayout = AV_CH_LAYOUT_STEREO;
874 mFrameSize *= 2;
875 mFormat = AL_FORMAT_STEREO16;
/* buffer_len is AudioBufferTime worth of samples, in bytes. */
878 void *samples = nullptr;
879 ALsizei buffer_len = std::chrono::duration_cast<std::chrono::duration<int>>(
880 mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize;
882 mSamples = NULL;
883 mSamplesMax = 0;
884 mSamplesPos = 0;
885 mSamplesLen = 0;
887 mDecodedFrame.reset(av_frame_alloc());
888 if(!mDecodedFrame)
890 std::cerr<< "Failed to allocate audio frame" <<std::endl;
891 goto finish;
/* Set up the swresample conversion to the chosen OpenAL format, guessing a
 * default channel layout when the codec doesn't report one. */
894 mSwresCtx.reset(swr_alloc_set_opts(nullptr,
895 mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
896 mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
897 (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
898 mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
899 0, nullptr
901 if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
903 std::cerr<< "Failed to initialize audio converter" <<std::endl;
904 goto finish;
907 mBuffers.assign(AudioBufferTotalTime / AudioBufferTime, 0);
908 alGenBuffers(mBuffers.size(), mBuffers.data());
909 alGenSources(1, &mSource);
911 if(EnableDirectOut)
912 alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
913 if(EnableWideStereo)
915 ALfloat angles[2] = { (ALfloat)(M_PI/3.0), (ALfloat)(-M_PI/3.0) };
916 alSourcefv(mSource, AL_STEREO_ANGLES, angles);
919 if(alGetError() != AL_NO_ERROR)
920 goto finish;
/* Prefer persistent mappable buffer storage; fall back to a scratch buffer
 * and alBufferData when unavailable. */
922 #ifdef AL_SOFT_map_buffer
923 if(alBufferStorageSOFT)
925 for(ALuint bufid : mBuffers)
926 alBufferStorageSOFT(bufid, mFormat, nullptr, buffer_len, mCodecCtx->sample_rate,
927 AL_MAP_WRITE_BIT_SOFT);
928 if(alGetError() != AL_NO_ERROR)
930 fprintf(stderr, "Failed to use mapped buffers\n");
931 samples = av_malloc(buffer_len);
934 else
935 #endif
936 samples = av_malloc(buffer_len);
/* Main loop: runs until an AL error, a quit request, or a disconnect
 * (mConnected cleared by the event callback). */
938 while(alGetError() == AL_NO_ERROR && !mMovie.mQuit.load(std::memory_order_relaxed) &&
939 mConnected.test_and_set(std::memory_order_relaxed))
941 /* First remove any processed buffers. */
942 ALint processed;
943 alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
944 while(processed > 0)
946 std::array<ALuint,4> bids;
947 alSourceUnqueueBuffers(mSource, std::min<ALsizei>(bids.size(), processed),
948 bids.data());
949 processed -= std::min<ALsizei>(bids.size(), processed);
952 /* Refill the buffer queue. */
953 ALint queued;
954 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
955 while((ALuint)queued < mBuffers.size())
957 ALuint bufid = mBuffers[mBufferIdx];
/* Write directly into the mapped buffer when no scratch buffer exists. */
959 uint8_t *ptr = reinterpret_cast<uint8_t*>(samples
960 #ifdef AL_SOFT_map_buffer
961 ? samples : alMapBufferSOFT(bufid, 0, buffer_len, AL_MAP_WRITE_BIT_SOFT)
962 #endif
964 if(!ptr) break;
966 /* Read the next chunk of data, filling the buffer, and queue it on
967 * the source */
968 bool got_audio = readAudio(ptr, buffer_len);
969 #ifdef AL_SOFT_map_buffer
970 if(!samples) alUnmapBufferSOFT(bufid);
971 #endif
972 if(!got_audio) break;
974 if(samples)
975 alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate);
977 alSourceQueueBuffers(mSource, 1, &bufid);
978 mBufferIdx = (mBufferIdx+1) % mBuffers.size();
979 ++queued;
981 if(queued == 0)
982 break;
984 /* Check that the source is playing. */
985 ALint state;
986 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
987 if(state == AL_STOPPED)
989 /* AL_STOPPED means there was an underrun. Clear the buffer queue
990 * since this likely means we're late, and rewind the source to get
991 * it back into an AL_INITIAL state.
993 alSourceRewind(mSource);
994 alSourcei(mSource, AL_BUFFER, 0);
995 if(alcGetInteger64vSOFT)
997 /* Also update the device start time with the current device
998 * clock, so the decoder knows we're running behind.
1000 int64_t devtime{};
1001 alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()),
1002 ALC_DEVICE_CLOCK_SOFT, 1, &devtime);
1003 auto device_time = nanoseconds{devtime};
1005 mDeviceStartTime = device_time - mCurrentPts + AudioBufferTotalTime;
1007 continue;
1010 /* (re)start the source if needed, and wait for a buffer to finish */
1011 if(state != AL_PLAYING && state != AL_PAUSED &&
1012 mMovie.mPlaying.load(std::memory_order_relaxed))
1013 startPlayback();
1015 mSrcCond.wait_for(lock, sleep_time);
/* Loop exited: stop the source and drop any remaining queued buffers. */
1018 alSourceRewind(mSource);
1019 alSourcei(mSource, AL_BUFFER, 0);
1021 finish:
1022 av_freep(&samples);
1024 #ifdef AL_SOFT_events
1025 if(alEventControlSOFT)
1027 alEventControlSOFT(evt_types.size(), evt_types.data(), AL_FALSE);
1028 alEventCallbackSOFT(nullptr, nullptr);
1030 #endif
1032 return 0;
1036 nanoseconds VideoState::getClock()
1038 /* NOTE: This returns incorrect times while not playing. */
1039 auto delta = get_avtime() - mCurrentPtsTime;
1040 return mCurrentPts + delta;
1043 bool VideoState::isBufferFilled()
1045 std::unique_lock<std::mutex> lock(mPictQMutex);
1046 return mPictQSize >= mPictQ.size();
1049 Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
1051 SDL_Event evt{};
1052 evt.user.type = FF_REFRESH_EVENT;
1053 evt.user.data1 = opaque;
1054 SDL_PushEvent(&evt);
1055 return 0; /* 0 means stop timer */
1058 /* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
1059 void VideoState::schedRefresh(milliseconds delay)
1061 SDL_AddTimer(delay.count(), sdl_refresh_timer_cb, this);
1064 /* Called by VideoState::refreshTimer to display the next video frame. */
1065 void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
1067 Picture *vp = &mPictQ[mPictQRead];
1069 if(!vp->mImage)
1070 return;
1072 float aspect_ratio;
1073 int win_w, win_h;
1074 int w, h, x, y;
1076 if(mCodecCtx->sample_aspect_ratio.num == 0)
1077 aspect_ratio = 0.0f;
1078 else
1080 aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
1081 mCodecCtx->height;
1083 if(aspect_ratio <= 0.0f)
1084 aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;
1086 SDL_GetWindowSize(screen, &win_w, &win_h);
1087 h = win_h;
1088 w = ((int)rint(h * aspect_ratio) + 3) & ~3;
1089 if(w > win_w)
1091 w = win_w;
1092 h = ((int)rint(w / aspect_ratio) + 3) & ~3;
1094 x = (win_w - w) / 2;
1095 y = (win_h - h) / 2;
1097 SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
1098 SDL_Rect dst_rect{ x, y, w, h };
1099 SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
1100 SDL_RenderPresent(renderer);
1103 /* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
1104 * was created. It handles the display of the next decoded video frame (if not
1105 * falling behind), and sets up the timer for the following video frame.
1107 void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
1109 if(!mStream)
1111 if(mEOS)
1113 mFinalUpdate = true;
1114 std::unique_lock<std::mutex>(mPictQMutex).unlock();
1115 mPictQCond.notify_all();
1116 return;
1118 schedRefresh(milliseconds(100));
1119 return;
1121 if(!mMovie.mPlaying.load(std::memory_order_relaxed))
1123 schedRefresh(milliseconds(1));
1124 return;
1127 std::unique_lock<std::mutex> lock(mPictQMutex);
1128 retry:
1129 if(mPictQSize == 0)
1131 if(mEOS)
1132 mFinalUpdate = true;
1133 else
1134 schedRefresh(milliseconds(1));
1135 lock.unlock();
1136 mPictQCond.notify_all();
1137 return;
1140 Picture *vp = &mPictQ[mPictQRead];
1141 mCurrentPts = vp->mPts;
1142 mCurrentPtsTime = get_avtime();
1144 /* Get delay using the frame pts and the pts from last frame. */
1145 auto delay = vp->mPts - mFrameLastPts;
1146 if(delay <= seconds::zero() || delay >= seconds(1))
1148 /* If incorrect delay, use previous one. */
1149 delay = mFrameLastDelay;
1151 /* Save for next frame. */
1152 mFrameLastDelay = delay;
1153 mFrameLastPts = vp->mPts;
1155 /* Update delay to sync to clock if not master source. */
1156 if(mMovie.mAVSyncType != SyncMaster::Video)
1158 auto ref_clock = mMovie.getMasterClock();
1159 auto diff = vp->mPts - ref_clock;
1161 /* Skip or repeat the frame. Take delay into account. */
1162 auto sync_threshold = std::min<nanoseconds>(delay, VideoSyncThreshold);
1163 if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
1165 if(diff <= -sync_threshold)
1166 delay = nanoseconds::zero();
1167 else if(diff >= sync_threshold)
1168 delay *= 2;
1172 mFrameTimer += delay;
1173 /* Compute the REAL delay. */
1174 auto actual_delay = mFrameTimer - get_avtime();
1175 if(!(actual_delay >= VideoSyncThreshold))
1177 /* We don't have time to handle this picture, just skip to the next one. */
1178 mPictQRead = (mPictQRead+1)%mPictQ.size();
1179 mPictQSize--;
1180 goto retry;
1182 schedRefresh(std::chrono::duration_cast<milliseconds>(actual_delay));
1184 /* Show the picture! */
1185 display(screen, renderer);
1187 /* Update queue for next picture. */
1188 mPictQRead = (mPictQRead+1)%mPictQ.size();
1189 mPictQSize--;
1190 lock.unlock();
1191 mPictQCond.notify_all();
1194 /* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
1195 * main thread where the renderer was created.
1197 void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
1199 Picture *vp = &mPictQ[mPictQWrite];
1200 bool fmt_updated = false;
1202 /* allocate or resize the buffer! */
1203 if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
1205 fmt_updated = true;
1206 if(vp->mImage)
1207 SDL_DestroyTexture(vp->mImage);
1208 vp->mImage = SDL_CreateTexture(
1209 renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
1210 mCodecCtx->coded_width, mCodecCtx->coded_height
1212 if(!vp->mImage)
1213 std::cerr<< "Failed to create YV12 texture!" <<std::endl;
1214 vp->mWidth = mCodecCtx->width;
1215 vp->mHeight = mCodecCtx->height;
1217 if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
1219 /* For the first update, set the window size to the video size. */
1220 mFirstUpdate = false;
1222 int w = vp->mWidth;
1223 int h = vp->mHeight;
1224 if(mCodecCtx->sample_aspect_ratio.den != 0)
1226 double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
1227 if(aspect_ratio >= 1.0)
1228 w = (int)(w*aspect_ratio + 0.5);
1229 else if(aspect_ratio > 0.0)
1230 h = (int)(h/aspect_ratio + 0.5);
1232 SDL_SetWindowSize(screen, w, h);
1236 if(vp->mImage)
1238 AVFrame *frame = mDecodedFrame.get();
1239 void *pixels = nullptr;
1240 int pitch = 0;
1242 if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
1243 SDL_UpdateYUVTexture(vp->mImage, nullptr,
1244 frame->data[0], frame->linesize[0],
1245 frame->data[1], frame->linesize[1],
1246 frame->data[2], frame->linesize[2]
1248 else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
1249 std::cerr<< "Failed to lock texture" <<std::endl;
1250 else
1252 // Convert the image into YUV format that SDL uses
1253 int coded_w = mCodecCtx->coded_width;
1254 int coded_h = mCodecCtx->coded_height;
1255 int w = mCodecCtx->width;
1256 int h = mCodecCtx->height;
1257 if(!mSwscaleCtx || fmt_updated)
1259 mSwscaleCtx.reset(sws_getContext(
1260 w, h, mCodecCtx->pix_fmt,
1261 w, h, AV_PIX_FMT_YUV420P, 0,
1262 nullptr, nullptr, nullptr
1266 /* point pict at the queue */
1267 uint8_t *pict_data[3];
1268 pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
1269 pict_data[1] = pict_data[0] + coded_w*coded_h;
1270 pict_data[2] = pict_data[1] + coded_w*coded_h/4;
1272 int pict_linesize[3];
1273 pict_linesize[0] = pitch;
1274 pict_linesize[1] = pitch / 2;
1275 pict_linesize[2] = pitch / 2;
1277 sws_scale(mSwscaleCtx.get(), (const uint8_t**)frame->data,
1278 frame->linesize, 0, h, pict_data, pict_linesize);
1279 SDL_UnlockTexture(vp->mImage);
1283 vp->mUpdated.store(true, std::memory_order_release);
1284 std::unique_lock<std::mutex>(mPictQMutex).unlock();
1285 mPictQCond.notify_one();
1288 int VideoState::queuePicture(nanoseconds pts)
1290 /* Wait until we have space for a new pic */
1291 std::unique_lock<std::mutex> lock(mPictQMutex);
1292 while(mPictQSize >= mPictQ.size() && !mMovie.mQuit.load(std::memory_order_relaxed))
1293 mPictQCond.wait(lock);
1294 lock.unlock();
1296 if(mMovie.mQuit.load(std::memory_order_relaxed))
1297 return -1;
1299 Picture *vp = &mPictQ[mPictQWrite];
1301 /* We have to create/update the picture in the main thread */
1302 vp->mUpdated.store(false, std::memory_order_relaxed);
1303 SDL_Event evt{};
1304 evt.user.type = FF_UPDATE_EVENT;
1305 evt.user.data1 = this;
1306 SDL_PushEvent(&evt);
1308 /* Wait until the picture is updated. */
1309 lock.lock();
1310 while(!vp->mUpdated.load(std::memory_order_relaxed))
1312 if(mMovie.mQuit.load(std::memory_order_relaxed))
1313 return -1;
1314 mPictQCond.wait(lock);
1316 if(mMovie.mQuit.load(std::memory_order_relaxed))
1317 return -1;
1318 vp->mPts = pts;
1320 mPictQWrite = (mPictQWrite+1)%mPictQ.size();
1321 mPictQSize++;
1322 lock.unlock();
1324 return 0;
1327 int VideoState::handler()
1329 mDecodedFrame.reset(av_frame_alloc());
1330 while(!mMovie.mQuit.load(std::memory_order_relaxed))
1332 std::unique_lock<std::mutex> lock(mQueueMtx);
1333 /* Decode video frame */
1334 int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
1335 if(ret == AVERROR(EAGAIN))
1337 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
1338 std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
1339 mMovie.mSendCond.notify_one();
1340 do {
1341 mQueueCond.wait(lock);
1342 ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
1343 } while(ret == AVERROR(EAGAIN));
1345 lock.unlock();
1346 if(ret == AVERROR_EOF) break;
1347 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
1348 mMovie.mSendCond.notify_one();
1349 if(ret < 0)
1351 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
1352 continue;
1355 /* Get the PTS for this frame. */
1356 nanoseconds pts;
1357 if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
1358 mClock = std::chrono::duration_cast<nanoseconds>(
1359 seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
1361 pts = mClock;
1363 /* Update the video clock to the next expected PTS. */
1364 auto frame_delay = av_q2d(mCodecCtx->time_base);
1365 frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
1366 mClock += std::chrono::duration_cast<nanoseconds>(seconds_d64(frame_delay));
1368 if(queuePicture(pts) < 0)
1369 break;
1370 av_frame_unref(mDecodedFrame.get());
1372 mEOS = true;
1374 std::unique_lock<std::mutex> lock(mPictQMutex);
1375 if(mMovie.mQuit.load(std::memory_order_relaxed))
1377 mPictQRead = 0;
1378 mPictQWrite = 0;
1379 mPictQSize = 0;
1381 while(!mFinalUpdate)
1382 mPictQCond.wait(lock);
1384 return 0;
1388 int MovieState::decode_interrupt_cb(void *ctx)
1390 return reinterpret_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
1393 bool MovieState::prepare()
1395 AVIOContext *avioctx = nullptr;
1396 AVIOInterruptCB intcb = { decode_interrupt_cb, this };
1397 if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
1399 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1400 return false;
1402 mIOContext.reset(avioctx);
1404 /* Open movie file. If avformat_open_input fails it will automatically free
1405 * this context, so don't set it onto a smart pointer yet.
1407 AVFormatContext *fmtctx = avformat_alloc_context();
1408 fmtctx->pb = mIOContext.get();
1409 fmtctx->interrupt_callback = intcb;
1410 if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
1412 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1413 return false;
1415 mFormatCtx.reset(fmtctx);
1417 /* Retrieve stream information */
1418 if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
1420 std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
1421 return false;
1424 mVideo.schedRefresh(milliseconds(40));
1426 mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
1427 return true;
1430 void MovieState::setTitle(SDL_Window *window)
1432 auto pos1 = mFilename.rfind('/');
1433 auto pos2 = mFilename.rfind('\\');
1434 auto fpos = ((pos1 == std::string::npos) ? pos2 :
1435 (pos2 == std::string::npos) ? pos1 :
1436 std::max(pos1, pos2)) + 1;
1437 SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1440 nanoseconds MovieState::getClock()
1442 if(!mPlaying.load(std::memory_order_relaxed))
1443 return nanoseconds::zero();
1444 return get_avtime() - mClockBase;
1447 nanoseconds MovieState::getMasterClock()
1449 if(mAVSyncType == SyncMaster::Video)
1450 return mVideo.getClock();
1451 if(mAVSyncType == SyncMaster::Audio)
1452 return mAudio.getClock();
1453 return getClock();
1456 nanoseconds MovieState::getDuration()
1457 { return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
1459 int MovieState::streamComponentOpen(int stream_index)
1461 if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
1462 return -1;
1464 /* Get a pointer to the codec context for the stream, and open the
1465 * associated codec.
1467 AVCodecCtxPtr avctx(avcodec_alloc_context3(nullptr));
1468 if(!avctx) return -1;
1470 if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
1471 return -1;
1473 AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
1474 if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
1476 std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
1477 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
1478 return -1;
1481 /* Initialize and start the media type handler */
1482 switch(avctx->codec_type)
1484 case AVMEDIA_TYPE_AUDIO:
1485 mAudio.mStream = mFormatCtx->streams[stream_index];
1486 mAudio.mCodecCtx = std::move(avctx);
1488 mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
1489 break;
1491 case AVMEDIA_TYPE_VIDEO:
1492 mVideo.mStream = mFormatCtx->streams[stream_index];
1493 mVideo.mCodecCtx = std::move(avctx);
1495 mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
1496 break;
1498 default:
1499 return -1;
1502 return stream_index;
1505 int MovieState::parse_handler()
1507 int video_index = -1;
1508 int audio_index = -1;
1510 /* Dump information about file onto standard error */
1511 av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);
1513 /* Find the first video and audio streams */
1514 for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
1516 auto codecpar = mFormatCtx->streams[i]->codecpar;
1517 if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
1518 video_index = streamComponentOpen(i);
1519 else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
1520 audio_index = streamComponentOpen(i);
1523 if(video_index < 0 && audio_index < 0)
1525 std::cerr<< mFilename<<": could not open codecs" <<std::endl;
1526 mQuit = true;
1529 PacketQueue audio_queue, video_queue;
1530 bool input_finished = false;
1532 /* Main packet reading/dispatching loop */
1533 while(!mQuit.load(std::memory_order_relaxed) && !input_finished)
1535 AVPacket packet;
1536 if(av_read_frame(mFormatCtx.get(), &packet) < 0)
1537 input_finished = true;
1538 else
1540 /* Copy the packet into the queue it's meant for. */
1541 if(packet.stream_index == video_index)
1542 video_queue.put(&packet);
1543 else if(packet.stream_index == audio_index)
1544 audio_queue.put(&packet);
1545 av_packet_unref(&packet);
1548 do {
1549 /* Send whatever queued packets we have. */
1550 if(!audio_queue.empty())
1552 std::unique_lock<std::mutex> lock(mAudio.mQueueMtx);
1553 int ret;
1554 do {
1555 ret = avcodec_send_packet(mAudio.mCodecCtx.get(), audio_queue.front());
1556 if(ret != AVERROR(EAGAIN)) audio_queue.pop();
1557 } while(ret != AVERROR(EAGAIN) && !audio_queue.empty());
1558 lock.unlock();
1559 mAudio.mQueueCond.notify_one();
1561 if(!video_queue.empty())
1563 std::unique_lock<std::mutex> lock(mVideo.mQueueMtx);
1564 int ret;
1565 do {
1566 ret = avcodec_send_packet(mVideo.mCodecCtx.get(), video_queue.front());
1567 if(ret != AVERROR(EAGAIN)) video_queue.pop();
1568 } while(ret != AVERROR(EAGAIN) && !video_queue.empty());
1569 lock.unlock();
1570 mVideo.mQueueCond.notify_one();
1572 /* If the queues are completely empty, or it's not full and there's
1573 * more input to read, go get more.
1575 size_t queue_size = audio_queue.totalSize() + video_queue.totalSize();
1576 if(queue_size == 0 || (queue_size < MAX_QUEUE_SIZE && !input_finished))
1577 break;
1579 if(!mPlaying.load(std::memory_order_relaxed))
1581 if((!mAudio.mCodecCtx || mAudio.isBufferFilled()) &&
1582 (!mVideo.mCodecCtx || mVideo.isBufferFilled()))
1584 /* Set the base time 50ms ahead of the current av time. */
1585 mClockBase = get_avtime() + milliseconds(50);
1586 mVideo.mCurrentPtsTime = mClockBase;
1587 mVideo.mFrameTimer = mVideo.mCurrentPtsTime;
1588 mAudio.startPlayback();
1589 mPlaying.store(std::memory_order_release);
1592 /* Nothing to send or get for now, wait a bit and try again. */
1593 { std::unique_lock<std::mutex> lock(mSendMtx);
1594 if(mSendDataGood.test_and_set(std::memory_order_relaxed))
1595 mSendCond.wait_for(lock, milliseconds(10));
1597 } while(!mQuit.load(std::memory_order_relaxed));
1599 /* Pass a null packet to finish the send buffers (the receive functions
1600 * will get AVERROR_EOF when emptied).
1602 if(mVideo.mCodecCtx)
1604 { std::lock_guard<std::mutex> lock(mVideo.mQueueMtx);
1605 avcodec_send_packet(mVideo.mCodecCtx.get(), nullptr);
1607 mVideo.mQueueCond.notify_one();
1609 if(mAudio.mCodecCtx)
1611 { std::lock_guard<std::mutex> lock(mAudio.mQueueMtx);
1612 avcodec_send_packet(mAudio.mCodecCtx.get(), nullptr);
1614 mAudio.mQueueCond.notify_one();
1616 video_queue.clear();
1617 audio_queue.clear();
1619 /* all done - wait for it */
1620 if(mVideoThread.joinable())
1621 mVideoThread.join();
1622 if(mAudioThread.joinable())
1623 mAudioThread.join();
1625 mVideo.mEOS = true;
1626 std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
1627 while(!mVideo.mFinalUpdate)
1628 mVideo.mPictQCond.wait(lock);
1629 lock.unlock();
1631 SDL_Event evt{};
1632 evt.user.type = FF_MOVIE_DONE_EVENT;
1633 SDL_PushEvent(&evt);
1635 return 0;
// Helper class+method to print the time with human-readable formatting.
struct PrettyTime {
    std::chrono::seconds mTime;
};
inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using hours = std::chrono::hours;
    using minutes = std::chrono::minutes;
    using std::chrono::duration_cast;

    auto t = rhs.mTime;
    if(t.count() < 0)
    {
        /* Print the sign and format the magnitude. */
        os << '-';
        t *= -1;
    }

    // Only handle up to hour formatting
    if(t >= hours(1))
        os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
           << (duration_cast<minutes>(t).count() % 60) << 'm';
    else
        os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
    // Two-digit seconds, then restore the stream's fill and width settings.
    os << std::setw(2) << (t.count() % 60) << 's' << std::setw(0) << std::setfill(' ');
    return os;
}
1667 } // namespace
1670 int main(int argc, char *argv[])
1672 std::unique_ptr<MovieState> movState;
1674 if(argc < 2)
1676 std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
1677 return 1;
1679 /* Register all formats and codecs */
1680 av_register_all();
1681 /* Initialize networking protocols */
1682 avformat_network_init();
1684 if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
1686 std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
1687 return 1;
1690 /* Make a window to put our video */
1691 SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
1692 if(!screen)
1694 std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
1695 return 1;
1697 /* Make a renderer to handle the texture image surface and rendering. */
1698 Uint32 render_flags = SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC;
1699 SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, render_flags);
1700 if(renderer)
1702 SDL_RendererInfo rinf{};
1703 bool ok = false;
1705 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1706 * software renderer. */
1707 if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1709 for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
1710 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
1712 if(!ok)
1714 std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
1715 SDL_DestroyRenderer(renderer);
1716 renderer = nullptr;
1719 if(!renderer)
1721 render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
1722 renderer = SDL_CreateRenderer(screen, -1, render_flags);
1724 if(!renderer)
1726 std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
1727 return 1;
1729 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1730 SDL_RenderFillRect(renderer, nullptr);
1731 SDL_RenderPresent(renderer);
1733 /* Open an audio device */
1734 ++argv; --argc;
1735 if(InitAL(&argv, &argc))
1737 std::cerr<< "Failed to set up audio device" <<std::endl;
1738 return 1;
1741 { auto device = alcGetContextsDevice(alcGetCurrentContext());
1742 if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
1744 std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
1745 alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
1746 alcGetProcAddress(device, "alcGetInteger64vSOFT")
1751 if(alIsExtensionPresent("AL_SOFT_source_latency"))
1753 std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
1754 alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
1755 alGetProcAddress("alGetSourcei64vSOFT")
1758 #ifdef AL_SOFT_map_buffer
1759 if(alIsExtensionPresent("AL_SOFTX_map_buffer"))
1761 std::cout<< "Found AL_SOFT_map_buffer" <<std::endl;
1762 alBufferStorageSOFT = reinterpret_cast<LPALBUFFERSTORAGESOFT>(
1763 alGetProcAddress("alBufferStorageSOFT"));
1764 alMapBufferSOFT = reinterpret_cast<LPALMAPBUFFERSOFT>(
1765 alGetProcAddress("alMapBufferSOFT"));
1766 alUnmapBufferSOFT = reinterpret_cast<LPALUNMAPBUFFERSOFT>(
1767 alGetProcAddress("alUnmapBufferSOFT"));
1769 #endif
1770 #ifdef AL_SOFT_events
1771 if(alIsExtensionPresent("AL_SOFTX_events"))
1773 std::cout<< "Found AL_SOFT_events" <<std::endl;
1774 alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
1775 alGetProcAddress("alEventControlSOFT"));
1776 alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
1777 alGetProcAddress("alEventCallbackSOFT"));
1779 #endif
1781 int fileidx = 0;
1782 for(;fileidx < argc;++fileidx)
1784 if(strcmp(argv[fileidx], "-direct") == 0)
1786 if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
1787 std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
1788 else
1790 std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
1791 EnableDirectOut = true;
1794 else if(strcmp(argv[fileidx], "-wide") == 0)
1796 if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
1797 std::cerr<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl;
1798 else
1800 std::cout<< "Found AL_EXT_STEREO_ANGLES" <<std::endl;
1801 EnableWideStereo = true;
1804 else
1805 break;
1808 while(fileidx < argc && !movState)
1810 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1811 if(!movState->prepare()) movState = nullptr;
1813 if(!movState)
1815 std::cerr<< "Could not start a video" <<std::endl;
1816 return 1;
1818 movState->setTitle(screen);
1820 /* Default to going to the next movie at the end of one. */
1821 enum class EomAction {
1822 Next, Quit
1823 } eom_action = EomAction::Next;
1824 seconds last_time(-1);
1825 SDL_Event event;
1826 while(1)
1828 int have_evt = SDL_WaitEventTimeout(&event, 10);
1830 auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
1831 if(cur_time != last_time)
1833 auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
1834 std::cout<< "\r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
1835 last_time = cur_time;
1837 if(!have_evt) continue;
1839 switch(event.type)
1841 case SDL_KEYDOWN:
1842 switch(event.key.keysym.sym)
1844 case SDLK_ESCAPE:
1845 movState->mQuit = true;
1846 eom_action = EomAction::Quit;
1847 break;
1849 case SDLK_n:
1850 movState->mQuit = true;
1851 eom_action = EomAction::Next;
1852 break;
1854 default:
1855 break;
1857 break;
1859 case SDL_WINDOWEVENT:
1860 switch(event.window.event)
1862 case SDL_WINDOWEVENT_RESIZED:
1863 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1864 SDL_RenderFillRect(renderer, nullptr);
1865 break;
1867 default:
1868 break;
1870 break;
1872 case SDL_QUIT:
1873 movState->mQuit = true;
1874 eom_action = EomAction::Quit;
1875 break;
1877 case FF_UPDATE_EVENT:
1878 reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
1879 screen, renderer
1881 break;
1883 case FF_REFRESH_EVENT:
1884 reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
1885 screen, renderer
1887 break;
1889 case FF_MOVIE_DONE_EVENT:
1890 std::cout<<'\n';
1891 last_time = seconds(-1);
1892 if(eom_action != EomAction::Quit)
1894 movState = nullptr;
1895 while(fileidx < argc && !movState)
1897 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1898 if(!movState->prepare()) movState = nullptr;
1900 if(movState)
1902 movState->setTitle(screen);
1903 break;
1907 /* Nothing more to play. Shut everything down and quit. */
1908 movState = nullptr;
1910 CloseAL();
1912 SDL_DestroyRenderer(renderer);
1913 renderer = nullptr;
1914 SDL_DestroyWindow(screen);
1915 screen = nullptr;
1917 SDL_Quit();
1918 exit(0);
1920 default:
1921 break;
1925 std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
1926 return 1;