fix building on freebsd (clang)
[openal-soft.git] / examples / alffplay.cpp
blob6f6773a4d3c49488eac51c55e7600b349ee05a3e
1 /*
2 * An example showing how to play a stream sync'd to video, using ffmpeg.
4 * Requires C++11.
5 */
#include <condition_variable>
#include <functional>
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <cstring>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <memory>
#include <string>
#include <thread>
#include <chrono>
#include <atomic>
#include <vector>
#include <mutex>
#include <deque>
#include <array>
#include <cmath>
23 extern "C" {
24 #include "libavcodec/avcodec.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/avio.h"
27 #include "libavutil/time.h"
28 #include "libavutil/pixfmt.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/channel_layout.h"
31 #include "libswscale/swscale.h"
32 #include "libswresample/swresample.h"
35 #include "SDL.h"
37 #include "AL/alc.h"
38 #include "AL/al.h"
39 #include "AL/alext.h"
41 extern "C" {
42 #ifndef AL_SOFT_map_buffer
43 #define AL_SOFT_map_buffer 1
44 typedef unsigned int ALbitfieldSOFT;
45 #define AL_MAP_READ_BIT_SOFT 0x00000001
46 #define AL_MAP_WRITE_BIT_SOFT 0x00000002
47 #define AL_MAP_PERSISTENT_BIT_SOFT 0x00000004
48 #define AL_PRESERVE_DATA_BIT_SOFT 0x00000008
49 typedef void (AL_APIENTRY*LPALBUFFERSTORAGESOFT)(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
50 typedef void* (AL_APIENTRY*LPALMAPBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length, ALbitfieldSOFT access);
51 typedef void (AL_APIENTRY*LPALUNMAPBUFFERSOFT)(ALuint buffer);
52 typedef void (AL_APIENTRY*LPALFLUSHMAPPEDBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length);
53 #endif
55 #ifndef AL_SOFT_events
56 #define AL_SOFT_events 1
57 #define AL_EVENT_CALLBACK_FUNCTION_SOFT 0x1220
58 #define AL_EVENT_CALLBACK_USER_PARAM_SOFT 0x1221
59 #define AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT 0x1222
60 #define AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT 0x1223
61 #define AL_EVENT_TYPE_ERROR_SOFT 0x1224
62 #define AL_EVENT_TYPE_PERFORMANCE_SOFT 0x1225
63 #define AL_EVENT_TYPE_DEPRECATED_SOFT 0x1226
64 #define AL_EVENT_TYPE_DISCONNECTED_SOFT 0x1227
65 typedef void (AL_APIENTRY*ALEVENTPROCSOFT)(ALenum eventType, ALuint object, ALuint param,
66 ALsizei length, const ALchar *message,
67 void *userParam);
68 typedef void (AL_APIENTRY*LPALEVENTCONTROLSOFT)(ALsizei count, const ALenum *types, ALboolean enable);
69 typedef void (AL_APIENTRY*LPALEVENTCALLBACKSOFT)(ALEVENTPROCSOFT callback, void *userParam);
70 typedef void* (AL_APIENTRY*LPALGETPOINTERSOFT)(ALenum pname);
71 typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values);
72 #endif
75 namespace {
/* Shorthand aliases for the std::chrono durations used throughout. */
77 using nanoseconds = std::chrono::nanoseconds;
78 using microseconds = std::chrono::microseconds;
79 using milliseconds = std::chrono::milliseconds;
80 using seconds = std::chrono::seconds;
/* Double-precision seconds, for fractional time arithmetic. */
81 using seconds_d64 = std::chrono::duration<double>;
83 const std::string AppName("alffplay");
/* When true, AL_DIRECT_CHANNELS_SOFT is requested on the source (presumably
 * toggled from the command line — confirm against main(), not visible here). */
85 bool EnableDirectOut = false;
/* OpenAL extension entry points, left null when the extension is missing. */
86 LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
87 LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
89 LPALBUFFERSTORAGESOFT alBufferStorageSOFT;
90 LPALMAPBUFFERSOFT alMapBufferSOFT;
91 LPALUNMAPBUFFERSOFT alUnmapBufferSOFT;
93 LPALEVENTCONTROLSOFT alEventControlSOFT;
94 LPALEVENTCALLBACKSOFT alEventCallbackSOFT;
/* Clock differences beyond this are considered unrecoverable desync. */
96 const seconds AVNoSyncThreshold(10);
/* Minimum frame-display delay before a video frame is shown (vs. skipped). */
98 const milliseconds VideoSyncThreshold(10);
99 #define VIDEO_PICTURE_QUEUE_SIZE 16
/* Audio clock drift allowed before samples are skipped/duplicated. */
101 const seconds_d64 AudioSyncThreshold(0.03);
102 const milliseconds AudioSampleCorrectionMax(50);
103 /* Averaging filter coefficient for audio sync. */
104 #define AUDIO_DIFF_AVG_NB 20
105 const double AudioAvgFilterCoeff = std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB);
106 /* Per-buffer size, in time */
107 const milliseconds AudioBufferTime(20);
108 /* Buffer total size, in time (should be divisible by the buffer time) */
109 const milliseconds AudioBufferTotalTime(800);
111 #define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
/* Custom SDL user events pushed to the main-thread event loop. */
113 enum {
114 FF_UPDATE_EVENT = SDL_USEREVENT,
115 FF_REFRESH_EVENT,
116 FF_MOVIE_DONE_EVENT
/* Selects which stream drives the master playback clock. */
119 enum class SyncMaster {
120 Audio,
121 Video,
122 External,
124 Default = External
128 inline microseconds get_avtime()
129 { return microseconds(av_gettime()); }
131 /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
132 struct AVIOContextDeleter {
133 void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
135 using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;
137 struct AVFormatCtxDeleter {
138 void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
140 using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;
142 struct AVCodecCtxDeleter {
143 void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
145 using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;
147 struct AVFrameDeleter {
148 void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
150 using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;
152 struct SwrContextDeleter {
153 void operator()(SwrContext *ptr) { swr_free(&ptr); }
155 using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;
157 struct SwsContextDeleter {
158 void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
160 using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
163 class PacketQueue {
164 std::deque<AVPacket> mPackets;
165 size_t mTotalSize{0};
167 public:
168 ~PacketQueue() { clear(); }
170 bool empty() const noexcept { return mPackets.empty(); }
171 size_t totalSize() const noexcept { return mTotalSize; }
173 void put(const AVPacket *pkt)
175 mPackets.push_back(AVPacket{});
176 if(av_packet_ref(&mPackets.back(), pkt) != 0)
177 mPackets.pop_back();
178 else
179 mTotalSize += mPackets.back().size;
182 AVPacket *front() noexcept
183 { return &mPackets.front(); }
185 void pop()
187 AVPacket *pkt = &mPackets.front();
188 mTotalSize -= pkt->size;
189 av_packet_unref(pkt);
190 mPackets.pop_front();
193 void clear()
195 for(AVPacket &pkt : mPackets)
196 av_packet_unref(&pkt);
197 mPackets.clear();
198 mTotalSize = 0;
203 struct MovieState;
/* Audio playback state for one stream: decodes packets with libavcodec,
 * converts with swresample, and feeds an OpenAL source. The parse thread
 * signals new data via mQueueMtx/mQueueCond; the OpenAL side is guarded by
 * mSrcMutex/mSrcCond. */
205 struct AudioState {
206 MovieState &mMovie;
208 AVStream *mStream{nullptr};
209 AVCodecCtxPtr mCodecCtx;
211 std::mutex mQueueMtx;
212 std::condition_variable mQueueCond;
214 /* Used for clock difference average computation */
215 seconds_d64 mClockDiffAvg{0};
217 /* Time of the next sample to be buffered */
218 nanoseconds mCurrentPts{0};
220 /* Device clock time that the stream started at. */
221 nanoseconds mDeviceStartTime{nanoseconds::min()};
223 /* Decompressed sample frame, and swresample context for conversion */
224 AVFramePtr mDecodedFrame;
225 SwrContextPtr mSwresCtx;
227 /* Conversion format, for what gets fed to OpenAL */
228 int mDstChanLayout{0};
229 AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};
231 /* Storage of converted samples */
232 uint8_t *mSamples{nullptr};
233 int mSamplesLen{0}; /* In samples */
234 int mSamplesPos{0};
235 int mSamplesMax{0};
237 /* OpenAL format */
238 ALenum mFormat{AL_NONE};
239 ALsizei mFrameSize{0};
241 std::mutex mSrcMutex;
242 std::condition_variable mSrcCond;
/* Set while the device is connected; cleared by the disconnect event. */
243 std::atomic_flag mConnected;
244 ALuint mSource{0};
245 std::vector<ALuint> mBuffers;
246 ALsizei mBufferIdx{0};
/* Starts in the "connected" state; playback objects are created lazily. */
248 AudioState(MovieState &movie) : mMovie(movie)
249 { mConnected.test_and_set(std::memory_order_relaxed); }
250 ~AudioState()
252 if(mSource)
253 alDeleteSources(1, &mSource);
254 if(!mBuffers.empty())
255 alDeleteBuffers(mBuffers.size(), mBuffers.data());
257 av_freep(&mSamples);
/* AL_SOFT_events callback; wakes the audio thread / reports AL events. */
260 static void AL_APIENTRY EventCallback(ALenum eventType, ALuint object, ALuint param,
261 ALsizei length, const ALchar *message,
262 void *userParam);
/* Current audio clock; getClock() is the mSrcMutex-locking wrapper. */
264 nanoseconds getClockNoLock();
265 nanoseconds getClock()
267 std::lock_guard<std::mutex> lock(mSrcMutex);
268 return getClockNoLock();
271 bool isBufferFilled();
272 void startPlayback();
/* Sample-count correction needed to sync to the master clock. */
274 int getSync();
275 int decodeFrame();
276 bool readAudio(uint8_t *samples, int length);
/* Audio thread entry point. */
278 int handler();
/* Video playback state for one stream: decodes frames with libavcodec,
 * converts with swscale, and queues pictures for the main thread to display
 * with SDL. The picture ring buffer is guarded by mPictQMutex/mPictQCond. */
281 struct VideoState {
282 MovieState &mMovie;
284 AVStream *mStream{nullptr};
285 AVCodecCtxPtr mCodecCtx;
287 std::mutex mQueueMtx;
288 std::condition_variable mQueueCond;
290 nanoseconds mClock{0};
/* Predicted display time of the next frame, in av_gettime units. */
291 nanoseconds mFrameTimer{0};
292 nanoseconds mFrameLastPts{0};
293 nanoseconds mFrameLastDelay{0};
294 nanoseconds mCurrentPts{0};
295 /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
296 microseconds mCurrentPtsTime{0};
298 /* Decompressed video frame, and swscale context for conversion */
299 AVFramePtr mDecodedFrame;
300 SwsContextPtr mSwscaleCtx;
/* One slot of the picture ring buffer; owns its SDL texture. */
302 struct Picture {
303 SDL_Texture *mImage{nullptr};
304 int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
305 std::atomic<bool> mUpdated{false};
306 nanoseconds mPts{0};
308 ~Picture()
310 if(mImage)
311 SDL_DestroyTexture(mImage);
312 mImage = nullptr;
/* Fixed-size ring buffer of decoded pictures, with read/write cursors. */
315 std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
316 size_t mPictQSize{0}, mPictQRead{0}, mPictQWrite{0};
317 std::mutex mPictQMutex;
318 std::condition_variable mPictQCond;
319 bool mFirstUpdate{true};
320 std::atomic<bool> mEOS{false};
321 std::atomic<bool> mFinalUpdate{false};
323 VideoState(MovieState &movie) : mMovie(movie) { }
325 nanoseconds getClock();
326 bool isBufferFilled();
/* SDL timer callback; pushes FF_REFRESH_EVENT to the main event loop. */
328 static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
329 void schedRefresh(milliseconds delay);
330 void display(SDL_Window *screen, SDL_Renderer *renderer);
331 void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
332 void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
333 int queuePicture(nanoseconds pts);
/* Video thread entry point. */
334 int handler();
/* Top-level playback state: owns the demuxer, both stream states, and the
 * parse/audio/video threads. The parse thread feeds packets; mSendMtx and
 * mSendDataGood coordinate demand for more data from the decoders. */
337 struct MovieState {
338 AVIOContextPtr mIOContext;
339 AVFormatCtxPtr mFormatCtx;
341 SyncMaster mAVSyncType{SyncMaster::Default};
/* External-clock base time, set when playback starts. */
343 microseconds mClockBase{0};
344 std::atomic<bool> mPlaying{false};
346 std::mutex mSendMtx;
347 std::condition_variable mSendCond;
348 /* NOTE: false/clear = need data, true/set = no data needed */
349 std::atomic_flag mSendDataGood;
351 std::atomic<bool> mQuit{false};
353 AudioState mAudio;
354 VideoState mVideo;
356 std::thread mParseThread;
357 std::thread mAudioThread;
358 std::thread mVideoThread;
360 std::string mFilename;
362 MovieState(std::string fname)
363 : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
/* Signals quit and joins the parse thread (which joins the others —
 * presumably; confirm in parse_handler, not visible here). */
365 ~MovieState()
367 mQuit = true;
368 if(mParseThread.joinable())
369 mParseThread.join();
/* ffmpeg blocking-I/O interrupt callback; checks mQuit. */
372 static int decode_interrupt_cb(void *ctx);
373 bool prepare();
374 void setTitle(SDL_Window *window);
376 nanoseconds getClock();
378 nanoseconds getMasterClock();
380 nanoseconds getDuration();
382 int streamComponentOpen(int stream_index);
/* Parse (demux) thread entry point. */
383 int parse_handler();
/* Returns the stream time of the sample currently being heard. Prefers the
 * device clock (ALC_SOFT_device_clock) when available, otherwise derives the
 * time from the source's offset/latency. Caller must hold mSrcMutex. */
387 nanoseconds AudioState::getClockNoLock()
389 // The audio clock is the timestamp of the sample currently being heard.
390 if(alcGetInteger64vSOFT)
392 // If device start time = min, we aren't playing yet.
393 if(mDeviceStartTime == nanoseconds::min())
394 return nanoseconds::zero();
396 // Get the current device clock time and latency.
397 auto device = alcGetContextsDevice(alcGetCurrentContext());
398 ALCint64SOFT devtimes[2] = {0,0};
399 alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
400 auto latency = nanoseconds(devtimes[1]);
401 auto device_time = nanoseconds(devtimes[0]);
403 // The clock is simply the current device time relative to the recorded
404 // start time. We can also subtract the latency to get a more accurate
405 // position of where the audio device actually is in the output stream.
406 return device_time - mDeviceStartTime - latency;
409 /* The source-based clock is based on 4 components:
410 * 1 - The timestamp of the next sample to buffer (mCurrentPts)
411 * 2 - The length of the source's buffer queue
412 * (AudioBufferTime*AL_BUFFERS_QUEUED)
413 * 3 - The offset OpenAL is currently at in the source (the first value
414 * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
415 * 4 - The latency between OpenAL and the DAC (the second value from
416 * AL_SAMPLE_OFFSET_LATENCY_SOFT)
418 * Subtracting the length of the source queue from the next sample's
419 * timestamp gives the timestamp of the sample at the start of the source
420 * queue. Adding the source offset to that results in the timestamp for the
421 * sample at OpenAL's current position, and subtracting the source latency
422 * from that gives the timestamp of the sample currently at the DAC.
424 nanoseconds pts = mCurrentPts;
425 if(mSource)
427 ALint64SOFT offset[2];
428 ALint queued;
429 ALint status;
431 /* NOTE: The source state must be checked last, in case an underrun
432 * occurs and the source stops between retrieving the offset+latency
433 * and getting the state. */
434 if(alGetSourcei64vSOFT)
435 alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
436 else
/* Fallback: whole-sample offset in the upper 32 bits, zero latency. */
438 ALint ioffset;
439 alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
440 offset[0] = (ALint64SOFT)ioffset << 32;
441 offset[1] = 0;
443 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
444 alGetSourcei(mSource, AL_SOURCE_STATE, &status);
446 /* If the source is AL_STOPPED, then there was an underrun and all
447 * buffers are processed, so ignore the source queue. The audio thread
448 * will put the source into an AL_INITIAL state and clear the queue
449 * when it starts recovery. */
450 if(status != AL_STOPPED)
/* 32.32 fixed-point sample offset, divided by the rate to get time. */
452 using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
454 pts -= AudioBufferTime*queued;
455 pts += std::chrono::duration_cast<nanoseconds>(
456 fixed32(offset[0] / mCodecCtx->sample_rate)
459 /* Don't offset by the latency if the source isn't playing. */
460 if(status == AL_PLAYING)
461 pts -= nanoseconds(offset[1]);
/* Clamp: the math can momentarily go negative near stream start. */
464 return std::max(pts, nanoseconds::zero());
467 bool AudioState::isBufferFilled()
469 /* All of OpenAL's buffer queueing happens under the mSrcMutex lock, as
470 * does the source gen. So when we're able to grab the lock and the source
471 * is valid, the queue must be full.
473 std::lock_guard<std::mutex> lock(mSrcMutex);
474 return mSource != 0;
477 void AudioState::startPlayback()
479 alSourcePlay(mSource);
480 if(alcGetInteger64vSOFT)
482 using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
484 // Subtract the total buffer queue time from the current pts to get the
485 // pts of the start of the queue.
486 nanoseconds startpts = mCurrentPts - AudioBufferTotalTime;
487 int64_t srctimes[2]={0,0};
488 alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
489 auto device_time = nanoseconds(srctimes[1]);
490 auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32(srctimes[0])) /
491 mCodecCtx->sample_rate;
493 // The mixer may have ticked and incremented the device time and sample
494 // offset, so subtract the source offset from the device time to get
495 // the device time the source started at. Also subtract startpts to get
496 // the device time the stream would have started at to reach where it
497 // is now.
498 mDeviceStartTime = device_time - src_offset - startpts;
502 int AudioState::getSync()
504 if(mMovie.mAVSyncType == SyncMaster::Audio)
505 return 0;
507 auto ref_clock = mMovie.getMasterClock();
508 auto diff = ref_clock - getClockNoLock();
510 if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
512 /* Difference is TOO big; reset accumulated average */
513 mClockDiffAvg = seconds_d64::zero();
514 return 0;
517 /* Accumulate the diffs */
518 mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
519 auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
520 if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
521 return 0;
523 /* Constrain the per-update difference to avoid exceedingly large skips */
524 diff = std::min<nanoseconds>(std::max<nanoseconds>(diff, -AudioSampleCorrectionMax),
525 AudioSampleCorrectionMax);
526 return (int)std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count();
529 int AudioState::decodeFrame()
531 while(!mMovie.mQuit.load(std::memory_order_relaxed))
533 std::unique_lock<std::mutex> lock(mQueueMtx);
534 int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
535 if(ret == AVERROR(EAGAIN))
537 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
538 std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
539 mMovie.mSendCond.notify_one();
540 do {
541 mQueueCond.wait(lock);
542 ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
543 } while(ret == AVERROR(EAGAIN));
545 lock.unlock();
546 if(ret == AVERROR_EOF) break;
547 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
548 mMovie.mSendCond.notify_one();
549 if(ret < 0)
551 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
552 return 0;
555 if(mDecodedFrame->nb_samples <= 0)
557 av_frame_unref(mDecodedFrame.get());
558 continue;
561 /* If provided, update w/ pts */
562 if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
563 mCurrentPts = std::chrono::duration_cast<nanoseconds>(
564 seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
567 if(mDecodedFrame->nb_samples > mSamplesMax)
569 av_freep(&mSamples);
570 av_samples_alloc(
571 &mSamples, nullptr, mCodecCtx->channels,
572 mDecodedFrame->nb_samples, mDstSampleFmt, 0
574 mSamplesMax = mDecodedFrame->nb_samples;
576 /* Return the amount of sample frames converted */
577 int data_size = swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
578 (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
581 av_frame_unref(mDecodedFrame.get());
582 return data_size;
585 return 0;
/* Duplicates the sample frame at `in` into `out`, `count` times. The frame
 * size must be a multiple of sizeof(T); when it equals sizeof(T) the frame
 * is a single element and can be filled directly. */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
{
    auto *src = reinterpret_cast<const T*>(in);
    auto *dst = reinterpret_cast<T*>(out);

    if(frame_size == sizeof(T))
    {
        std::fill_n(dst, count, *src);
        return;
    }

    /* NOTE: frame_size is a multiple of sizeof(T); replicate the whole
     * multi-element frame for each output frame. */
    const int type_mult = frame_size / static_cast<int>(sizeof(T));
    for(int frame = 0;frame < count;++frame)
    {
        for(int j = 0;j < type_mult;++j)
            *(dst++) = src[j];
    }
}
615 bool AudioState::readAudio(uint8_t *samples, int length)
617 int sample_skip = getSync();
618 int audio_size = 0;
620 /* Read the next chunk of data, refill the buffer, and queue it
621 * on the source */
622 length /= mFrameSize;
623 while(audio_size < length)
625 if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
627 int frame_len = decodeFrame();
628 if(frame_len <= 0) break;
630 mSamplesLen = frame_len;
631 mSamplesPos = std::min(mSamplesLen, sample_skip);
632 sample_skip -= mSamplesPos;
634 // Adjust the device start time and current pts by the amount we're
635 // skipping/duplicating, so that the clock remains correct for the
636 // current stream position.
637 auto skip = nanoseconds(seconds(mSamplesPos)) / mCodecCtx->sample_rate;
638 mDeviceStartTime -= skip;
639 mCurrentPts += skip;
640 continue;
643 int rem = length - audio_size;
644 if(mSamplesPos >= 0)
646 int len = mSamplesLen - mSamplesPos;
647 if(rem > len) rem = len;
648 memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
650 else
652 rem = std::min(rem, -mSamplesPos);
654 /* Add samples by copying the first sample */
655 if((mFrameSize&7) == 0)
656 sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
657 else if((mFrameSize&3) == 0)
658 sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
659 else if((mFrameSize&1) == 0)
660 sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
661 else
662 sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
665 mSamplesPos += rem;
666 mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
667 samples += rem*mFrameSize;
668 audio_size += rem;
670 if(audio_size <= 0)
671 return false;
673 if(audio_size < length)
675 int rem = length - audio_size;
676 std::fill_n(samples, rem*mFrameSize,
677 (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
678 mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
679 audio_size += rem;
681 return true;
685 void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALuint param,
686 ALsizei length, const ALchar *message,
687 void *userParam)
689 AudioState *self = reinterpret_cast<AudioState*>(userParam);
691 if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
693 /* Temporarily lock the source mutex to ensure it's not between
694 * checking the processed count and going to sleep.
696 std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
697 self->mSrcCond.notify_one();
698 return;
701 std::cout<< "---- AL Event on AudioState "<<self<<" ----\nEvent: ";
702 switch(eventType)
704 case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
705 case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
706 case AL_EVENT_TYPE_ERROR_SOFT: std::cout<< "API error"; break;
707 case AL_EVENT_TYPE_PERFORMANCE_SOFT: std::cout<< "Performance"; break;
708 case AL_EVENT_TYPE_DEPRECATED_SOFT: std::cout<< "Deprecated"; break;
709 case AL_EVENT_TYPE_DISCONNECTED_SOFT: std::cout<< "Disconnected"; break;
710 default: std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<
711 std::dec<<std::setw(0)<<std::setfill(' '); break;
713 std::cout<< "\n"
714 "Object ID: "<<object<<'\n'<<
715 "Parameter: "<<param<<'\n'<<
716 "Message: "<<std::string(message, length)<<"\n----"<<
717 std::endl;
719 if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
721 { std::lock_guard<std::mutex> lock(self->mSrcMutex);
722 self->mConnected.clear(std::memory_order_release);
724 std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
725 self->mSrcCond.notify_one();
729 int AudioState::handler()
731 const std::array<ALenum,6> types{{
732 AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
733 AL_EVENT_TYPE_ERROR_SOFT, AL_EVENT_TYPE_PERFORMANCE_SOFT, AL_EVENT_TYPE_DEPRECATED_SOFT,
734 AL_EVENT_TYPE_DISCONNECTED_SOFT
736 std::unique_lock<std::mutex> lock(mSrcMutex);
737 milliseconds sleep_time = AudioBufferTime / 3;
738 ALenum fmt;
740 if(alEventControlSOFT)
742 alEventControlSOFT(types.size(), types.data(), AL_TRUE);
743 alEventCallbackSOFT(EventCallback, this);
744 sleep_time = AudioBufferTotalTime;
747 /* Find a suitable format for OpenAL. */
748 mDstChanLayout = 0;
749 if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
751 mDstSampleFmt = AV_SAMPLE_FMT_U8;
752 mFrameSize = 1;
753 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
754 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
755 (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
757 mDstChanLayout = mCodecCtx->channel_layout;
758 mFrameSize *= 8;
759 mFormat = fmt;
761 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
762 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
763 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
764 (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
766 mDstChanLayout = mCodecCtx->channel_layout;
767 mFrameSize *= 6;
768 mFormat = fmt;
770 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
772 mDstChanLayout = mCodecCtx->channel_layout;
773 mFrameSize *= 1;
774 mFormat = AL_FORMAT_MONO8;
776 if(!mDstChanLayout)
778 mDstChanLayout = AV_CH_LAYOUT_STEREO;
779 mFrameSize *= 2;
780 mFormat = AL_FORMAT_STEREO8;
783 if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
784 alIsExtensionPresent("AL_EXT_FLOAT32"))
786 mDstSampleFmt = AV_SAMPLE_FMT_FLT;
787 mFrameSize = 4;
788 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
789 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
790 (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
792 mDstChanLayout = mCodecCtx->channel_layout;
793 mFrameSize *= 8;
794 mFormat = fmt;
796 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
797 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
798 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
799 (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
801 mDstChanLayout = mCodecCtx->channel_layout;
802 mFrameSize *= 6;
803 mFormat = fmt;
805 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
807 mDstChanLayout = mCodecCtx->channel_layout;
808 mFrameSize *= 1;
809 mFormat = AL_FORMAT_MONO_FLOAT32;
811 if(!mDstChanLayout)
813 mDstChanLayout = AV_CH_LAYOUT_STEREO;
814 mFrameSize *= 2;
815 mFormat = AL_FORMAT_STEREO_FLOAT32;
818 if(!mDstChanLayout)
820 mDstSampleFmt = AV_SAMPLE_FMT_S16;
821 mFrameSize = 2;
822 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
823 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
824 (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
826 mDstChanLayout = mCodecCtx->channel_layout;
827 mFrameSize *= 8;
828 mFormat = fmt;
830 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
831 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
832 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
833 (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
835 mDstChanLayout = mCodecCtx->channel_layout;
836 mFrameSize *= 6;
837 mFormat = fmt;
839 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
841 mDstChanLayout = mCodecCtx->channel_layout;
842 mFrameSize *= 1;
843 mFormat = AL_FORMAT_MONO16;
845 if(!mDstChanLayout)
847 mDstChanLayout = AV_CH_LAYOUT_STEREO;
848 mFrameSize *= 2;
849 mFormat = AL_FORMAT_STEREO16;
852 void *samples = nullptr;
853 ALsizei buffer_len = std::chrono::duration_cast<std::chrono::duration<int>>(
854 mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize;
856 mSamples = NULL;
857 mSamplesMax = 0;
858 mSamplesPos = 0;
859 mSamplesLen = 0;
861 mDecodedFrame.reset(av_frame_alloc());
862 if(!mDecodedFrame)
864 std::cerr<< "Failed to allocate audio frame" <<std::endl;
865 goto finish;
868 mSwresCtx.reset(swr_alloc_set_opts(nullptr,
869 mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
870 mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
871 (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
872 mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
873 0, nullptr
875 if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
877 std::cerr<< "Failed to initialize audio converter" <<std::endl;
878 goto finish;
881 mBuffers.assign(AudioBufferTotalTime / AudioBufferTime, 0);
882 alGenBuffers(mBuffers.size(), mBuffers.data());
883 alGenSources(1, &mSource);
885 if(EnableDirectOut)
886 alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
888 if(alGetError() != AL_NO_ERROR)
889 goto finish;
891 if(!alBufferStorageSOFT)
892 samples = av_malloc(buffer_len);
893 else
895 for(ALuint bufid : mBuffers)
896 alBufferStorageSOFT(bufid, mFormat, nullptr, buffer_len, mCodecCtx->sample_rate,
897 AL_MAP_WRITE_BIT_SOFT);
898 if(alGetError() != AL_NO_ERROR)
900 fprintf(stderr, "Failed to use mapped buffers\n");
901 samples = av_malloc(buffer_len);
905 while(alGetError() == AL_NO_ERROR && !mMovie.mQuit.load(std::memory_order_relaxed) &&
906 mConnected.test_and_set(std::memory_order_relaxed))
908 /* First remove any processed buffers. */
909 ALint processed;
910 alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
911 while(processed > 0)
913 std::array<ALuint,4> bids;
914 alSourceUnqueueBuffers(mSource, std::min<ALsizei>(bids.size(), processed),
915 bids.data());
916 processed -= std::min<ALsizei>(bids.size(), processed);
919 /* Refill the buffer queue. */
920 ALint queued;
921 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
922 while((ALuint)queued < mBuffers.size())
924 ALuint bufid = mBuffers[mBufferIdx];
926 uint8_t *ptr = reinterpret_cast<uint8_t*>(
927 samples ? samples : alMapBufferSOFT(bufid, 0, buffer_len, AL_MAP_WRITE_BIT_SOFT)
929 if(!ptr) break;
931 /* Read the next chunk of data, filling the buffer, and queue it on
932 * the source */
933 bool got_audio = readAudio(ptr, buffer_len);
934 if(!samples) alUnmapBufferSOFT(bufid);
935 if(!got_audio) break;
937 if(samples)
938 alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate);
940 alSourceQueueBuffers(mSource, 1, &bufid);
941 mBufferIdx = (mBufferIdx+1) % mBuffers.size();
942 ++queued;
944 if(queued == 0)
945 break;
947 /* Check that the source is playing. */
948 ALint state;
949 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
950 if(state == AL_STOPPED)
952 /* AL_STOPPED means there was an underrun. Clear the buffer queue
953 * since this likely means we're late, and rewind the source to get
954 * it back into an AL_INITIAL state.
956 alSourceRewind(mSource);
957 alSourcei(mSource, AL_BUFFER, 0);
958 continue;
961 /* (re)start the source if needed, and wait for a buffer to finish */
962 if(state != AL_PLAYING && state != AL_PAUSED &&
963 mMovie.mPlaying.load(std::memory_order_relaxed))
964 startPlayback();
966 mSrcCond.wait_for(lock, sleep_time);
969 alSourceRewind(mSource);
970 alSourcei(mSource, AL_BUFFER, 0);
972 finish:
973 av_freep(&samples);
975 if(alEventControlSOFT)
977 alEventControlSOFT(types.size(), types.data(), AL_FALSE);
978 alEventCallbackSOFT(nullptr, nullptr);
981 return 0;
985 nanoseconds VideoState::getClock()
987 /* NOTE: This returns incorrect times while not playing. */
988 auto delta = get_avtime() - mCurrentPtsTime;
989 return mCurrentPts + delta;
992 bool VideoState::isBufferFilled()
994 std::unique_lock<std::mutex> lock(mPictQMutex);
995 return mPictQSize >= mPictQ.size();
998 Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
1000 SDL_Event evt{};
1001 evt.user.type = FF_REFRESH_EVENT;
1002 evt.user.data1 = opaque;
1003 SDL_PushEvent(&evt);
1004 return 0; /* 0 means stop timer */
1007 /* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
1008 void VideoState::schedRefresh(milliseconds delay)
1010 SDL_AddTimer(delay.count(), sdl_refresh_timer_cb, this);
1013 /* Called by VideoState::refreshTimer to display the next video frame. */
1014 void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
1016 Picture *vp = &mPictQ[mPictQRead];
1018 if(!vp->mImage)
1019 return;
1021 float aspect_ratio;
1022 int win_w, win_h;
1023 int w, h, x, y;
1025 if(mCodecCtx->sample_aspect_ratio.num == 0)
1026 aspect_ratio = 0.0f;
1027 else
1029 aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
1030 mCodecCtx->height;
1032 if(aspect_ratio <= 0.0f)
1033 aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;
1035 SDL_GetWindowSize(screen, &win_w, &win_h);
1036 h = win_h;
1037 w = ((int)rint(h * aspect_ratio) + 3) & ~3;
1038 if(w > win_w)
1040 w = win_w;
1041 h = ((int)rint(w / aspect_ratio) + 3) & ~3;
1043 x = (win_w - w) / 2;
1044 y = (win_h - h) / 2;
1046 SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
1047 SDL_Rect dst_rect{ x, y, w, h };
1048 SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
1049 SDL_RenderPresent(renderer);
1052 /* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
1053 * was created. It handles the display of the next decoded video frame (if not
1054 * falling behind), and sets up the timer for the following video frame.
1056 void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
// No stream: either decoding already hit end-of-stream (signal the final
// update so any waiters can exit) or the stream isn't open yet (poll later).
1058 if(!mStream)
1060 if(mEOS)
1062 mFinalUpdate = true;
// Briefly acquire and release the queue mutex so the mFinalUpdate write is
// ordered with respect to threads blocked on mPictQCond before notifying.
1063 std::unique_lock<std::mutex>(mPictQMutex).unlock();
1064 mPictQCond.notify_all();
1065 return;
1067 schedRefresh(milliseconds(100));
1068 return;
// Playback hasn't started yet; check again very soon.
1070 if(!mMovie.mPlaying.load(std::memory_order_relaxed))
1072 schedRefresh(milliseconds(1));
1073 return;
1076 std::unique_lock<std::mutex> lock(mPictQMutex);
1077 retry:
// Queue empty: finish up on EOS, otherwise poll for the next picture.
1078 if(mPictQSize == 0)
1080 if(mEOS)
1081 mFinalUpdate = true;
1082 else
1083 schedRefresh(milliseconds(1));
1084 lock.unlock();
1085 mPictQCond.notify_all();
1086 return;
1089 Picture *vp = &mPictQ[mPictQRead];
// Record which pts is on screen and when it got there (used for the video
// clock).
1090 mCurrentPts = vp->mPts;
1091 mCurrentPtsTime = get_avtime();
1093 /* Get delay using the frame pts and the pts from last frame. */
1094 auto delay = vp->mPts - mFrameLastPts;
1095 if(delay <= seconds::zero() || delay >= seconds(1))
1097 /* If incorrect delay, use previous one. */
1098 delay = mFrameLastDelay;
1100 /* Save for next frame. */
1101 mFrameLastDelay = delay;
1102 mFrameLastPts = vp->mPts;
1104 /* Update delay to sync to clock if not master source. */
1105 if(mMovie.mAVSyncType != SyncMaster::Video)
1107 auto ref_clock = mMovie.getMasterClock();
1108 auto diff = vp->mPts - ref_clock;
1110 /* Skip or repeat the frame. Take delay into account. */
1111 auto sync_threshold = std::min<nanoseconds>(delay, VideoSyncThreshold);
// NOTE(review): as written, the skip/repeat adjustment runs only when the
// drift is OUTSIDE the no-sync window; classic ffplay applies it when the
// drift is WITHIN that window — confirm the negation here is intentional.
1112 if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
1114 if(diff <= -sync_threshold)
1115 delay = nanoseconds::zero();
1116 else if(diff >= sync_threshold)
1117 delay *= 2;
1121 mFrameTimer += delay;
1122 /* Compute the REAL delay. */
1123 auto actual_delay = mFrameTimer - get_avtime();
1124 if(!(actual_delay >= VideoSyncThreshold))
1126 /* We don't have time to handle this picture, just skip to the next one. */
1127 mPictQRead = (mPictQRead+1)%mPictQ.size();
1128 mPictQSize--;
1129 goto retry;
1131 schedRefresh(std::chrono::duration_cast<milliseconds>(actual_delay));
1133 /* Show the picture! */
1134 display(screen, renderer);
1136 /* Update queue for next picture. */
1137 mPictQRead = (mPictQRead+1)%mPictQ.size();
1138 mPictQSize--;
1139 lock.unlock();
1140 mPictQCond.notify_all();
1143 /* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
1144 * main thread where the renderer was created.
1146 void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
1148 Picture *vp = &mPictQ[mPictQWrite];
1149 bool fmt_updated = false;
1151 /* allocate or resize the buffer! */
1152 if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
1154 fmt_updated = true;
1155 if(vp->mImage)
1156 SDL_DestroyTexture(vp->mImage);
// Size the texture to the coded dimensions, which can exceed the displayed
// width/height due to codec padding.
1157 vp->mImage = SDL_CreateTexture(
1158 renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
1159 mCodecCtx->coded_width, mCodecCtx->coded_height
1161 if(!vp->mImage)
1162 std::cerr<< "Failed to create YV12 texture!" <<std::endl;
1163 vp->mWidth = mCodecCtx->width;
1164 vp->mHeight = mCodecCtx->height;
1166 if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
1168 /* For the first update, set the window size to the video size. */
1169 mFirstUpdate = false;
1171 int w = vp->mWidth;
1172 int h = vp->mHeight;
// Scale the window by the sample aspect ratio so pixels display square.
1173 if(mCodecCtx->sample_aspect_ratio.den != 0)
1175 double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
1176 if(aspect_ratio >= 1.0)
1177 w = (int)(w*aspect_ratio + 0.5);
1178 else if(aspect_ratio > 0.0)
1179 h = (int)(h/aspect_ratio + 0.5);
1181 SDL_SetWindowSize(screen, w, h);
1185 if(vp->mImage)
1187 AVFrame *frame = mDecodedFrame.get();
1188 void *pixels = nullptr;
1189 int pitch = 0;
// Fast path: the frame is already YUV420P, so upload the planes directly.
1191 if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
1192 SDL_UpdateYUVTexture(vp->mImage, nullptr,
1193 frame->data[0], frame->linesize[0],
1194 frame->data[1], frame->linesize[1],
1195 frame->data[2], frame->linesize[2]
1197 else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
1198 std::cerr<< "Failed to lock texture" <<std::endl;
1199 else
1201 // Convert the image into YUV format that SDL uses
1202 int coded_w = mCodecCtx->coded_width;
1203 int coded_h = mCodecCtx->coded_height;
1204 int w = mCodecCtx->width;
1205 int h = mCodecCtx->height;
// (Re)build the swscale context whenever the source format/size changed.
1206 if(!mSwscaleCtx || fmt_updated)
1208 mSwscaleCtx.reset(sws_getContext(
1209 w, h, mCodecCtx->pix_fmt,
1210 w, h, AV_PIX_FMT_YUV420P, 0,
1211 nullptr, nullptr, nullptr
1215 /* point pict at the queue */
// NOTE(review): the chroma-plane offsets assume the locked buffer is laid
// out as contiguous coded_w x coded_h planes; if pitch != coded_w these
// offsets may be wrong — confirm against SDL's IYUV layout.
1216 uint8_t *pict_data[3];
1217 pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
1218 pict_data[1] = pict_data[0] + coded_w*coded_h;
1219 pict_data[2] = pict_data[1] + coded_w*coded_h/4;
1221 int pict_linesize[3];
1222 pict_linesize[0] = pitch;
1223 pict_linesize[1] = pitch / 2;
1224 pict_linesize[2] = pitch / 2;
1226 sws_scale(mSwscaleCtx.get(), (const uint8_t**)frame->data,
1227 frame->linesize, 0, h, pict_data, pict_linesize);
1228 SDL_UnlockTexture(vp->mImage);
// Publish completion and wake the decode thread waiting in queuePicture.
1232 vp->mUpdated.store(true, std::memory_order_release);
1233 std::unique_lock<std::mutex>(mPictQMutex).unlock();
1234 mPictQCond.notify_one();
1237 int VideoState::queuePicture(nanoseconds pts)
// Queues the most recently decoded frame for display. Blocks until the
// picture queue has room, hands texture upload to the main thread via
// FF_UPDATE_EVENT, then waits for it to finish before publishing the entry.
// Returns 0 on success, -1 if playback is quitting.
1239 /* Wait until we have space for a new pic */
1240 std::unique_lock<std::mutex> lock(mPictQMutex);
1241 while(mPictQSize >= mPictQ.size() && !mMovie.mQuit.load(std::memory_order_relaxed))
1242 mPictQCond.wait(lock);
1243 lock.unlock();
1245 if(mMovie.mQuit.load(std::memory_order_relaxed))
1246 return -1;
1248 Picture *vp = &mPictQ[mPictQWrite];
1250 /* We have to create/update the picture in the main thread */
1251 vp->mUpdated.store(false, std::memory_order_relaxed);
1252 SDL_Event evt{};
1253 evt.user.type = FF_UPDATE_EVENT;
1254 evt.user.data1 = this;
1255 SDL_PushEvent(&evt);
1257 /* Wait until the picture is updated. */
1258 lock.lock();
1259 while(!vp->mUpdated.load(std::memory_order_relaxed))
// Bail out if a quit was requested while waiting on the main thread.
1261 if(mMovie.mQuit.load(std::memory_order_relaxed))
1262 return -1;
1263 mPictQCond.wait(lock);
1265 if(mMovie.mQuit.load(std::memory_order_relaxed))
1266 return -1;
1267 vp->mPts = pts;
// Publish the new entry; the refresh timer consumes from mPictQRead.
1269 mPictQWrite = (mPictQWrite+1)%mPictQ.size();
1270 mPictQSize++;
1271 lock.unlock();
1273 return 0;
1276 int VideoState::handler()
// Video decoding thread entry point: pulls decoded frames from the codec,
// derives a presentation timestamp from the stream clock, and queues each
// frame for display. Runs until quit or end-of-stream; always returns 0.
1278 mDecodedFrame.reset(av_frame_alloc())
1279 while(!mMovie.mQuit.load(std::memory_order_relaxed))
1281 std::unique_lock<std::mutex> lock(mQueueMtx);
1282 /* Decode video frame */
1283 int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
1284 if(ret == AVERROR(EAGAIN))
// Decoder starved: clear the send flag so the parse thread pushes more
// packets, then wait on the queue condition until a frame comes out.
1286 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
1287 std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
1288 mMovie.mSendCond.notify_one();
1289 do {
1290 mQueueCond.wait(lock);
1291 ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
1292 } while(ret == AVERROR(EAGAIN));
1294 lock.unlock();
1295 if(ret == AVERROR_EOF) break;
1296 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
1297 mMovie.mSendCond.notify_one();
1298 if(ret < 0)
1300 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
1301 continue;
1304 /* Get the PTS for this frame. */
1305 nanoseconds pts;
1306 if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
1307 mClock = std::chrono::duration_cast<nanoseconds>(
1308 seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
1310 pts = mClock;
1312 /* Update the video clock to the next expected PTS. */
1313 auto frame_delay = av_q2d(mCodecCtx->time_base);
1314 frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
1315 mClock += std::chrono::duration_cast<nanoseconds>(seconds_d64(frame_delay));
1317 if(queuePicture(pts) < 0)
1318 break;
1319 av_frame_unref(mDecodedFrame.get());
1321 mEOS = true;
// On quit, drop everything still queued, then wait for the main thread's
// final refresh so nothing touches this state after we return.
1323 std::unique_lock<std::mutex> lock(mPictQMutex);
1324 if(mMovie.mQuit.load(std::memory_order_relaxed))
1326 mPictQRead = 0;
1327 mPictQWrite = 0;
1328 mPictQSize = 0;
1330 while(!mFinalUpdate)
1331 mPictQCond.wait(lock);
1333 return 0;
1337 int MovieState::decode_interrupt_cb(void *ctx)
1339 return reinterpret_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
1342 bool MovieState::prepare()
1344 AVIOContext *avioctx = nullptr;
1345 AVIOInterruptCB intcb = { decode_interrupt_cb, this };
1346 if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
1348 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1349 return false;
1351 mIOContext.reset(avioctx);
1353 /* Open movie file. If avformat_open_input fails it will automatically free
1354 * this context, so don't set it onto a smart pointer yet.
1356 AVFormatContext *fmtctx = avformat_alloc_context();
1357 fmtctx->pb = mIOContext.get();
1358 fmtctx->interrupt_callback = intcb;
1359 if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
1361 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1362 return false;
1364 mFormatCtx.reset(fmtctx);
1366 /* Retrieve stream information */
1367 if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
1369 std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
1370 return false;
1373 mVideo.schedRefresh(milliseconds(40));
1375 mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
1376 return true;
1379 void MovieState::setTitle(SDL_Window *window)
1381 auto pos1 = mFilename.rfind('/');
1382 auto pos2 = mFilename.rfind('\\');
1383 auto fpos = ((pos1 == std::string::npos) ? pos2 :
1384 (pos2 == std::string::npos) ? pos1 :
1385 std::max(pos1, pos2)) + 1;
1386 SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1389 nanoseconds MovieState::getClock()
1391 if(!mPlaying.load(std::memory_order_relaxed))
1392 return nanoseconds::zero();
1393 return get_avtime() - mClockBase;
1396 nanoseconds MovieState::getMasterClock()
1398 if(mAVSyncType == SyncMaster::Video)
1399 return mVideo.getClock();
1400 if(mAVSyncType == SyncMaster::Audio)
1401 return mAudio.getClock();
1402 return getClock();
1405 nanoseconds MovieState::getDuration()
1406 { return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
1408 int MovieState::streamComponentOpen(int stream_index)
1410 if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
1411 return -1;
1413 /* Get a pointer to the codec context for the stream, and open the
1414 * associated codec.
1416 AVCodecCtxPtr avctx(avcodec_alloc_context3(nullptr));
1417 if(!avctx) return -1;
1419 if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
1420 return -1;
1422 AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
1423 if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
1425 std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
1426 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
1427 return -1;
1430 /* Initialize and start the media type handler */
1431 switch(avctx->codec_type)
1433 case AVMEDIA_TYPE_AUDIO:
1434 mAudio.mStream = mFormatCtx->streams[stream_index];
1435 mAudio.mCodecCtx = std::move(avctx);
1437 mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
1438 break;
1440 case AVMEDIA_TYPE_VIDEO:
1441 mVideo.mStream = mFormatCtx->streams[stream_index];
1442 mVideo.mCodecCtx = std::move(avctx);
1444 mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
1445 break;
1447 default:
1448 return -1;
1451 return stream_index;
1454 int MovieState::parse_handler()
1456 int video_index = -1;
1457 int audio_index = -1;
1459 /* Dump information about file onto standard error */
1460 av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);
1462 /* Find the first video and audio streams */
1463 for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
1465 auto codecpar = mFormatCtx->streams[i]->codecpar;
1466 if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
1467 video_index = streamComponentOpen(i);
1468 else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
1469 audio_index = streamComponentOpen(i);
1472 if(video_index < 0 && audio_index < 0)
1474 std::cerr<< mFilename<<": could not open codecs" <<std::endl;
1475 mQuit = true;
1478 PacketQueue audio_queue, video_queue;
1479 bool input_finished = false;
1481 /* Main packet reading/dispatching loop */
1482 while(!mQuit.load(std::memory_order_relaxed) && !input_finished)
1484 AVPacket packet;
1485 if(av_read_frame(mFormatCtx.get(), &packet) < 0)
1486 input_finished = true;
1487 else
1489 /* Copy the packet into the queue it's meant for. */
1490 if(packet.stream_index == video_index)
1491 video_queue.put(&packet);
1492 else if(packet.stream_index == audio_index)
1493 audio_queue.put(&packet);
1494 av_packet_unref(&packet);
1497 do {
1498 /* Send whatever queued packets we have. */
1499 if(!audio_queue.empty())
1501 std::unique_lock<std::mutex> lock(mAudio.mQueueMtx);
1502 int ret;
1503 do {
1504 ret = avcodec_send_packet(mAudio.mCodecCtx.get(), audio_queue.front());
1505 if(ret != AVERROR(EAGAIN)) audio_queue.pop();
1506 } while(ret != AVERROR(EAGAIN) && !audio_queue.empty());
1507 lock.unlock();
1508 mAudio.mQueueCond.notify_one();
1510 if(!video_queue.empty())
1512 std::unique_lock<std::mutex> lock(mVideo.mQueueMtx);
1513 int ret;
1514 do {
1515 ret = avcodec_send_packet(mVideo.mCodecCtx.get(), video_queue.front());
1516 if(ret != AVERROR(EAGAIN)) video_queue.pop();
1517 } while(ret != AVERROR(EAGAIN) && !video_queue.empty());
1518 lock.unlock();
1519 mVideo.mQueueCond.notify_one();
1521 /* If the queues are completely empty, or it's not full and there's
1522 * more input to read, go get more.
1524 size_t queue_size = audio_queue.totalSize() + video_queue.totalSize();
1525 if(queue_size == 0 || (queue_size < MAX_QUEUE_SIZE && !input_finished))
1526 break;
1528 if(!mPlaying.load(std::memory_order_relaxed))
1530 if((!mAudio.mCodecCtx || mAudio.isBufferFilled()) &&
1531 (!mVideo.mCodecCtx || mVideo.isBufferFilled()))
1533 /* Set the base time 50ms ahead of the current av time. */
1534 mClockBase = get_avtime() + milliseconds(50);
1535 mVideo.mCurrentPtsTime = mClockBase;
1536 mVideo.mFrameTimer = mVideo.mCurrentPtsTime;
1537 mAudio.startPlayback();
1538 mPlaying.store(std::memory_order_release);
1541 /* Nothing to send or get for now, wait a bit and try again. */
1542 { std::unique_lock<std::mutex> lock(mSendMtx);
1543 if(mSendDataGood.test_and_set(std::memory_order_relaxed))
1544 mSendCond.wait_for(lock, milliseconds(10));
1546 } while(!mQuit.load(std::memory_order_relaxed));
1548 /* Pass a null packet to finish the send buffers (the receive functions
1549 * will get AVERROR_EOF when emptied).
1551 if(mVideo.mCodecCtx)
1553 { std::lock_guard<std::mutex> lock(mVideo.mQueueMtx);
1554 avcodec_send_packet(mVideo.mCodecCtx.get(), nullptr);
1556 mVideo.mQueueCond.notify_one();
1558 if(mAudio.mCodecCtx)
1560 { std::lock_guard<std::mutex> lock(mAudio.mQueueMtx);
1561 avcodec_send_packet(mAudio.mCodecCtx.get(), nullptr);
1563 mAudio.mQueueCond.notify_one();
1565 video_queue.clear();
1566 audio_queue.clear();
1568 /* all done - wait for it */
1569 if(mVideoThread.joinable())
1570 mVideoThread.join();
1571 if(mAudioThread.joinable())
1572 mAudioThread.join();
1574 mVideo.mEOS = true;
1575 std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
1576 while(!mVideo.mFinalUpdate)
1577 mVideo.mPictQCond.wait(lock);
1578 lock.unlock();
1580 SDL_Event evt{};
1581 evt.user.type = FF_MOVIE_DONE_EVENT;
1582 SDL_PushEvent(&evt);
1584 return 0;
1588 // Helper class+method to print the time with human-readable formatting.
1589 struct PrettyTime {
1590 seconds mTime;
1592 inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
1594 using hours = std::chrono::hours;
1595 using minutes = std::chrono::minutes;
1596 using std::chrono::duration_cast;
1598 seconds t = rhs.mTime;
1599 if(t.count() < 0)
1601 os << '-';
1602 t *= -1;
1605 // Only handle up to hour formatting
1606 if(t >= hours(1))
1607 os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
1608 << (duration_cast<minutes>(t).count() % 60) << 'm';
1609 else
1610 os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
1611 os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
1612 << std::setfill(' ');
1613 return os;
1616 } // namespace
1619 int main(int argc, char *argv[])
1621 std::unique_ptr<MovieState> movState;
1623 if(argc < 2)
1625 std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
1626 return 1;
1628 /* Register all formats and codecs */
1629 av_register_all();
1630 /* Initialize networking protocols */
1631 avformat_network_init();
1633 if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
1635 std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
1636 return 1;
1639 /* Make a window to put our video */
1640 SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
1641 if(!screen)
1643 std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
1644 return 1;
1646 /* Make a renderer to handle the texture image surface and rendering. */
1647 Uint32 render_flags = SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC;
1648 SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, render_flags);
1649 if(renderer)
1651 SDL_RendererInfo rinf{};
1652 bool ok = false;
1654 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1655 * software renderer. */
1656 if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1658 for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
1659 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
1661 if(!ok)
1663 std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
1664 SDL_DestroyRenderer(renderer);
1665 renderer = nullptr;
1668 if(!renderer)
1670 render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
1671 renderer = SDL_CreateRenderer(screen, -1, render_flags);
1673 if(!renderer)
1675 std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
1676 return 1;
1678 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1679 SDL_RenderFillRect(renderer, nullptr);
1680 SDL_RenderPresent(renderer);
1682 /* Open an audio device */
1683 int fileidx = 1;
1684 ALCdevice *device = [argc,argv,&fileidx]() -> ALCdevice*
1686 ALCdevice *dev = NULL;
1687 if(argc > 3 && strcmp(argv[1], "-device") == 0)
1689 fileidx = 3;
1690 dev = alcOpenDevice(argv[2]);
1691 if(dev) return dev;
1692 std::cerr<< "Failed to open \""<<argv[2]<<"\" - trying default" <<std::endl;
1694 return alcOpenDevice(nullptr);
1695 }();
1696 ALCcontext *context = alcCreateContext(device, nullptr);
1697 if(!context || alcMakeContextCurrent(context) == ALC_FALSE)
1699 std::cerr<< "Failed to set up audio device" <<std::endl;
1700 if(context)
1701 alcDestroyContext(context);
1702 return 1;
1705 const ALCchar *name = nullptr;
1706 if(alcIsExtensionPresent(device, "ALC_ENUMERATE_ALL_EXT"))
1707 name = alcGetString(device, ALC_ALL_DEVICES_SPECIFIER);
1708 if(!name || alcGetError(device) != AL_NO_ERROR)
1709 name = alcGetString(device, ALC_DEVICE_SPECIFIER);
1710 std::cout<< "Opened \""<<name<<"\"" <<std::endl;
1712 if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
1714 std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
1715 alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
1716 alcGetProcAddress(device, "alcGetInteger64vSOFT")
1720 if(alIsExtensionPresent("AL_SOFT_source_latency"))
1722 std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
1723 alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
1724 alGetProcAddress("alGetSourcei64vSOFT")
1727 if(alIsExtensionPresent("AL_SOFTX_map_buffer"))
1729 std::cout<< "Found AL_SOFT_map_buffer" <<std::endl;
1730 alBufferStorageSOFT = reinterpret_cast<LPALBUFFERSTORAGESOFT>(
1731 alGetProcAddress("alBufferStorageSOFT"));
1732 alMapBufferSOFT = reinterpret_cast<LPALMAPBUFFERSOFT>(
1733 alGetProcAddress("alMapBufferSOFT"));
1734 alUnmapBufferSOFT = reinterpret_cast<LPALUNMAPBUFFERSOFT>(
1735 alGetProcAddress("alUnmapBufferSOFT"));
1737 if(alIsExtensionPresent("AL_SOFTX_events"))
1739 std::cout<< "Found AL_SOFT_events" <<std::endl;
1740 alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
1741 alGetProcAddress("alEventControlSOFT"));
1742 alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
1743 alGetProcAddress("alEventCallbackSOFT"));
1746 if(fileidx < argc && strcmp(argv[fileidx], "-direct") == 0)
1748 ++fileidx;
1749 if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
1750 std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
1751 else
1753 std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
1754 EnableDirectOut = true;
1758 while(fileidx < argc && !movState)
1760 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1761 if(!movState->prepare()) movState = nullptr;
1763 if(!movState)
1765 std::cerr<< "Could not start a video" <<std::endl;
1766 return 1;
1768 movState->setTitle(screen);
1770 /* Default to going to the next movie at the end of one. */
1771 enum class EomAction {
1772 Next, Quit
1773 } eom_action = EomAction::Next;
1774 seconds last_time(-1);
1775 SDL_Event event;
1776 while(1)
1778 int have_evt = SDL_WaitEventTimeout(&event, 10);
1780 auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
1781 if(cur_time != last_time)
1783 auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
1784 std::cout<< "\r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
1785 last_time = cur_time;
1787 if(!have_evt) continue;
1789 switch(event.type)
1791 case SDL_KEYDOWN:
1792 switch(event.key.keysym.sym)
1794 case SDLK_ESCAPE:
1795 movState->mQuit = true;
1796 eom_action = EomAction::Quit;
1797 break;
1799 case SDLK_n:
1800 movState->mQuit = true;
1801 eom_action = EomAction::Next;
1802 break;
1804 default:
1805 break;
1807 break;
1809 case SDL_WINDOWEVENT:
1810 switch(event.window.event)
1812 case SDL_WINDOWEVENT_RESIZED:
1813 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1814 SDL_RenderFillRect(renderer, nullptr);
1815 break;
1817 default:
1818 break;
1820 break;
1822 case SDL_QUIT:
1823 movState->mQuit = true;
1824 eom_action = EomAction::Quit;
1825 break;
1827 case FF_UPDATE_EVENT:
1828 reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
1829 screen, renderer
1831 break;
1833 case FF_REFRESH_EVENT:
1834 reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
1835 screen, renderer
1837 break;
1839 case FF_MOVIE_DONE_EVENT:
1840 std::cout<<'\n';
1841 last_time = seconds(-1);
1842 if(eom_action != EomAction::Quit)
1844 movState = nullptr;
1845 while(fileidx < argc && !movState)
1847 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1848 if(!movState->prepare()) movState = nullptr;
1850 if(movState)
1852 movState->setTitle(screen);
1853 break;
1857 /* Nothing more to play. Shut everything down and quit. */
1858 movState = nullptr;
1860 alcMakeContextCurrent(nullptr);
1861 alcDestroyContext(context);
1862 alcCloseDevice(device);
1864 SDL_DestroyRenderer(renderer);
1865 renderer = nullptr;
1866 SDL_DestroyWindow(screen);
1867 screen = nullptr;
1869 SDL_Quit();
1870 exit(0);
1872 default:
1873 break;
1877 std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
1878 return 1;