Construct error messages using parameterized values
[openal-soft.git] / examples / alffplay.cpp
blob6eedcd7691e9a41115d92369a2e5e9e7c39efae1
1 /*
2 * An example showing how to play a stream sync'd to video, using ffmpeg.
4 * Requires C++11.
5 */
#include <algorithm>
#include <array>
#include <atomic>
#include <chrono>
#include <cmath>
#include <condition_variable>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <deque>
#include <functional>
#include <iomanip>
#include <iostream>
#include <limits>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
22 extern "C" {
23 #include "libavcodec/avcodec.h"
24 #include "libavformat/avformat.h"
25 #include "libavformat/avio.h"
26 #include "libavutil/time.h"
27 #include "libavutil/pixfmt.h"
28 #include "libavutil/avstring.h"
29 #include "libavutil/channel_layout.h"
30 #include "libswscale/swscale.h"
31 #include "libswresample/swresample.h"
34 #include "SDL.h"
36 #include "AL/alc.h"
37 #include "AL/al.h"
38 #include "AL/alext.h"
40 extern "C" {
41 #ifndef AL_SOFT_map_buffer
42 #define AL_SOFT_map_buffer 1
43 typedef unsigned int ALbitfieldSOFT;
44 #define AL_MAP_READ_BIT_SOFT 0x00000001
45 #define AL_MAP_WRITE_BIT_SOFT 0x00000002
46 #define AL_MAP_PERSISTENT_BIT_SOFT 0x00000004
47 #define AL_PRESERVE_DATA_BIT_SOFT 0x00000008
48 typedef void (AL_APIENTRY*LPALBUFFERSTORAGESOFT)(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
49 typedef void* (AL_APIENTRY*LPALMAPBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length, ALbitfieldSOFT access);
50 typedef void (AL_APIENTRY*LPALUNMAPBUFFERSOFT)(ALuint buffer);
51 typedef void (AL_APIENTRY*LPALFLUSHMAPPEDBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length);
52 #endif
54 #ifndef AL_SOFT_events
55 #define AL_SOFT_events 1
56 #define AL_EVENT_CALLBACK_FUNCTION_SOFT 0x1220
57 #define AL_EVENT_CALLBACK_USER_PARAM_SOFT 0x1221
58 #define AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT 0x1222
59 #define AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT 0x1223
60 #define AL_EVENT_TYPE_ERROR_SOFT 0x1224
61 #define AL_EVENT_TYPE_PERFORMANCE_SOFT 0x1225
62 #define AL_EVENT_TYPE_DEPRECATED_SOFT 0x1226
63 typedef void (AL_APIENTRY*ALEVENTPROCSOFT)(ALenum eventType, ALuint object, ALuint param,
64 ALsizei length, const ALchar *message,
65 void *userParam);
66 typedef void (AL_APIENTRY*LPALEVENTCONTROLSOFT)(ALsizei count, const ALenum *types, ALboolean enable);
67 typedef void (AL_APIENTRY*LPALEVENTCALLBACKSOFT)(ALEVENTPROCSOFT callback, void *userParam);
68 typedef void* (AL_APIENTRY*LPALGETPOINTERSOFT)(ALenum pname);
69 typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values);
70 #endif
73 namespace {
75 using nanoseconds = std::chrono::nanoseconds;
76 using microseconds = std::chrono::microseconds;
77 using milliseconds = std::chrono::milliseconds;
78 using seconds = std::chrono::seconds;
79 using seconds_d64 = std::chrono::duration<double>;
81 const std::string AppName("alffplay");
83 bool EnableDirectOut = false;
84 LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
85 LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
87 LPALBUFFERSTORAGESOFT alBufferStorageSOFT;
88 LPALMAPBUFFERSOFT alMapBufferSOFT;
89 LPALUNMAPBUFFERSOFT alUnmapBufferSOFT;
91 LPALEVENTCONTROLSOFT alEventControlSOFT;
92 LPALEVENTCALLBACKSOFT alEventCallbackSOFT;
94 const seconds AVNoSyncThreshold(10);
96 const milliseconds VideoSyncThreshold(10);
97 #define VIDEO_PICTURE_QUEUE_SIZE 16
99 const seconds_d64 AudioSyncThreshold(0.03);
100 const milliseconds AudioSampleCorrectionMax(50);
101 /* Averaging filter coefficient for audio sync. */
102 #define AUDIO_DIFF_AVG_NB 20
103 const double AudioAvgFilterCoeff = std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB);
104 /* Per-buffer size, in time */
105 const milliseconds AudioBufferTime(20);
106 /* Buffer total size, in time (should be divisible by the buffer time) */
107 const milliseconds AudioBufferTotalTime(800);
109 #define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
111 enum {
112 FF_UPDATE_EVENT = SDL_USEREVENT,
113 FF_REFRESH_EVENT,
114 FF_MOVIE_DONE_EVENT
117 enum class SyncMaster {
118 Audio,
119 Video,
120 External,
122 Default = External
126 inline microseconds get_avtime()
127 { return microseconds(av_gettime()); }
129 /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
130 struct AVIOContextDeleter {
131 void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
133 using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;
135 struct AVFormatCtxDeleter {
136 void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
138 using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;
140 struct AVCodecCtxDeleter {
141 void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
143 using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;
145 struct AVFrameDeleter {
146 void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
148 using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;
150 struct SwrContextDeleter {
151 void operator()(SwrContext *ptr) { swr_free(&ptr); }
153 using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;
155 struct SwsContextDeleter {
156 void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
158 using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
161 class PacketQueue {
162 std::deque<AVPacket> mPackets;
163 size_t mTotalSize{0};
165 public:
166 ~PacketQueue() { clear(); }
168 bool empty() const noexcept { return mPackets.empty(); }
169 size_t totalSize() const noexcept { return mTotalSize; }
171 void put(const AVPacket *pkt)
173 mPackets.push_back(AVPacket{});
174 if(av_packet_ref(&mPackets.back(), pkt) != 0)
175 mPackets.pop_back();
176 else
177 mTotalSize += mPackets.back().size;
180 AVPacket *front() noexcept
181 { return &mPackets.front(); }
183 void pop()
185 AVPacket *pkt = &mPackets.front();
186 mTotalSize -= pkt->size;
187 av_packet_unref(pkt);
188 mPackets.pop_front();
191 void clear()
193 for(AVPacket &pkt : mPackets)
194 av_packet_unref(&pkt);
195 mPackets.clear();
196 mTotalSize = 0;
201 struct MovieState;
203 struct AudioState {
204 MovieState &mMovie;
206 AVStream *mStream{nullptr};
207 AVCodecCtxPtr mCodecCtx;
209 std::mutex mQueueMtx;
210 std::condition_variable mQueueCond;
212 /* Used for clock difference average computation */
213 seconds_d64 mClockDiffAvg{0};
215 /* Time of the next sample to be buffered */
216 nanoseconds mCurrentPts{0};
218 /* Device clock time that the stream started at. */
219 nanoseconds mDeviceStartTime{nanoseconds::min()};
221 /* Decompressed sample frame, and swresample context for conversion */
222 AVFramePtr mDecodedFrame;
223 SwrContextPtr mSwresCtx;
225 /* Conversion format, for what gets fed to OpenAL */
226 int mDstChanLayout{0};
227 AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};
229 /* Storage of converted samples */
230 uint8_t *mSamples{nullptr};
231 int mSamplesLen{0}; /* In samples */
232 int mSamplesPos{0};
233 int mSamplesMax{0};
235 /* OpenAL format */
236 ALenum mFormat{AL_NONE};
237 ALsizei mFrameSize{0};
239 std::mutex mSrcMutex;
240 ALuint mSource{0};
241 std::vector<ALuint> mBuffers;
242 ALsizei mBufferIdx{0};
244 AudioState(MovieState &movie) : mMovie(movie)
246 ~AudioState()
248 if(mSource)
249 alDeleteSources(1, &mSource);
250 if(!mBuffers.empty())
251 alDeleteBuffers(mBuffers.size(), mBuffers.data());
253 av_freep(&mSamples);
256 static void AL_APIENTRY EventCallback(ALenum eventType, ALuint object, ALuint param,
257 ALsizei length, const ALchar *message,
258 void *userParam);
260 nanoseconds getClockNoLock();
261 nanoseconds getClock()
263 std::lock_guard<std::mutex> lock(mSrcMutex);
264 return getClockNoLock();
267 bool isBufferFilled();
268 void startPlayback();
270 int getSync();
271 int decodeFrame();
272 bool readAudio(uint8_t *samples, int length);
274 int handler();
277 struct VideoState {
278 MovieState &mMovie;
280 AVStream *mStream{nullptr};
281 AVCodecCtxPtr mCodecCtx;
283 std::mutex mQueueMtx;
284 std::condition_variable mQueueCond;
286 nanoseconds mClock{0};
287 nanoseconds mFrameTimer{0};
288 nanoseconds mFrameLastPts{0};
289 nanoseconds mFrameLastDelay{0};
290 nanoseconds mCurrentPts{0};
291 /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
292 microseconds mCurrentPtsTime{0};
294 /* Decompressed video frame, and swscale context for conversion */
295 AVFramePtr mDecodedFrame;
296 SwsContextPtr mSwscaleCtx;
298 struct Picture {
299 SDL_Texture *mImage{nullptr};
300 int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
301 std::atomic<bool> mUpdated{false};
302 nanoseconds mPts{0};
304 ~Picture()
306 if(mImage)
307 SDL_DestroyTexture(mImage);
308 mImage = nullptr;
311 std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
312 size_t mPictQSize{0}, mPictQRead{0}, mPictQWrite{0};
313 std::mutex mPictQMutex;
314 std::condition_variable mPictQCond;
315 bool mFirstUpdate{true};
316 std::atomic<bool> mEOS{false};
317 std::atomic<bool> mFinalUpdate{false};
319 VideoState(MovieState &movie) : mMovie(movie) { }
321 nanoseconds getClock();
322 bool isBufferFilled();
324 static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
325 void schedRefresh(milliseconds delay);
326 void display(SDL_Window *screen, SDL_Renderer *renderer);
327 void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
328 void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
329 int queuePicture(nanoseconds pts);
330 int handler();
333 struct MovieState {
334 AVIOContextPtr mIOContext;
335 AVFormatCtxPtr mFormatCtx;
337 SyncMaster mAVSyncType{SyncMaster::Default};
339 microseconds mClockBase{0};
340 std::atomic<bool> mPlaying{false};
342 std::mutex mSendMtx;
343 std::condition_variable mSendCond;
344 /* NOTE: false/clear = need data, true/set = no data needed */
345 std::atomic_flag mSendDataGood;
347 std::atomic<bool> mQuit{false};
349 AudioState mAudio;
350 VideoState mVideo;
352 std::thread mParseThread;
353 std::thread mAudioThread;
354 std::thread mVideoThread;
356 std::string mFilename;
358 MovieState(std::string fname)
359 : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
361 ~MovieState()
363 mQuit = true;
364 if(mParseThread.joinable())
365 mParseThread.join();
368 static int decode_interrupt_cb(void *ctx);
369 bool prepare();
370 void setTitle(SDL_Window *window);
372 nanoseconds getClock();
374 nanoseconds getMasterClock();
376 nanoseconds getDuration();
378 int streamComponentOpen(int stream_index);
379 int parse_handler();
383 nanoseconds AudioState::getClockNoLock()
385 // The audio clock is the timestamp of the sample currently being heard.
386 if(alcGetInteger64vSOFT)
388 // If device start time = min, we aren't playing yet.
389 if(mDeviceStartTime == nanoseconds::min())
390 return nanoseconds::zero();
392 // Get the current device clock time and latency.
393 auto device = alcGetContextsDevice(alcGetCurrentContext());
394 ALCint64SOFT devtimes[2] = {0,0};
395 alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
396 auto latency = nanoseconds(devtimes[1]);
397 auto device_time = nanoseconds(devtimes[0]);
399 // The clock is simply the current device time relative to the recorded
400 // start time. We can also subtract the latency to get more a accurate
401 // position of where the audio device actually is in the output stream.
402 return device_time - mDeviceStartTime - latency;
405 /* The source-based clock is based on 4 components:
406 * 1 - The timestamp of the next sample to buffer (mCurrentPts)
407 * 2 - The length of the source's buffer queue
408 * (AudioBufferTime*AL_BUFFERS_QUEUED)
409 * 3 - The offset OpenAL is currently at in the source (the first value
410 * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
411 * 4 - The latency between OpenAL and the DAC (the second value from
412 * AL_SAMPLE_OFFSET_LATENCY_SOFT)
414 * Subtracting the length of the source queue from the next sample's
415 * timestamp gives the timestamp of the sample at the start of the source
416 * queue. Adding the source offset to that results in the timestamp for the
417 * sample at OpenAL's current position, and subtracting the source latency
418 * from that gives the timestamp of the sample currently at the DAC.
420 nanoseconds pts = mCurrentPts;
421 if(mSource)
423 ALint64SOFT offset[2];
424 ALint queued;
425 ALint status;
427 /* NOTE: The source state must be checked last, in case an underrun
428 * occurs and the source stops between retrieving the offset+latency
429 * and getting the state. */
430 if(alGetSourcei64vSOFT)
431 alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
432 else
434 ALint ioffset;
435 alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
436 offset[0] = (ALint64SOFT)ioffset << 32;
437 offset[1] = 0;
439 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
440 alGetSourcei(mSource, AL_SOURCE_STATE, &status);
442 /* If the source is AL_STOPPED, then there was an underrun and all
443 * buffers are processed, so ignore the source queue. The audio thread
444 * will put the source into an AL_INITIAL state and clear the queue
445 * when it starts recovery. */
446 if(status != AL_STOPPED)
448 using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
450 pts -= AudioBufferTime*queued;
451 pts += std::chrono::duration_cast<nanoseconds>(
452 fixed32(offset[0] / mCodecCtx->sample_rate)
455 /* Don't offset by the latency if the source isn't playing. */
456 if(status == AL_PLAYING)
457 pts -= nanoseconds(offset[1]);
460 return std::max(pts, nanoseconds::zero());
463 bool AudioState::isBufferFilled()
465 /* All of OpenAL's buffer queueing happens under the mSrcMutex lock, as
466 * does the source gen. So when we're able to grab the lock and the source
467 * is valid, the queue must be full.
469 std::lock_guard<std::mutex> lock(mSrcMutex);
470 return mSource != 0;
473 void AudioState::startPlayback()
475 alSourcePlay(mSource);
476 if(alcGetInteger64vSOFT)
478 using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
480 // Subtract the total buffer queue time from the current pts to get the
481 // pts of the start of the queue.
482 nanoseconds startpts = mCurrentPts - AudioBufferTotalTime;
483 int64_t srctimes[2]={0,0};
484 alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
485 auto device_time = nanoseconds(srctimes[1]);
486 auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32(srctimes[0])) /
487 mCodecCtx->sample_rate;
489 // The mixer may have ticked and incremented the device time and sample
490 // offset, so subtract the source offset from the device time to get
491 // the device time the source started at. Also subtract startpts to get
492 // the device time the stream would have started at to reach where it
493 // is now.
494 mDeviceStartTime = device_time - src_offset - startpts;
498 int AudioState::getSync()
500 if(mMovie.mAVSyncType == SyncMaster::Audio)
501 return 0;
503 auto ref_clock = mMovie.getMasterClock();
504 auto diff = ref_clock - getClockNoLock();
506 if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
508 /* Difference is TOO big; reset accumulated average */
509 mClockDiffAvg = seconds_d64::zero();
510 return 0;
513 /* Accumulate the diffs */
514 mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
515 auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
516 if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
517 return 0;
519 /* Constrain the per-update difference to avoid exceedingly large skips */
520 diff = std::min<nanoseconds>(std::max<nanoseconds>(diff, -AudioSampleCorrectionMax),
521 AudioSampleCorrectionMax);
522 return (int)std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count();
525 int AudioState::decodeFrame()
527 while(!mMovie.mQuit.load(std::memory_order_relaxed))
529 std::unique_lock<std::mutex> lock(mQueueMtx);
530 int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
531 if(ret == AVERROR(EAGAIN))
533 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
534 std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
535 mMovie.mSendCond.notify_one();
536 do {
537 mQueueCond.wait(lock);
538 ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
539 } while(ret == AVERROR(EAGAIN));
541 lock.unlock();
542 if(ret == AVERROR_EOF) break;
543 mMovie.mSendDataGood.clear(std::memory_order_relaxed);
544 mMovie.mSendCond.notify_one();
545 if(ret < 0)
547 std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
548 return 0;
551 if(mDecodedFrame->nb_samples <= 0)
553 av_frame_unref(mDecodedFrame.get());
554 continue;
557 /* If provided, update w/ pts */
558 if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
559 mCurrentPts = std::chrono::duration_cast<nanoseconds>(
560 seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
563 if(mDecodedFrame->nb_samples > mSamplesMax)
565 av_freep(&mSamples);
566 av_samples_alloc(
567 &mSamples, nullptr, mCodecCtx->channels,
568 mDecodedFrame->nb_samples, mDstSampleFmt, 0
570 mSamplesMax = mDecodedFrame->nb_samples;
572 /* Return the amount of sample frames converted */
573 int data_size = swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
574 (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
577 av_frame_unref(mDecodedFrame.get());
578 return data_size;
581 return 0;
584 /* Duplicates the sample at in to out, count times. The frame size is a
585 * multiple of the template type size.
587 template<typename T>
588 static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
590 const T *sample = reinterpret_cast<const T*>(in);
591 T *dst = reinterpret_cast<T*>(out);
592 if(frame_size == sizeof(T))
593 std::fill_n(dst, count, *sample);
594 else
596 /* NOTE: frame_size is a multiple of sizeof(T). */
597 int type_mult = frame_size / sizeof(T);
598 int i = 0;
599 std::generate_n(dst, count*type_mult,
600 [sample,type_mult,&i]() -> T
602 T ret = sample[i];
603 i = (i+1)%type_mult;
604 return ret;
611 bool AudioState::readAudio(uint8_t *samples, int length)
613 int sample_skip = getSync();
614 int audio_size = 0;
616 /* Read the next chunk of data, refill the buffer, and queue it
617 * on the source */
618 length /= mFrameSize;
619 while(audio_size < length)
621 if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
623 int frame_len = decodeFrame();
624 if(frame_len <= 0) break;
626 mSamplesLen = frame_len;
627 mSamplesPos = std::min(mSamplesLen, sample_skip);
628 sample_skip -= mSamplesPos;
630 // Adjust the device start time and current pts by the amount we're
631 // skipping/duplicating, so that the clock remains correct for the
632 // current stream position.
633 auto skip = nanoseconds(seconds(mSamplesPos)) / mCodecCtx->sample_rate;
634 mDeviceStartTime -= skip;
635 mCurrentPts += skip;
636 continue;
639 int rem = length - audio_size;
640 if(mSamplesPos >= 0)
642 int len = mSamplesLen - mSamplesPos;
643 if(rem > len) rem = len;
644 memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
646 else
648 rem = std::min(rem, -mSamplesPos);
650 /* Add samples by copying the first sample */
651 if((mFrameSize&7) == 0)
652 sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
653 else if((mFrameSize&3) == 0)
654 sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
655 else if((mFrameSize&1) == 0)
656 sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
657 else
658 sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
661 mSamplesPos += rem;
662 mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
663 samples += rem*mFrameSize;
664 audio_size += rem;
666 if(audio_size <= 0)
667 return false;
669 if(audio_size < length)
671 int rem = length - audio_size;
672 std::fill_n(samples, rem*mFrameSize,
673 (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
674 mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
675 audio_size += rem;
677 return true;
681 void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALuint param,
682 ALsizei length, const ALchar *message,
683 void *userParam)
685 AudioState *self = reinterpret_cast<AudioState*>(userParam);
687 std::cout<< "---- AL Event on AudioState "<<self<<" ----\nEvent: ";
688 switch(eventType)
690 case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
691 case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
692 case AL_EVENT_TYPE_ERROR_SOFT: std::cout<< "API error"; break;
693 case AL_EVENT_TYPE_PERFORMANCE_SOFT: std::cout<< "Performance"; break;
694 case AL_EVENT_TYPE_DEPRECATED_SOFT: std::cout<< "Deprecated"; break;
695 default: std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<
696 std::dec<<std::setw(0)<<std::setfill(' '); break;
698 std::cout<< "\n"
699 "Object ID: "<<object<<'\n'<<
700 "Parameter: "<<param<<'\n'<<
701 "Message: "<<std::string(message, length)<<"\n----"<<
702 std::endl;
705 int AudioState::handler()
707 const ALenum types[5] = {
708 AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
709 AL_EVENT_TYPE_ERROR_SOFT, AL_EVENT_TYPE_PERFORMANCE_SOFT, AL_EVENT_TYPE_DEPRECATED_SOFT
711 std::unique_lock<std::mutex> lock(mSrcMutex);
712 ALenum fmt;
714 if(alEventControlSOFT)
716 alEventControlSOFT(5, types, AL_TRUE);
717 alEventCallbackSOFT(EventCallback, this);
720 /* Find a suitable format for OpenAL. */
721 mDstChanLayout = 0;
722 if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
724 mDstSampleFmt = AV_SAMPLE_FMT_U8;
725 mFrameSize = 1;
726 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
727 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
728 (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
730 mDstChanLayout = mCodecCtx->channel_layout;
731 mFrameSize *= 8;
732 mFormat = fmt;
734 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
735 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
736 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
737 (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
739 mDstChanLayout = mCodecCtx->channel_layout;
740 mFrameSize *= 6;
741 mFormat = fmt;
743 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
745 mDstChanLayout = mCodecCtx->channel_layout;
746 mFrameSize *= 1;
747 mFormat = AL_FORMAT_MONO8;
749 if(!mDstChanLayout)
751 mDstChanLayout = AV_CH_LAYOUT_STEREO;
752 mFrameSize *= 2;
753 mFormat = AL_FORMAT_STEREO8;
756 if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
757 alIsExtensionPresent("AL_EXT_FLOAT32"))
759 mDstSampleFmt = AV_SAMPLE_FMT_FLT;
760 mFrameSize = 4;
761 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
762 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
763 (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
765 mDstChanLayout = mCodecCtx->channel_layout;
766 mFrameSize *= 8;
767 mFormat = fmt;
769 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
770 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
771 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
772 (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
774 mDstChanLayout = mCodecCtx->channel_layout;
775 mFrameSize *= 6;
776 mFormat = fmt;
778 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
780 mDstChanLayout = mCodecCtx->channel_layout;
781 mFrameSize *= 1;
782 mFormat = AL_FORMAT_MONO_FLOAT32;
784 if(!mDstChanLayout)
786 mDstChanLayout = AV_CH_LAYOUT_STEREO;
787 mFrameSize *= 2;
788 mFormat = AL_FORMAT_STEREO_FLOAT32;
791 if(!mDstChanLayout)
793 mDstSampleFmt = AV_SAMPLE_FMT_S16;
794 mFrameSize = 2;
795 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
796 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
797 (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
799 mDstChanLayout = mCodecCtx->channel_layout;
800 mFrameSize *= 8;
801 mFormat = fmt;
803 if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
804 mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
805 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
806 (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
808 mDstChanLayout = mCodecCtx->channel_layout;
809 mFrameSize *= 6;
810 mFormat = fmt;
812 if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
814 mDstChanLayout = mCodecCtx->channel_layout;
815 mFrameSize *= 1;
816 mFormat = AL_FORMAT_MONO16;
818 if(!mDstChanLayout)
820 mDstChanLayout = AV_CH_LAYOUT_STEREO;
821 mFrameSize *= 2;
822 mFormat = AL_FORMAT_STEREO16;
825 void *samples = nullptr;
826 ALsizei buffer_len = std::chrono::duration_cast<std::chrono::duration<int>>(
827 mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize;
829 mSamples = NULL;
830 mSamplesMax = 0;
831 mSamplesPos = 0;
832 mSamplesLen = 0;
834 mDecodedFrame.reset(av_frame_alloc());
835 if(!mDecodedFrame)
837 std::cerr<< "Failed to allocate audio frame" <<std::endl;
838 goto finish;
841 mSwresCtx.reset(swr_alloc_set_opts(nullptr,
842 mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
843 mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
844 (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
845 mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
846 0, nullptr
848 if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
850 std::cerr<< "Failed to initialize audio converter" <<std::endl;
851 goto finish;
854 mBuffers.assign(AudioBufferTotalTime / AudioBufferTime, 0);
855 alGenBuffers(mBuffers.size(), mBuffers.data());
856 alGenSources(1, &mSource);
858 if(EnableDirectOut)
859 alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
861 if(alGetError() != AL_NO_ERROR)
862 goto finish;
864 if(!alBufferStorageSOFT)
865 samples = av_malloc(buffer_len);
866 else
868 for(ALuint bufid : mBuffers)
869 alBufferStorageSOFT(bufid, mFormat, nullptr, buffer_len, mCodecCtx->sample_rate,
870 AL_MAP_WRITE_BIT_SOFT);
871 if(alGetError() != AL_NO_ERROR)
873 fprintf(stderr, "Failed to use mapped buffers\n");
874 samples = av_malloc(buffer_len);
878 while(alGetError() == AL_NO_ERROR && !mMovie.mQuit.load(std::memory_order_relaxed))
880 /* First remove any processed buffers. */
881 ALint processed;
882 alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
883 while(processed > 0)
885 std::array<ALuint,4> bids;
886 alSourceUnqueueBuffers(mSource, std::min<ALsizei>(bids.size(), processed),
887 bids.data());
888 processed -= std::min<ALsizei>(bids.size(), processed);
891 /* Refill the buffer queue. */
892 ALint queued;
893 alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
894 while((ALuint)queued < mBuffers.size())
896 ALuint bufid = mBuffers[mBufferIdx];
898 uint8_t *ptr = reinterpret_cast<uint8_t*>(
899 samples ? samples : alMapBufferSOFT(bufid, 0, buffer_len, AL_MAP_WRITE_BIT_SOFT)
901 if(!ptr) break;
903 /* Read the next chunk of data, filling the buffer, and queue it on
904 * the source */
905 bool got_audio = readAudio(ptr, buffer_len);
906 if(!samples) alUnmapBufferSOFT(bufid);
907 if(!got_audio) break;
909 if(samples)
910 alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate);
912 alSourceQueueBuffers(mSource, 1, &bufid);
913 mBufferIdx = (mBufferIdx+1) % mBuffers.size();
914 ++queued;
916 if(queued == 0)
917 break;
919 /* Check that the source is playing. */
920 ALint state;
921 alGetSourcei(mSource, AL_SOURCE_STATE, &state);
922 if(state == AL_STOPPED)
924 /* AL_STOPPED means there was an underrun. Clear the buffer queue
925 * since this likely means we're late, and rewind the source to get
926 * it back into an AL_INITIAL state.
928 alSourceRewind(mSource);
929 alSourcei(mSource, AL_BUFFER, 0);
930 continue;
933 /* (re)start the source if needed, and wait for a buffer to finish */
934 if(state != AL_PLAYING && state != AL_PAUSED &&
935 mMovie.mPlaying.load(std::memory_order_relaxed))
936 startPlayback();
938 lock.unlock();
939 SDL_Delay((AudioBufferTime/3).count());
940 lock.lock();
943 alSourceRewind(mSource);
944 alSourcei(mSource, AL_BUFFER, 0);
946 finish:
947 av_freep(&samples);
949 if(alEventControlSOFT)
951 alEventControlSOFT(5, types, AL_FALSE);
952 alEventCallbackSOFT(nullptr, nullptr);
955 return 0;
959 nanoseconds VideoState::getClock()
961 /* NOTE: This returns incorrect times while not playing. */
962 auto delta = get_avtime() - mCurrentPtsTime;
963 return mCurrentPts + delta;
966 bool VideoState::isBufferFilled()
968 std::unique_lock<std::mutex> lock(mPictQMutex);
969 return mPictQSize >= mPictQ.size();
972 Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
974 SDL_Event evt{};
975 evt.user.type = FF_REFRESH_EVENT;
976 evt.user.data1 = opaque;
977 SDL_PushEvent(&evt);
978 return 0; /* 0 means stop timer */
981 /* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
982 void VideoState::schedRefresh(milliseconds delay)
984 SDL_AddTimer(delay.count(), sdl_refresh_timer_cb, this);
987 /* Called by VideoState::refreshTimer to display the next video frame. */
988 void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
990 Picture *vp = &mPictQ[mPictQRead];
992 if(!vp->mImage)
993 return;
995 float aspect_ratio;
996 int win_w, win_h;
997 int w, h, x, y;
999 if(mCodecCtx->sample_aspect_ratio.num == 0)
1000 aspect_ratio = 0.0f;
1001 else
1003 aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
1004 mCodecCtx->height;
1006 if(aspect_ratio <= 0.0f)
1007 aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;
1009 SDL_GetWindowSize(screen, &win_w, &win_h);
1010 h = win_h;
1011 w = ((int)rint(h * aspect_ratio) + 3) & ~3;
1012 if(w > win_w)
1014 w = win_w;
1015 h = ((int)rint(w / aspect_ratio) + 3) & ~3;
1017 x = (win_w - w) / 2;
1018 y = (win_h - h) / 2;
1020 SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
1021 SDL_Rect dst_rect{ x, y, w, h };
1022 SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
1023 SDL_RenderPresent(renderer);
1026 /* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
/* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
 * was created. It handles the display of the next decoded video frame (if not
 * falling behind), and sets up the timer for the following video frame.
 */
void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
{
    if(!mStream)
    {
        if(mEOS)
        {
            mFinalUpdate = true;
            /* Lock and immediately unlock the queue mutex before notifying,
             * so a waiter can't miss the mFinalUpdate change between testing
             * its predicate and blocking on the condition variable. */
            std::unique_lock<std::mutex>(mPictQMutex).unlock();
            mPictQCond.notify_all();
            return;
        }
        /* No video stream (yet); check back later. */
        schedRefresh(milliseconds(100));
        return;
    }
    if(!mMovie.mPlaying.load(std::memory_order_relaxed))
    {
        /* Playback hasn't started; poll again very soon. */
        schedRefresh(milliseconds(1));
        return;
    }

    std::unique_lock<std::mutex> lock(mPictQMutex);
retry:
    if(mPictQSize == 0)
    {
        /* Nothing decoded yet. At end-of-stream signal the final update;
         * otherwise retry shortly. */
        if(mEOS)
            mFinalUpdate = true;
        else
            schedRefresh(milliseconds(1));
        lock.unlock();
        mPictQCond.notify_all();
        return;
    }

    Picture *vp = &mPictQ[mPictQRead];
    mCurrentPts = vp->mPts;
    mCurrentPtsTime = get_avtime();

    /* Get delay using the frame pts and the pts from last frame. */
    auto delay = vp->mPts - mFrameLastPts;
    if(delay <= seconds::zero() || delay >= seconds(1))
    {
        /* If incorrect delay, use previous one. */
        delay = mFrameLastDelay;
    }
    /* Save for next frame. */
    mFrameLastDelay = delay;
    mFrameLastPts = vp->mPts;

    /* Update delay to sync to clock if not master source. */
    if(mMovie.mAVSyncType != SyncMaster::Video)
    {
        auto ref_clock = mMovie.getMasterClock();
        auto diff = vp->mPts - ref_clock;

        /* Skip or repeat the frame. Take delay into account. */
        auto sync_threshold = std::min<nanoseconds>(delay, VideoSyncThreshold);
        if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
        {
            /* Within the no-sync window: drop the delay when behind the
             * master clock, double it when ahead. */
            if(diff <= -sync_threshold)
                delay = nanoseconds::zero();
            else if(diff >= sync_threshold)
                delay *= 2;
        }
    }

    mFrameTimer += delay;
    /* Compute the REAL delay. */
    auto actual_delay = mFrameTimer - get_avtime();
    if(!(actual_delay >= VideoSyncThreshold))
    {
        /* We don't have time to handle this picture, just skip to the next one. */
        mPictQRead = (mPictQRead+1)%mPictQ.size();
        mPictQSize--;
        goto retry;
    }
    schedRefresh(std::chrono::duration_cast<milliseconds>(actual_delay));

    /* Show the picture! */
    display(screen, renderer);

    /* Update queue for next picture. */
    mPictQRead = (mPictQRead+1)%mPictQ.size();
    mPictQSize--;
    lock.unlock();
    mPictQCond.notify_all();
}
/* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
 * main thread where the renderer was created, since SDL textures must be
 * manipulated on that thread.
 */
void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
{
    Picture *vp = &mPictQ[mPictQWrite];
    bool fmt_updated = false;

    /* allocate or resize the buffer! */
    if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
    {
        fmt_updated = true;
        if(vp->mImage)
            SDL_DestroyTexture(vp->mImage);
        /* The texture is sized to the coded dimensions; the display size
         * (mWidth/mHeight) may be smaller. */
        vp->mImage = SDL_CreateTexture(
            renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
            mCodecCtx->coded_width, mCodecCtx->coded_height
        );
        if(!vp->mImage)
            std::cerr<< "Failed to create YV12 texture!" <<std::endl;
        vp->mWidth = mCodecCtx->width;
        vp->mHeight = mCodecCtx->height;

        if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
        {
            /* For the first update, set the window size to the video size. */
            mFirstUpdate = false;

            int w = vp->mWidth;
            int h = vp->mHeight;
            if(mCodecCtx->sample_aspect_ratio.den != 0)
            {
                /* Apply the pixel aspect ratio: widen for wide pixels,
                 * heighten for tall ones (0 means unknown, leave as-is). */
                double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
                if(aspect_ratio >= 1.0)
                    w = (int)(w*aspect_ratio + 0.5);
                else if(aspect_ratio > 0.0)
                    h = (int)(h/aspect_ratio + 0.5);
            }
            SDL_SetWindowSize(screen, w, h);
        }
    }

    if(vp->mImage)
    {
        AVFrame *frame = mDecodedFrame.get();
        void *pixels = nullptr;
        int pitch = 0;

        if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
            /* Already the texture's format; upload the planes directly. */
            SDL_UpdateYUVTexture(vp->mImage, nullptr,
                frame->data[0], frame->linesize[0],
                frame->data[1], frame->linesize[1],
                frame->data[2], frame->linesize[2]
            );
        else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
            std::cerr<< "Failed to lock texture" <<std::endl;
        else
        {
            // Convert the image into YUV format that SDL uses
            int coded_w = mCodecCtx->coded_width;
            int coded_h = mCodecCtx->coded_height;
            int w = mCodecCtx->width;
            int h = mCodecCtx->height;
            if(!mSwscaleCtx || fmt_updated)
            {
                /* (Re)build the scaler whenever the source format changed. */
                mSwscaleCtx.reset(sws_getContext(
                    w, h, mCodecCtx->pix_fmt,
                    w, h, AV_PIX_FMT_YUV420P, 0,
                    nullptr, nullptr, nullptr
                ));
            }

            /* point pict at the queue */
            uint8_t *pict_data[3];
            pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
            /* Y plane is coded_w*coded_h bytes; U and V are quarter size. */
            pict_data[1] = pict_data[0] + coded_w*coded_h;
            pict_data[2] = pict_data[1] + coded_w*coded_h/4;

            int pict_linesize[3];
            pict_linesize[0] = pitch;
            pict_linesize[1] = pitch / 2;
            pict_linesize[2] = pitch / 2;

            sws_scale(mSwscaleCtx.get(), (const uint8_t**)frame->data,
                      frame->linesize, 0, h, pict_data, pict_linesize);
            SDL_UnlockTexture(vp->mImage);
        }
    }

    /* Publish the update, then lock/unlock the queue mutex so the waiter in
     * queuePicture can't miss the flag change before being notified. */
    vp->mUpdated.store(true, std::memory_order_release);
    std::unique_lock<std::mutex>(mPictQMutex).unlock();
    mPictQCond.notify_one();
}
/* Queues a decoded frame (mDecodedFrame) for display with the given pts.
 * Blocks until queue space is available and the main thread has uploaded the
 * frame to a texture. Returns 0 on success, -1 if the movie is quitting.
 */
int VideoState::queuePicture(nanoseconds pts)
{
    /* Wait until we have space for a new pic */
    std::unique_lock<std::mutex> lock(mPictQMutex);
    while(mPictQSize >= mPictQ.size() && !mMovie.mQuit.load(std::memory_order_relaxed))
        mPictQCond.wait(lock);
    lock.unlock();

    if(mMovie.mQuit.load(std::memory_order_relaxed))
        return -1;

    Picture *vp = &mPictQ[mPictQWrite];

    /* We have to create/update the picture in the main thread, so push an
     * event for it and wait. */
    vp->mUpdated.store(false, std::memory_order_relaxed);
    SDL_Event evt{};
    evt.user.type = FF_UPDATE_EVENT;
    evt.user.data1 = this;
    SDL_PushEvent(&evt);

    /* Wait until the picture is updated. */
    lock.lock();
    while(!vp->mUpdated.load(std::memory_order_relaxed))
    {
        if(mMovie.mQuit.load(std::memory_order_relaxed))
            return -1;
        mPictQCond.wait(lock);
    }
    if(mMovie.mQuit.load(std::memory_order_relaxed))
        return -1;
    vp->mPts = pts;

    /* Advance the (circular) write position and grow the queue size. */
    mPictQWrite = (mPictQWrite+1)%mPictQ.size();
    mPictQSize++;
    lock.unlock();

    return 0;
}
/* Video decoding thread: pulls decoded frames from the codec, timestamps
 * them, and hands them to queuePicture until EOF or quit.
 */
int VideoState::handler()
{
    mDecodedFrame.reset(av_frame_alloc());
    while(!mMovie.mQuit.load(std::memory_order_relaxed))
    {
        std::unique_lock<std::mutex> lock(mQueueMtx);
        /* Decode video frame */
        int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
        if(ret == AVERROR(EAGAIN))
        {
            /* Decoder is starved; tell the parser thread we need more input
             * (clear the "send is good" flag and poke it), then wait. */
            mMovie.mSendDataGood.clear(std::memory_order_relaxed);
            std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
            mMovie.mSendCond.notify_one();
            do {
                mQueueCond.wait(lock);
                ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
            } while(ret == AVERROR(EAGAIN));
        }
        lock.unlock();
        if(ret == AVERROR_EOF) break;
        /* We consumed a frame, so the parser can send more data. */
        mMovie.mSendDataGood.clear(std::memory_order_relaxed);
        mMovie.mSendCond.notify_one();
        if(ret < 0)
        {
            std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
            continue;
        }

        /* Get the PTS for this frame. Keep the last known clock value when
         * the frame has no usable timestamp. */
        nanoseconds pts;
        if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
            mClock = std::chrono::duration_cast<nanoseconds>(
                seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
            );
        pts = mClock;

        /* Update the video clock to the next expected PTS. */
        auto frame_delay = av_q2d(mCodecCtx->time_base);
        frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
        mClock += std::chrono::duration_cast<nanoseconds>(seconds_d64(frame_delay));

        if(queuePicture(pts) < 0)
            break;
        av_frame_unref(mDecodedFrame.get());
    }
    mEOS = true;

    /* On quit, drop any queued pictures, then wait for the main thread to
     * acknowledge the final update before returning. */
    std::unique_lock<std::mutex> lock(mPictQMutex);
    if(mMovie.mQuit.load(std::memory_order_relaxed))
    {
        mPictQRead = 0;
        mPictQWrite = 0;
        mPictQSize = 0;
    }
    while(!mFinalUpdate)
        mPictQCond.wait(lock);

    return 0;
}
1311 int MovieState::decode_interrupt_cb(void *ctx)
1313 return reinterpret_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
1316 bool MovieState::prepare()
1318 AVIOContext *avioctx = nullptr;
1319 AVIOInterruptCB intcb = { decode_interrupt_cb, this };
1320 if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
1322 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1323 return false;
1325 mIOContext.reset(avioctx);
1327 /* Open movie file. If avformat_open_input fails it will automatically free
1328 * this context, so don't set it onto a smart pointer yet.
1330 AVFormatContext *fmtctx = avformat_alloc_context();
1331 fmtctx->pb = mIOContext.get();
1332 fmtctx->interrupt_callback = intcb;
1333 if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
1335 std::cerr<< "Failed to open "<<mFilename <<std::endl;
1336 return false;
1338 mFormatCtx.reset(fmtctx);
1340 /* Retrieve stream information */
1341 if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
1343 std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
1344 return false;
1347 mVideo.schedRefresh(milliseconds(40));
1349 mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
1350 return true;
1353 void MovieState::setTitle(SDL_Window *window)
1355 auto pos1 = mFilename.rfind('/');
1356 auto pos2 = mFilename.rfind('\\');
1357 auto fpos = ((pos1 == std::string::npos) ? pos2 :
1358 (pos2 == std::string::npos) ? pos1 :
1359 std::max(pos1, pos2)) + 1;
1360 SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
1363 nanoseconds MovieState::getClock()
1365 if(!mPlaying.load(std::memory_order_relaxed))
1366 return nanoseconds::zero();
1367 return get_avtime() - mClockBase;
1370 nanoseconds MovieState::getMasterClock()
1372 if(mAVSyncType == SyncMaster::Video)
1373 return mVideo.getClock();
1374 if(mAVSyncType == SyncMaster::Audio)
1375 return mAudio.getClock();
1376 return getClock();
1379 nanoseconds MovieState::getDuration()
1380 { return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
/* Opens the decoder for the given stream index and starts the matching
 * audio/video handler thread. Returns the stream index on success, -1 on
 * failure or for unsupported media types.
 */
int MovieState::streamComponentOpen(int stream_index)
{
    if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
        return -1;

    /* Get a pointer to the codec context for the stream, and open the
     * associated codec.
     */
    AVCodecCtxPtr avctx(avcodec_alloc_context3(nullptr));
    if(!avctx) return -1;

    if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
        return -1;

    AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
    if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
    {
        std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
                 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
        return -1;
    }

    /* Initialize and start the media type handler */
    switch(avctx->codec_type)
    {
        case AVMEDIA_TYPE_AUDIO:
            /* Hand ownership of the codec context to the audio component. */
            mAudio.mStream = mFormatCtx->streams[stream_index];
            mAudio.mCodecCtx = std::move(avctx);

            mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
            break;

        case AVMEDIA_TYPE_VIDEO:
            /* Hand ownership of the codec context to the video component. */
            mVideo.mStream = mFormatCtx->streams[stream_index];
            mVideo.mCodecCtx = std::move(avctx);

            mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
            break;

        default:
            return -1;
    }

    return stream_index;
}
1428 int MovieState::parse_handler()
1430 int video_index = -1;
1431 int audio_index = -1;
1433 /* Dump information about file onto standard error */
1434 av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);
1436 /* Find the first video and audio streams */
1437 for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
1439 auto codecpar = mFormatCtx->streams[i]->codecpar;
1440 if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
1441 video_index = streamComponentOpen(i);
1442 else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
1443 audio_index = streamComponentOpen(i);
1446 if(video_index < 0 && audio_index < 0)
1448 std::cerr<< mFilename<<": could not open codecs" <<std::endl;
1449 mQuit = true;
1452 PacketQueue audio_queue, video_queue;
1453 bool input_finished = false;
1455 /* Main packet reading/dispatching loop */
1456 while(!mQuit.load(std::memory_order_relaxed) && !input_finished)
1458 AVPacket packet;
1459 if(av_read_frame(mFormatCtx.get(), &packet) < 0)
1460 input_finished = true;
1461 else
1463 /* Copy the packet into the queue it's meant for. */
1464 if(packet.stream_index == video_index)
1465 video_queue.put(&packet);
1466 else if(packet.stream_index == audio_index)
1467 audio_queue.put(&packet);
1468 av_packet_unref(&packet);
1471 do {
1472 /* Send whatever queued packets we have. */
1473 if(!audio_queue.empty())
1475 std::unique_lock<std::mutex> lock(mAudio.mQueueMtx);
1476 int ret;
1477 do {
1478 ret = avcodec_send_packet(mAudio.mCodecCtx.get(), audio_queue.front());
1479 if(ret != AVERROR(EAGAIN)) audio_queue.pop();
1480 } while(ret != AVERROR(EAGAIN) && !audio_queue.empty());
1481 lock.unlock();
1482 mAudio.mQueueCond.notify_one();
1484 if(!video_queue.empty())
1486 std::unique_lock<std::mutex> lock(mVideo.mQueueMtx);
1487 int ret;
1488 do {
1489 ret = avcodec_send_packet(mVideo.mCodecCtx.get(), video_queue.front());
1490 if(ret != AVERROR(EAGAIN)) video_queue.pop();
1491 } while(ret != AVERROR(EAGAIN) && !video_queue.empty());
1492 lock.unlock();
1493 mVideo.mQueueCond.notify_one();
1495 /* If the queues are completely empty, or it's not full and there's
1496 * more input to read, go get more.
1498 size_t queue_size = audio_queue.totalSize() + video_queue.totalSize();
1499 if(queue_size == 0 || (queue_size < MAX_QUEUE_SIZE && !input_finished))
1500 break;
1502 if(!mPlaying.load(std::memory_order_relaxed))
1504 if((!mAudio.mCodecCtx || mAudio.isBufferFilled()) &&
1505 (!mVideo.mCodecCtx || mVideo.isBufferFilled()))
1507 /* Set the base time 50ms ahead of the current av time. */
1508 mClockBase = get_avtime() + milliseconds(50);
1509 mVideo.mCurrentPtsTime = mClockBase;
1510 mVideo.mFrameTimer = mVideo.mCurrentPtsTime;
1511 mAudio.startPlayback();
1512 mPlaying.store(std::memory_order_release);
1515 /* Nothing to send or get for now, wait a bit and try again. */
1516 { std::unique_lock<std::mutex> lock(mSendMtx);
1517 if(mSendDataGood.test_and_set(std::memory_order_relaxed))
1518 mSendCond.wait_for(lock, milliseconds(10));
1520 } while(!mQuit.load(std::memory_order_relaxed));
1522 /* Pass a null packet to finish the send buffers (the receive functions
1523 * will get AVERROR_EOF when emptied).
1525 if(mVideo.mCodecCtx)
1527 { std::lock_guard<std::mutex> lock(mVideo.mQueueMtx);
1528 avcodec_send_packet(mVideo.mCodecCtx.get(), nullptr);
1530 mVideo.mQueueCond.notify_one();
1532 if(mAudio.mCodecCtx)
1534 { std::lock_guard<std::mutex> lock(mAudio.mQueueMtx);
1535 avcodec_send_packet(mAudio.mCodecCtx.get(), nullptr);
1537 mAudio.mQueueCond.notify_one();
1539 video_queue.clear();
1540 audio_queue.clear();
1542 /* all done - wait for it */
1543 if(mVideoThread.joinable())
1544 mVideoThread.join();
1545 if(mAudioThread.joinable())
1546 mAudioThread.join();
1548 mVideo.mEOS = true;
1549 std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
1550 while(!mVideo.mFinalUpdate)
1551 mVideo.mPictQCond.wait(lock);
1552 lock.unlock();
1554 SDL_Event evt{};
1555 evt.user.type = FF_MOVIE_DONE_EVENT;
1556 SDL_PushEvent(&evt);
1558 return 0;
// Helper type whose stream operator prints a seconds count with
// human-readable "[-]Hh MMm SSs" formatting.
struct PrettyTime {
    seconds mTime;
};

inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using std::chrono::duration_cast;

    seconds t = rhs.mTime;
    if(t.count() < 0)
    {
        os << '-';
        t *= -1;
    }

    const auto hrs = duration_cast<std::chrono::hours>(t).count();
    const auto mins = duration_cast<std::chrono::minutes>(t).count();
    const auto secs = t.count();

    // Only handle up to hour formatting. Minutes are zero-padded to two
    // digits only when an hour count precedes them; seconds always are.
    if(hrs > 0)
        os << hrs << 'h' << std::setfill('0') << std::setw(2) << (mins % 60) << 'm';
    else
        os << mins << 'm' << std::setfill('0');
    os << std::setw(2) << (secs % 60) << 's' << std::setw(0) << std::setfill(' ');
    return os;
}
1590 } // namespace
1593 int main(int argc, char *argv[])
1595 std::unique_ptr<MovieState> movState;
1597 if(argc < 2)
1599 std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
1600 return 1;
1602 /* Register all formats and codecs */
1603 av_register_all();
1604 /* Initialize networking protocols */
1605 avformat_network_init();
1607 if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
1609 std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
1610 return 1;
1613 /* Make a window to put our video */
1614 SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
1615 if(!screen)
1617 std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
1618 return 1;
1620 /* Make a renderer to handle the texture image surface and rendering. */
1621 Uint32 render_flags = SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC;
1622 SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, render_flags);
1623 if(renderer)
1625 SDL_RendererInfo rinf{};
1626 bool ok = false;
1628 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1629 * software renderer. */
1630 if(SDL_GetRendererInfo(renderer, &rinf) == 0)
1632 for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
1633 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
1635 if(!ok)
1637 std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
1638 SDL_DestroyRenderer(renderer);
1639 renderer = nullptr;
1642 if(!renderer)
1644 render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
1645 renderer = SDL_CreateRenderer(screen, -1, render_flags);
1647 if(!renderer)
1649 std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
1650 return 1;
1652 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1653 SDL_RenderFillRect(renderer, nullptr);
1654 SDL_RenderPresent(renderer);
1656 /* Open an audio device */
1657 int fileidx = 1;
1658 ALCdevice *device = [argc,argv,&fileidx]() -> ALCdevice*
1660 ALCdevice *dev = NULL;
1661 if(argc > 3 && strcmp(argv[1], "-device") == 0)
1663 fileidx = 3;
1664 dev = alcOpenDevice(argv[2]);
1665 if(dev) return dev;
1666 std::cerr<< "Failed to open \""<<argv[2]<<"\" - trying default" <<std::endl;
1668 return alcOpenDevice(nullptr);
1669 }();
1670 ALCcontext *context = alcCreateContext(device, nullptr);
1671 if(!context || alcMakeContextCurrent(context) == ALC_FALSE)
1673 std::cerr<< "Failed to set up audio device" <<std::endl;
1674 if(context)
1675 alcDestroyContext(context);
1676 return 1;
1679 const ALCchar *name = nullptr;
1680 if(alcIsExtensionPresent(device, "ALC_ENUMERATE_ALL_EXT"))
1681 name = alcGetString(device, ALC_ALL_DEVICES_SPECIFIER);
1682 if(!name || alcGetError(device) != AL_NO_ERROR)
1683 name = alcGetString(device, ALC_DEVICE_SPECIFIER);
1684 std::cout<< "Opened \""<<name<<"\"" <<std::endl;
1686 if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
1688 std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
1689 alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
1690 alcGetProcAddress(device, "alcGetInteger64vSOFT")
1694 if(alIsExtensionPresent("AL_SOFT_source_latency"))
1696 std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
1697 alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
1698 alGetProcAddress("alGetSourcei64vSOFT")
1701 if(alIsExtensionPresent("AL_SOFTX_map_buffer"))
1703 std::cout<< "Found AL_SOFT_map_buffer" <<std::endl;
1704 alBufferStorageSOFT = reinterpret_cast<LPALBUFFERSTORAGESOFT>(
1705 alGetProcAddress("alBufferStorageSOFT"));
1706 alMapBufferSOFT = reinterpret_cast<LPALMAPBUFFERSOFT>(
1707 alGetProcAddress("alMapBufferSOFT"));
1708 alUnmapBufferSOFT = reinterpret_cast<LPALUNMAPBUFFERSOFT>(
1709 alGetProcAddress("alUnmapBufferSOFT"));
1711 if(alIsExtensionPresent("AL_SOFTX_events"))
1713 std::cout<< "Found AL_SOFT_events" <<std::endl;
1714 alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
1715 alGetProcAddress("alEventControlSOFT"));
1716 alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
1717 alGetProcAddress("alEventCallbackSOFT"));
1720 if(fileidx < argc && strcmp(argv[fileidx], "-direct") == 0)
1722 ++fileidx;
1723 if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
1724 std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
1725 else
1727 std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
1728 EnableDirectOut = true;
1732 while(fileidx < argc && !movState)
1734 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1735 if(!movState->prepare()) movState = nullptr;
1737 if(!movState)
1739 std::cerr<< "Could not start a video" <<std::endl;
1740 return 1;
1742 movState->setTitle(screen);
1744 /* Default to going to the next movie at the end of one. */
1745 enum class EomAction {
1746 Next, Quit
1747 } eom_action = EomAction::Next;
1748 seconds last_time(-1);
1749 SDL_Event event;
1750 while(1)
1752 int have_evt = SDL_WaitEventTimeout(&event, 10);
1754 auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
1755 if(cur_time != last_time)
1757 auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
1758 std::cout<< "\r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
1759 last_time = cur_time;
1761 if(!have_evt) continue;
1763 switch(event.type)
1765 case SDL_KEYDOWN:
1766 switch(event.key.keysym.sym)
1768 case SDLK_ESCAPE:
1769 movState->mQuit = true;
1770 eom_action = EomAction::Quit;
1771 break;
1773 case SDLK_n:
1774 movState->mQuit = true;
1775 eom_action = EomAction::Next;
1776 break;
1778 default:
1779 break;
1781 break;
1783 case SDL_WINDOWEVENT:
1784 switch(event.window.event)
1786 case SDL_WINDOWEVENT_RESIZED:
1787 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1788 SDL_RenderFillRect(renderer, nullptr);
1789 break;
1791 default:
1792 break;
1794 break;
1796 case SDL_QUIT:
1797 movState->mQuit = true;
1798 eom_action = EomAction::Quit;
1799 break;
1801 case FF_UPDATE_EVENT:
1802 reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
1803 screen, renderer
1805 break;
1807 case FF_REFRESH_EVENT:
1808 reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
1809 screen, renderer
1811 break;
1813 case FF_MOVIE_DONE_EVENT:
1814 std::cout<<'\n';
1815 last_time = seconds(-1);
1816 if(eom_action != EomAction::Quit)
1818 movState = nullptr;
1819 while(fileidx < argc && !movState)
1821 movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
1822 if(!movState->prepare()) movState = nullptr;
1824 if(movState)
1826 movState->setTitle(screen);
1827 break;
1831 /* Nothing more to play. Shut everything down and quit. */
1832 movState = nullptr;
1834 alcMakeContextCurrent(nullptr);
1835 alcDestroyContext(context);
1836 alcCloseDevice(device);
1838 SDL_DestroyRenderer(renderer);
1839 renderer = nullptr;
1840 SDL_DestroyWindow(screen);
1841 screen = nullptr;
1843 SDL_Quit();
1844 exit(0);
1846 default:
1847 break;
1851 std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
1852 return 1;