2 * An example showing how to play a stream sync'd to video, using ffmpeg.
14 #include <condition_variable>
28 #include <string_view>
34 _Pragma("GCC diagnostic push")
35 _Pragma("GCC diagnostic ignored \"-Wconversion\"")
36 _Pragma("GCC diagnostic ignored \"-Wold-style-cast\"")
39 #include "libavcodec/avcodec.h"
40 #include "libavformat/avformat.h"
41 #include "libavformat/avio.h"
42 #include "libavformat/version.h"
43 #include "libavutil/avutil.h"
44 #include "libavutil/error.h"
45 #include "libavutil/frame.h"
46 #include "libavutil/mem.h"
47 #include "libavutil/pixfmt.h"
48 #include "libavutil/rational.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/time.h"
51 #include "libavutil/version.h"
52 #include "libavutil/channel_layout.h"
53 #include "libswscale/swscale.h"
54 #include "libswresample/swresample.h"
56 constexpr auto AVNoPtsValue
= AV_NOPTS_VALUE
;
57 constexpr auto AVErrorEOF
= AVERROR_EOF
;
62 #define SDL_MAIN_HANDLED
65 _Pragma("GCC diagnostic pop")
73 #include "common/alhelpers.h"
/* User-defined literal for spelling int64_t constants, e.g. 1_i64. */
inline constexpr int64_t operator""_i64(unsigned long long int n) noexcept
{
    return static_cast<int64_t>(n);
}
81 #define M_PI (3.14159265358979323846)
84 using fixed32
= std::chrono::duration
<int64_t,std::ratio
<1,(1_i64
<<32)>>;
85 using nanoseconds
= std::chrono::nanoseconds
;
86 using microseconds
= std::chrono::microseconds
;
87 using milliseconds
= std::chrono::milliseconds
;
88 using seconds
= std::chrono::seconds
;
89 using seconds_d64
= std::chrono::duration
<double>;
90 using std::chrono::duration_cast
;
92 const std::string AppName
{"alffplay"};
94 ALenum DirectOutMode
{AL_FALSE
};
95 bool EnableWideStereo
{false};
96 bool EnableUhj
{false};
97 bool EnableSuperStereo
{false};
98 bool DisableVideo
{false};
99 LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT
;
100 LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT
;
101 LPALEVENTCONTROLSOFT alEventControlSOFT
;
102 LPALEVENTCALLBACKSOFT alEventCallbackSOFT
;
104 LPALBUFFERCALLBACKSOFT alBufferCallbackSOFT
;
106 const seconds AVNoSyncThreshold
{10};
108 #define VIDEO_PICTURE_QUEUE_SIZE 24
110 const seconds_d64 AudioSyncThreshold
{0.03};
111 const milliseconds AudioSampleCorrectionMax
{50};
112 /* Averaging filter coefficient for audio sync. */
113 #define AUDIO_DIFF_AVG_NB 20
114 const double AudioAvgFilterCoeff
{std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB
)};
115 /* Per-buffer size, in time */
116 constexpr milliseconds AudioBufferTime
{20};
117 /* Buffer total size, in time (should be divisible by the buffer time) */
118 constexpr milliseconds AudioBufferTotalTime
{800};
119 constexpr auto AudioBufferCount
= AudioBufferTotalTime
/ AudioBufferTime
;
122 FF_MOVIE_DONE_EVENT
= SDL_USEREVENT
125 enum class SyncMaster
{
134 inline microseconds
get_avtime()
135 { return microseconds
{av_gettime()}; }
137 /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
138 struct AVIOContextDeleter
{
139 void operator()(AVIOContext
*ptr
) { avio_closep(&ptr
); }
141 using AVIOContextPtr
= std::unique_ptr
<AVIOContext
,AVIOContextDeleter
>;
143 struct AVFormatCtxDeleter
{
144 void operator()(AVFormatContext
*ptr
) { avformat_close_input(&ptr
); }
146 using AVFormatCtxPtr
= std::unique_ptr
<AVFormatContext
,AVFormatCtxDeleter
>;
148 struct AVCodecCtxDeleter
{
149 void operator()(AVCodecContext
*ptr
) { avcodec_free_context(&ptr
); }
151 using AVCodecCtxPtr
= std::unique_ptr
<AVCodecContext
,AVCodecCtxDeleter
>;
153 struct AVPacketDeleter
{
154 void operator()(AVPacket
*pkt
) { av_packet_free(&pkt
); }
156 using AVPacketPtr
= std::unique_ptr
<AVPacket
,AVPacketDeleter
>;
158 struct AVFrameDeleter
{
159 void operator()(AVFrame
*ptr
) { av_frame_free(&ptr
); }
161 using AVFramePtr
= std::unique_ptr
<AVFrame
,AVFrameDeleter
>;
163 struct SwrContextDeleter
{
164 void operator()(SwrContext
*ptr
) { swr_free(&ptr
); }
166 using SwrContextPtr
= std::unique_ptr
<SwrContext
,SwrContextDeleter
>;
168 struct SwsContextDeleter
{
169 void operator()(SwsContext
*ptr
) { sws_freeContext(ptr
); }
171 using SwsContextPtr
= std::unique_ptr
<SwsContext
,SwsContextDeleter
>;
174 struct ChannelLayout
: public AVChannelLayout
{
175 ChannelLayout() : AVChannelLayout
{} { }
176 ~ChannelLayout() { av_channel_layout_uninit(this); }
180 template<size_t SizeLimit
>
182 std::mutex mPacketMutex
, mFrameMutex
;
183 std::condition_variable mPacketCond
;
184 std::condition_variable mInFrameCond
, mOutFrameCond
;
186 std::deque
<AVPacketPtr
> mPackets
;
187 size_t mTotalSize
{0};
188 bool mFinished
{false};
190 AVPacketPtr
getPacket()
192 std::unique_lock
<std::mutex
> plock
{mPacketMutex
};
193 while(mPackets
.empty() && !mFinished
)
194 mPacketCond
.wait(plock
);
198 auto ret
= std::move(mPackets
.front());
199 mPackets
.pop_front();
200 mTotalSize
-= static_cast<unsigned int>(ret
->size
);
205 int sendPacket(AVCodecContext
*codecctx
)
207 AVPacketPtr packet
{getPacket()};
211 std::unique_lock
<std::mutex
> flock
{mFrameMutex
};
212 while((ret
=avcodec_send_packet(codecctx
, packet
.get())) == AVERROR(EAGAIN
))
213 mInFrameCond
.wait_for(flock
, milliseconds
{50});
215 mOutFrameCond
.notify_one();
219 if(!ret
) return AVErrorEOF
;
220 std::cerr
<< "Failed to send flush packet: "<<ret
<<std::endl
;
224 std::cerr
<< "Failed to send packet: "<<ret
<<std::endl
;
228 int receiveFrame(AVCodecContext
*codecctx
, AVFrame
*frame
)
232 std::unique_lock
<std::mutex
> flock
{mFrameMutex
};
233 while((ret
=avcodec_receive_frame(codecctx
, frame
)) == AVERROR(EAGAIN
))
234 mOutFrameCond
.wait_for(flock
, milliseconds
{50});
236 mInFrameCond
.notify_one();
243 std::lock_guard
<std::mutex
> packetlock
{mPacketMutex
};
246 mPacketCond
.notify_one();
252 std::lock_guard
<std::mutex
> packetlock
{mPacketMutex
};
258 mPacketCond
.notify_one();
261 bool put(const AVPacket
*pkt
)
264 std::lock_guard
<std::mutex
> packet_lock
{mPacketMutex
};
265 if(mTotalSize
>= SizeLimit
|| mFinished
)
268 mPackets
.push_back(AVPacketPtr
{av_packet_alloc()});
269 if(av_packet_ref(mPackets
.back().get(), pkt
) != 0)
275 mTotalSize
+= static_cast<unsigned int>(mPackets
.back()->size
);
277 mPacketCond
.notify_one();
288 AVStream
*mStream
{nullptr};
289 AVCodecCtxPtr mCodecCtx
;
291 DataQueue
<size_t{2}*1024*1024> mQueue
;
293 /* Used for clock difference average computation */
294 seconds_d64 mClockDiffAvg
{0};
296 /* Time of the next sample to be buffered */
297 nanoseconds mCurrentPts
{0};
299 /* Device clock time that the stream started at. */
300 nanoseconds mDeviceStartTime
{nanoseconds::min()};
302 /* Decompressed sample frame, and swresample context for conversion */
303 AVFramePtr mDecodedFrame
;
304 SwrContextPtr mSwresCtx
;
306 /* Conversion format, for what gets fed to OpenAL */
307 uint64_t mDstChanLayout
{0};
308 AVSampleFormat mDstSampleFmt
{AV_SAMPLE_FMT_NONE
};
310 /* Storage of converted samples */
311 std::array
<uint8_t*,1> mSamples
{};
312 al::span
<uint8_t> mSamplesSpan
{};
313 int mSamplesLen
{0}; /* In samples */
317 std::vector
<uint8_t> mBufferData
;
318 std::atomic
<size_t> mReadPos
{0};
319 std::atomic
<size_t> mWritePos
{0};
322 ALenum mFormat
{AL_NONE
};
323 ALuint mFrameSize
{0};
325 std::mutex mSrcMutex
;
326 std::condition_variable mSrcCond
;
327 std::atomic_flag mConnected
{};
329 std::array
<ALuint
,AudioBufferCount
> mBuffers
{};
330 ALuint mBufferIdx
{0};
332 AudioState(MovieState
&movie
) : mMovie(movie
)
333 { mConnected
.test_and_set(std::memory_order_relaxed
); }
337 alDeleteSources(1, &mSource
);
339 alDeleteBuffers(static_cast<ALsizei
>(mBuffers
.size()), mBuffers
.data());
341 av_freep(mSamples
.data());
344 static void AL_APIENTRY
eventCallbackC(ALenum eventType
, ALuint object
, ALuint param
,
345 ALsizei length
, const ALchar
*message
, void *userParam
) noexcept
346 { static_cast<AudioState
*>(userParam
)->eventCallback(eventType
, object
, param
, length
, message
); }
347 void eventCallback(ALenum eventType
, ALuint object
, ALuint param
, ALsizei length
,
348 const ALchar
*message
) noexcept
;
350 static ALsizei AL_APIENTRY
bufferCallbackC(void *userptr
, void *data
, ALsizei size
) noexcept
351 { return static_cast<AudioState
*>(userptr
)->bufferCallback(data
, size
); }
352 ALsizei
bufferCallback(void *data
, ALsizei size
) noexcept
;
354 nanoseconds
getClockNoLock();
355 nanoseconds
getClock()
357 std::lock_guard
<std::mutex
> lock
{mSrcMutex
};
358 return getClockNoLock();
361 bool startPlayback();
365 bool readAudio(al::span
<uint8_t> samples
, unsigned int length
, int &sample_skip
);
366 bool readAudio(int sample_skip
);
374 AVStream
*mStream
{nullptr};
375 AVCodecCtxPtr mCodecCtx
;
377 DataQueue
<size_t{14}*1024*1024> mQueue
;
379 /* The pts of the currently displayed frame, and the time (av_gettime) it
380 * was last updated - used to have running video pts
382 nanoseconds mDisplayPts
{0};
383 microseconds mDisplayPtsTime
{microseconds::min()};
384 std::mutex mDispPtsMutex
;
386 /* Swscale context for format conversion */
387 SwsContextPtr mSwscaleCtx
;
391 nanoseconds mPts
{nanoseconds::min()};
393 std::array
<Picture
,VIDEO_PICTURE_QUEUE_SIZE
> mPictQ
;
394 std::atomic
<size_t> mPictQRead
{0u}, mPictQWrite
{1u};
395 std::mutex mPictQMutex
;
396 std::condition_variable mPictQCond
;
398 SDL_Texture
*mImage
{nullptr};
399 int mWidth
{0}, mHeight
{0}; /* Full texture size */
400 bool mFirstUpdate
{true};
402 std::atomic
<bool> mEOS
{false};
403 std::atomic
<bool> mFinalUpdate
{false};
405 VideoState(MovieState
&movie
) : mMovie(movie
) { }
409 SDL_DestroyTexture(mImage
);
413 nanoseconds
getClock();
415 void display(SDL_Window
*screen
, SDL_Renderer
*renderer
, AVFrame
*frame
) const;
416 void updateVideo(SDL_Window
*screen
, SDL_Renderer
*renderer
, bool redraw
);
421 AVIOContextPtr mIOContext
;
422 AVFormatCtxPtr mFormatCtx
;
424 SyncMaster mAVSyncType
{SyncMaster::Default
};
426 microseconds mClockBase
{microseconds::min()};
428 std::atomic
<bool> mQuit
{false};
433 std::mutex mStartupMutex
;
434 std::condition_variable mStartupCond
;
435 bool mStartupDone
{false};
437 std::thread mParseThread
;
438 std::thread mAudioThread
;
439 std::thread mVideoThread
;
441 std::string mFilename
;
443 MovieState(std::string_view fname
) : mAudio
{*this}, mVideo
{*this}, mFilename
{fname
}
448 if(mParseThread
.joinable())
452 static int decode_interrupt_cb(void *ctx
);
454 void setTitle(SDL_Window
*window
) const;
457 [[nodiscard
]] nanoseconds
getClock() const;
458 [[nodiscard
]] nanoseconds
getMasterClock();
459 [[nodiscard
]] nanoseconds
getDuration() const;
461 bool streamComponentOpen(AVStream
*stream
);
466 nanoseconds
AudioState::getClockNoLock()
468 // The audio clock is the timestamp of the sample currently being heard.
469 if(alcGetInteger64vSOFT
)
471 // If device start time = min, we aren't playing yet.
472 if(mDeviceStartTime
== nanoseconds::min())
473 return nanoseconds::zero();
475 // Get the current device clock time and latency.
476 auto device
= alcGetContextsDevice(alcGetCurrentContext());
477 std::array
<ALCint64SOFT
,2> devtimes
{};
478 alcGetInteger64vSOFT(device
, ALC_DEVICE_CLOCK_LATENCY_SOFT
, 2, devtimes
.data());
479 auto latency
= nanoseconds
{devtimes
[1]};
480 auto device_time
= nanoseconds
{devtimes
[0]};
482 // The clock is simply the current device time relative to the recorded
483 // start time. We can also subtract the latency to get more a accurate
484 // position of where the audio device actually is in the output stream.
485 return device_time
- mDeviceStartTime
- latency
;
488 if(!mBufferData
.empty())
490 if(mDeviceStartTime
== nanoseconds::min())
491 return nanoseconds::zero();
493 /* With a callback buffer and no device clock, mDeviceStartTime is
494 * actually the timestamp of the first sample frame played. The audio
495 * clock, then, is that plus the current source offset.
497 std::array
<ALint64SOFT
,2> offset
{};
498 if(alGetSourcei64vSOFT
)
499 alGetSourcei64vSOFT(mSource
, AL_SAMPLE_OFFSET_LATENCY_SOFT
, offset
.data());
503 alGetSourcei(mSource
, AL_SAMPLE_OFFSET
, &ioffset
);
504 offset
[0] = ALint64SOFT
{ioffset
} << 32;
506 /* NOTE: The source state must be checked last, in case an underrun
507 * occurs and the source stops between getting the state and retrieving
508 * the offset+latency.
511 alGetSourcei(mSource
, AL_SOURCE_STATE
, &status
);
514 if(status
== AL_PLAYING
|| status
== AL_PAUSED
)
515 pts
= mDeviceStartTime
- nanoseconds
{offset
[1]} +
516 duration_cast
<nanoseconds
>(fixed32
{offset
[0] / mCodecCtx
->sample_rate
});
519 /* If the source is stopped, the pts of the next sample to be heard
520 * is the pts of the next sample to be buffered, minus the amount
521 * already in the buffer ready to play.
523 const size_t woffset
{mWritePos
.load(std::memory_order_acquire
)};
524 const size_t roffset
{mReadPos
.load(std::memory_order_relaxed
)};
525 const size_t readable
{((woffset
>=roffset
) ? woffset
: (mBufferData
.size()+woffset
)) -
528 pts
= mCurrentPts
- nanoseconds
{seconds
{readable
/mFrameSize
}}/mCodecCtx
->sample_rate
;
534 /* The source-based clock is based on 4 components:
535 * 1 - The timestamp of the next sample to buffer (mCurrentPts)
536 * 2 - The length of the source's buffer queue
537 * (AudioBufferTime*AL_BUFFERS_QUEUED)
538 * 3 - The offset OpenAL is currently at in the source (the first value
539 * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
540 * 4 - The latency between OpenAL and the DAC (the second value from
541 * AL_SAMPLE_OFFSET_LATENCY_SOFT)
543 * Subtracting the length of the source queue from the next sample's
544 * timestamp gives the timestamp of the sample at the start of the source
545 * queue. Adding the source offset to that results in the timestamp for the
546 * sample at OpenAL's current position, and subtracting the source latency
547 * from that gives the timestamp of the sample currently at the DAC.
549 nanoseconds pts
{mCurrentPts
};
552 std::array
<ALint64SOFT
,2> offset
{};
553 if(alGetSourcei64vSOFT
)
554 alGetSourcei64vSOFT(mSource
, AL_SAMPLE_OFFSET_LATENCY_SOFT
, offset
.data());
558 alGetSourcei(mSource
, AL_SAMPLE_OFFSET
, &ioffset
);
559 offset
[0] = ALint64SOFT
{ioffset
} << 32;
561 ALint queued
, status
;
562 alGetSourcei(mSource
, AL_BUFFERS_QUEUED
, &queued
);
563 alGetSourcei(mSource
, AL_SOURCE_STATE
, &status
);
565 /* If the source is AL_STOPPED, then there was an underrun and all
566 * buffers are processed, so ignore the source queue. The audio thread
567 * will put the source into an AL_INITIAL state and clear the queue
568 * when it starts recovery.
570 if(status
!= AL_STOPPED
)
572 pts
-= AudioBufferTime
*queued
;
573 pts
+= duration_cast
<nanoseconds
>(fixed32
{offset
[0] / mCodecCtx
->sample_rate
});
575 /* Don't offset by the latency if the source isn't playing. */
576 if(status
== AL_PLAYING
)
577 pts
-= nanoseconds
{offset
[1]};
580 return std::max(pts
, nanoseconds::zero());
583 bool AudioState::startPlayback()
585 const size_t woffset
{mWritePos
.load(std::memory_order_acquire
)};
586 const size_t roffset
{mReadPos
.load(std::memory_order_relaxed
)};
587 const size_t readable
{((woffset
>= roffset
) ? woffset
: (mBufferData
.size()+woffset
)) -
590 if(!mBufferData
.empty())
594 if(!alcGetInteger64vSOFT
)
595 mDeviceStartTime
= mCurrentPts
-
596 nanoseconds
{seconds
{readable
/mFrameSize
}}/mCodecCtx
->sample_rate
;
601 alGetSourcei(mSource
, AL_BUFFERS_QUEUED
, &queued
);
602 if(queued
== 0) return false;
605 alSourcePlay(mSource
);
606 if(alcGetInteger64vSOFT
)
608 /* Subtract the total buffer queue time from the current pts to get the
609 * pts of the start of the queue.
611 std::array
<int64_t,2> srctimes
{};
612 alGetSourcei64vSOFT(mSource
, AL_SAMPLE_OFFSET_CLOCK_SOFT
, srctimes
.data());
613 auto device_time
= nanoseconds
{srctimes
[1]};
614 auto src_offset
= duration_cast
<nanoseconds
>(fixed32
{srctimes
[0]}) /
615 mCodecCtx
->sample_rate
;
617 /* The mixer may have ticked and incremented the device time and sample
618 * offset, so subtract the source offset from the device time to get
619 * the device time the source started at. Also subtract startpts to get
620 * the device time the stream would have started at to reach where it
623 if(!mBufferData
.empty())
625 nanoseconds startpts
{mCurrentPts
-
626 nanoseconds
{seconds
{readable
/mFrameSize
}}/mCodecCtx
->sample_rate
};
627 mDeviceStartTime
= device_time
- src_offset
- startpts
;
631 nanoseconds startpts
{mCurrentPts
- AudioBufferTotalTime
};
632 mDeviceStartTime
= device_time
- src_offset
- startpts
;
638 int AudioState::getSync()
640 if(mMovie
.mAVSyncType
== SyncMaster::Audio
)
643 auto ref_clock
= mMovie
.getMasterClock();
644 auto diff
= ref_clock
- getClockNoLock();
646 if(!(diff
< AVNoSyncThreshold
&& diff
> -AVNoSyncThreshold
))
648 /* Difference is TOO big; reset accumulated average */
649 mClockDiffAvg
= seconds_d64::zero();
653 /* Accumulate the diffs */
654 mClockDiffAvg
= mClockDiffAvg
*AudioAvgFilterCoeff
+ diff
;
655 auto avg_diff
= mClockDiffAvg
*(1.0 - AudioAvgFilterCoeff
);
656 if(avg_diff
< AudioSyncThreshold
/2.0 && avg_diff
> -AudioSyncThreshold
)
659 /* Constrain the per-update difference to avoid exceedingly large skips */
660 diff
= std::min
<nanoseconds
>(diff
, AudioSampleCorrectionMax
);
661 return static_cast<int>(duration_cast
<seconds
>(diff
*mCodecCtx
->sample_rate
).count());
664 int AudioState::decodeFrame()
667 while(int ret
{mQueue
.receiveFrame(mCodecCtx
.get(), mDecodedFrame
.get())})
669 if(ret
== AVErrorEOF
) return 0;
670 std::cerr
<< "Failed to receive frame: "<<ret
<<std::endl
;
672 } while(mDecodedFrame
->nb_samples
<= 0);
674 /* If provided, update w/ pts */
675 if(mDecodedFrame
->best_effort_timestamp
!= AVNoPtsValue
)
676 mCurrentPts
= duration_cast
<nanoseconds
>(seconds_d64
{av_q2d(mStream
->time_base
) *
677 static_cast<double>(mDecodedFrame
->best_effort_timestamp
)});
679 if(mDecodedFrame
->nb_samples
> mSamplesMax
)
681 av_freep(mSamples
.data());
682 av_samples_alloc(mSamples
.data(), nullptr, mCodecCtx
->ch_layout
.nb_channels
,
683 mDecodedFrame
->nb_samples
, mDstSampleFmt
, 0);
684 mSamplesMax
= mDecodedFrame
->nb_samples
;
685 mSamplesSpan
= {mSamples
[0], static_cast<size_t>(mSamplesMax
)*mFrameSize
};
687 /* Copy to a local to mark const. Don't know why this can't be implicit. */
688 using data_t
= decltype(decltype(mDecodedFrame
)::element_type::data
);
689 std::array
<const uint8_t*,std::extent_v
<data_t
>> cdata
{};
690 std::copy(std::begin(mDecodedFrame
->data
), std::end(mDecodedFrame
->data
), cdata
.begin());
691 /* Return the amount of sample frames converted */
692 const int data_size
{swr_convert(mSwresCtx
.get(), mSamples
.data(), mDecodedFrame
->nb_samples
,
693 cdata
.data(), mDecodedFrame
->nb_samples
)};
695 av_frame_unref(mDecodedFrame
.get());
699 /* Duplicates the sample at in to out, count times. The frame size is a
700 * multiple of the template type size.
703 void sample_dup(al::span
<uint8_t> out
, al::span
<const uint8_t> in
, size_t count
, size_t frame_size
)
705 auto sample
= al::span
{reinterpret_cast<const T
*>(in
.data()), in
.size()/sizeof(T
)};
706 auto dst
= al::span
{reinterpret_cast<T
*>(out
.data()), out
.size()/sizeof(T
)};
708 /* NOTE: frame_size is a multiple of sizeof(T). */
709 const size_t type_mult
{frame_size
/ sizeof(T
)};
711 std::fill_n(dst
.begin(), count
, sample
.front());
712 else for(size_t i
{0};i
< count
;++i
)
714 for(size_t j
{0};j
< type_mult
;++j
)
715 dst
[i
*type_mult
+ j
] = sample
[j
];
719 void sample_dup(al::span
<uint8_t> out
, al::span
<const uint8_t> in
, size_t count
, size_t frame_size
)
721 if((frame_size
&7) == 0)
722 sample_dup
<uint64_t>(out
, in
, count
, frame_size
);
723 else if((frame_size
&3) == 0)
724 sample_dup
<uint32_t>(out
, in
, count
, frame_size
);
725 else if((frame_size
&1) == 0)
726 sample_dup
<uint16_t>(out
, in
, count
, frame_size
);
728 sample_dup
<uint8_t>(out
, in
, count
, frame_size
);
731 bool AudioState::readAudio(al::span
<uint8_t> samples
, unsigned int length
, int &sample_skip
)
733 unsigned int audio_size
{0};
735 /* Read the next chunk of data, refill the buffer, and queue it
737 length
/= mFrameSize
;
738 while(mSamplesLen
> 0 && audio_size
< length
)
740 unsigned int rem
{length
- audio_size
};
743 const auto len
= static_cast<unsigned int>(mSamplesLen
- mSamplesPos
);
744 if(rem
> len
) rem
= len
;
745 const size_t boffset
{static_cast<ALuint
>(mSamplesPos
) * size_t{mFrameSize
}};
746 std::copy_n(mSamplesSpan
.cbegin()+ptrdiff_t(boffset
), rem
*size_t{mFrameSize
},
751 rem
= std::min(rem
, static_cast<unsigned int>(-mSamplesPos
));
753 /* Add samples by copying the first sample */
754 sample_dup(samples
, mSamplesSpan
, rem
, mFrameSize
);
757 mSamplesPos
+= static_cast<int>(rem
);
758 mCurrentPts
+= nanoseconds
{seconds
{rem
}} / mCodecCtx
->sample_rate
;
759 samples
= samples
.subspan(rem
*size_t{mFrameSize
});
762 while(mSamplesPos
>= mSamplesLen
)
764 mSamplesLen
= decodeFrame();
765 mSamplesPos
= std::min(mSamplesLen
, sample_skip
);
766 if(mSamplesLen
<= 0) break;
768 sample_skip
-= mSamplesPos
;
770 // Adjust the device start time and current pts by the amount we're
771 // skipping/duplicating, so that the clock remains correct for the
772 // current stream position.
773 auto skip
= nanoseconds
{seconds
{mSamplesPos
}} / mCodecCtx
->sample_rate
;
774 mDeviceStartTime
-= skip
;
781 if(audio_size
< length
)
783 const unsigned int rem
{length
- audio_size
};
784 std::fill_n(samples
.begin(), rem
*mFrameSize
,
785 (mDstSampleFmt
== AV_SAMPLE_FMT_U8
) ? 0x80 : 0x00);
786 mCurrentPts
+= nanoseconds
{seconds
{rem
}} / mCodecCtx
->sample_rate
;
791 bool AudioState::readAudio(int sample_skip
)
793 size_t woffset
{mWritePos
.load(std::memory_order_acquire
)};
794 const size_t roffset
{mReadPos
.load(std::memory_order_relaxed
)};
795 while(mSamplesLen
> 0)
797 const size_t nsamples
{((roffset
> woffset
) ? roffset
-woffset
-1
798 : (roffset
== 0) ? (mBufferData
.size()-woffset
-1)
799 : (mBufferData
.size()-woffset
)) / mFrameSize
};
804 const size_t rem
{std::min
<size_t>(nsamples
, static_cast<ALuint
>(-mSamplesPos
))};
806 sample_dup(al::span
{mBufferData
}.subspan(woffset
), mSamplesSpan
, rem
, mFrameSize
);
807 woffset
+= rem
* mFrameSize
;
808 if(woffset
== mBufferData
.size()) woffset
= 0;
809 mWritePos
.store(woffset
, std::memory_order_release
);
811 mCurrentPts
+= nanoseconds
{seconds
{rem
}} / mCodecCtx
->sample_rate
;
812 mSamplesPos
+= static_cast<int>(rem
);
816 const size_t rem
{std::min
<size_t>(nsamples
, static_cast<ALuint
>(mSamplesLen
-mSamplesPos
))};
817 const size_t boffset
{static_cast<ALuint
>(mSamplesPos
) * size_t{mFrameSize
}};
818 const size_t nbytes
{rem
* mFrameSize
};
820 std::copy_n(mSamplesSpan
.cbegin()+ptrdiff_t(boffset
), nbytes
,
821 mBufferData
.begin()+ptrdiff_t(woffset
));
823 if(woffset
== mBufferData
.size()) woffset
= 0;
824 mWritePos
.store(woffset
, std::memory_order_release
);
826 mCurrentPts
+= nanoseconds
{seconds
{rem
}} / mCodecCtx
->sample_rate
;
827 mSamplesPos
+= static_cast<int>(rem
);
829 while(mSamplesPos
>= mSamplesLen
)
831 mSamplesLen
= decodeFrame();
832 mSamplesPos
= std::min(mSamplesLen
, sample_skip
);
833 if(mSamplesLen
<= 0) return false;
835 sample_skip
-= mSamplesPos
;
837 auto skip
= nanoseconds
{seconds
{mSamplesPos
}} / mCodecCtx
->sample_rate
;
838 mDeviceStartTime
-= skip
;
847 void AL_APIENTRY
AudioState::eventCallback(ALenum eventType
, ALuint object
, ALuint param
,
848 ALsizei length
, const ALchar
*message
) noexcept
850 if(eventType
== AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT
)
852 /* Temporarily lock the source mutex to ensure it's not between
853 * checking the processed count and going to sleep.
855 std::unique_lock
<std::mutex
>{mSrcMutex
}.unlock();
856 mSrcCond
.notify_one();
860 std::cout
<< "\n---- AL Event on AudioState "<<this<<" ----\nEvent: ";
863 case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT
: std::cout
<< "Buffer completed"; break;
864 case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT
: std::cout
<< "Source state changed"; break;
865 case AL_EVENT_TYPE_DISCONNECTED_SOFT
: std::cout
<< "Disconnected"; break;
867 std::cout
<< "0x"<<std::hex
<<std::setw(4)<<std::setfill('0')<<eventType
<<std::dec
<<
868 std::setw(0)<<std::setfill(' '); break;
871 "Object ID: "<<object
<<"\n"
872 "Parameter: "<<param
<<"\n"
873 "Message: "<<std::string
{message
, static_cast<ALuint
>(length
)}<<"\n----"<<
876 if(eventType
== AL_EVENT_TYPE_DISCONNECTED_SOFT
)
879 std::lock_guard
<std::mutex
> lock
{mSrcMutex
};
880 mConnected
.clear(std::memory_order_release
);
882 mSrcCond
.notify_one();
886 ALsizei
AudioState::bufferCallback(void *data
, ALsizei size
) noexcept
888 auto dst
= al::span
{static_cast<ALbyte
*>(data
), static_cast<ALuint
>(size
)};
891 size_t roffset
{mReadPos
.load(std::memory_order_acquire
)};
894 const size_t woffset
{mWritePos
.load(std::memory_order_relaxed
)};
895 if(woffset
== roffset
) break;
897 size_t todo
{((woffset
< roffset
) ? mBufferData
.size() : woffset
) - roffset
};
898 todo
= std::min(todo
, dst
.size());
900 std::copy_n(mBufferData
.cbegin()+ptrdiff_t(roffset
), todo
, dst
.begin());
901 dst
= dst
.subspan(todo
);
902 got
+= static_cast<ALsizei
>(todo
);
905 if(roffset
== mBufferData
.size())
908 mReadPos
.store(roffset
, std::memory_order_release
);
913 int AudioState::handler()
915 std::unique_lock
<std::mutex
> srclock
{mSrcMutex
, std::defer_lock
};
916 milliseconds sleep_time
{AudioBufferTime
/ 3};
918 struct EventControlManager
{
919 const std::array
<ALenum
,3> evt_types
{{
920 AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT
, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT
,
921 AL_EVENT_TYPE_DISCONNECTED_SOFT
}};
923 EventControlManager(milliseconds
&sleep_time
)
925 if(alEventControlSOFT
)
927 alEventControlSOFT(static_cast<ALsizei
>(evt_types
.size()), evt_types
.data(),
929 alEventCallbackSOFT(&AudioState::eventCallbackC
, this);
930 sleep_time
= AudioBufferTotalTime
;
933 ~EventControlManager()
935 if(alEventControlSOFT
)
937 alEventControlSOFT(static_cast<ALsizei
>(evt_types
.size()), evt_types
.data(),
939 alEventCallbackSOFT(nullptr, nullptr);
943 EventControlManager event_controller
{sleep_time
};
945 std::vector
<uint8_t> samples
;
946 ALsizei buffer_len
{0};
948 /* Find a suitable format for OpenAL. */
949 const auto layoutmask
= mCodecCtx
->ch_layout
.u
.mask
; /* NOLINT(*-union-access) */
952 if((mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_FLT
|| mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_FLTP
953 || mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_DBL
954 || mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_DBLP
955 || mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_S32
956 || mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_S32P
957 || mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_S64
958 || mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_S64P
)
959 && alIsExtensionPresent("AL_EXT_FLOAT32"))
961 mDstSampleFmt
= AV_SAMPLE_FMT_FLT
;
963 if(mCodecCtx
->ch_layout
.order
== AV_CHANNEL_ORDER_NATIVE
)
965 if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
967 if(layoutmask
== AV_CH_LAYOUT_7POINT1
)
969 mDstChanLayout
= layoutmask
;
971 mFormat
= alGetEnumValue("AL_FORMAT_71CHN32");
973 if(layoutmask
== AV_CH_LAYOUT_5POINT1
|| layoutmask
== AV_CH_LAYOUT_5POINT1_BACK
)
975 mDstChanLayout
= layoutmask
;
977 mFormat
= alGetEnumValue("AL_FORMAT_51CHN32");
979 if(layoutmask
== AV_CH_LAYOUT_QUAD
)
981 mDstChanLayout
= layoutmask
;
983 mFormat
= alGetEnumValue("AL_FORMAT_QUAD32");
986 if(layoutmask
== AV_CH_LAYOUT_MONO
)
988 mDstChanLayout
= layoutmask
;
990 mFormat
= AL_FORMAT_MONO_FLOAT32
;
993 else if(mCodecCtx
->ch_layout
.order
== AV_CHANNEL_ORDER_AMBISONIC
994 && alIsExtensionPresent("AL_EXT_BFORMAT"))
996 /* Calculate what should be the ambisonic order from the number of
997 * channels, and confirm that's the number of channels. Opus allows
998 * an optional non-diegetic stereo stream with the B-Format stream,
999 * which we can ignore, so check for that too.
1001 auto order
= static_cast<int>(std::sqrt(mCodecCtx
->ch_layout
.nb_channels
)) - 1;
1002 int channels
{(order
+1) * (order
+1)};
1003 if(channels
== mCodecCtx
->ch_layout
.nb_channels
1004 || channels
+2 == mCodecCtx
->ch_layout
.nb_channels
)
1006 /* OpenAL only supports first-order with AL_EXT_BFORMAT, which
1007 * is 4 channels for 3D buffers.
1010 mFormat
= alGetEnumValue("AL_FORMAT_BFORMAT3D_FLOAT32");
1013 if(!mFormat
|| mFormat
== -1)
1015 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
1017 mFormat
= EnableUhj
? AL_FORMAT_UHJ2CHN_FLOAT32_SOFT
: AL_FORMAT_STEREO_FLOAT32
;
1020 if(mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_U8
|| mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_U8P
)
1022 mDstSampleFmt
= AV_SAMPLE_FMT_U8
;
1024 if(mCodecCtx
->ch_layout
.order
== AV_CHANNEL_ORDER_NATIVE
)
1026 if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
1028 if(layoutmask
== AV_CH_LAYOUT_7POINT1
)
1030 mDstChanLayout
= layoutmask
;
1032 mFormat
= alGetEnumValue("AL_FORMAT_71CHN8");
1034 if(layoutmask
== AV_CH_LAYOUT_5POINT1
|| layoutmask
== AV_CH_LAYOUT_5POINT1_BACK
)
1036 mDstChanLayout
= layoutmask
;
1038 mFormat
= alGetEnumValue("AL_FORMAT_51CHN8");
1040 if(layoutmask
== AV_CH_LAYOUT_QUAD
)
1042 mDstChanLayout
= layoutmask
;
1044 mFormat
= alGetEnumValue("AL_FORMAT_QUAD8");
1047 if(layoutmask
== AV_CH_LAYOUT_MONO
)
1049 mDstChanLayout
= layoutmask
;
1051 mFormat
= AL_FORMAT_MONO8
;
1054 else if(mCodecCtx
->ch_layout
.order
== AV_CHANNEL_ORDER_AMBISONIC
1055 && alIsExtensionPresent("AL_EXT_BFORMAT"))
1057 auto order
= static_cast<int>(std::sqrt(mCodecCtx
->ch_layout
.nb_channels
)) - 1;
1058 int channels
{(order
+1) * (order
+1)};
1059 if(channels
== mCodecCtx
->ch_layout
.nb_channels
1060 || channels
+2 == mCodecCtx
->ch_layout
.nb_channels
)
1063 mFormat
= alGetEnumValue("AL_FORMAT_BFORMAT3D_8");
1066 if(!mFormat
|| mFormat
== -1)
1068 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
1070 mFormat
= EnableUhj
? AL_FORMAT_UHJ2CHN8_SOFT
: AL_FORMAT_STEREO8
;
1073 if(!mFormat
|| mFormat
== -1)
1075 mDstSampleFmt
= AV_SAMPLE_FMT_S16
;
1077 if(mCodecCtx
->ch_layout
.order
== AV_CHANNEL_ORDER_NATIVE
)
1079 if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
1081 if(layoutmask
== AV_CH_LAYOUT_7POINT1
)
1083 mDstChanLayout
= layoutmask
;
1085 mFormat
= alGetEnumValue("AL_FORMAT_71CHN16");
1087 if(layoutmask
== AV_CH_LAYOUT_5POINT1
|| layoutmask
== AV_CH_LAYOUT_5POINT1_BACK
)
1089 mDstChanLayout
= layoutmask
;
1091 mFormat
= alGetEnumValue("AL_FORMAT_51CHN16");
1093 if(layoutmask
== AV_CH_LAYOUT_QUAD
)
1095 mDstChanLayout
= layoutmask
;
1097 mFormat
= alGetEnumValue("AL_FORMAT_QUAD16");
1100 if(layoutmask
== AV_CH_LAYOUT_MONO
)
1102 mDstChanLayout
= layoutmask
;
1104 mFormat
= AL_FORMAT_MONO16
;
1107 else if(mCodecCtx
->ch_layout
.order
== AV_CHANNEL_ORDER_AMBISONIC
1108 && alIsExtensionPresent("AL_EXT_BFORMAT"))
1110 auto order
= static_cast<int>(std::sqrt(mCodecCtx
->ch_layout
.nb_channels
)) - 1;
1111 int channels
{(order
+1) * (order
+1)};
1112 if(channels
== mCodecCtx
->ch_layout
.nb_channels
1113 || channels
+2 == mCodecCtx
->ch_layout
.nb_channels
)
1116 mFormat
= alGetEnumValue("AL_FORMAT_BFORMAT3D_16");
1119 if(!mFormat
|| mFormat
== -1)
1121 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
1123 mFormat
= EnableUhj
? AL_FORMAT_UHJ2CHN16_SOFT
: AL_FORMAT_STEREO16
;
1127 mSamples
.fill(nullptr);
1133 mDecodedFrame
.reset(av_frame_alloc());
1136 std::cerr
<< "Failed to allocate audio frame" <<std::endl
;
1140 /* Note that ffmpeg assumes AmbiX (ACN layout, SN3D normalization). */
1141 const bool has_bfmt_ex
{alIsExtensionPresent("AL_SOFT_bformat_ex") != AL_FALSE
};
1142 const ALenum ambi_layout
{AL_ACN_SOFT
};
1143 const ALenum ambi_scale
{AL_SN3D_SOFT
};
1147 /* OpenAL only supports first-order ambisonics with AL_EXT_BFORMAT, so
1148 * we have to drop any extra channels.
1150 ChannelLayout layout
{};
1151 av_channel_layout_from_string(&layout
, "ambisonic 1");
1154 int err
{swr_alloc_set_opts2(&ps
, &layout
, mDstSampleFmt
, mCodecCtx
->sample_rate
,
1155 &mCodecCtx
->ch_layout
, mCodecCtx
->sample_fmt
, mCodecCtx
->sample_rate
, 0, nullptr)};
1156 mSwresCtx
.reset(ps
);
1159 std::array
<char,AV_ERROR_MAX_STRING_SIZE
> errstr
{};
1160 std::cerr
<< "Failed to allocate SwrContext: "
1161 <<av_make_error_string(errstr
.data(), AV_ERROR_MAX_STRING_SIZE
, err
) <<std::endl
;
1166 std::cout
<< "Found AL_SOFT_bformat_ex" <<std::endl
;
1169 std::cout
<< "Found AL_EXT_BFORMAT" <<std::endl
;
1170 /* Without AL_SOFT_bformat_ex, OpenAL only supports FuMa channel
1171 * ordering and normalization, so a custom matrix is needed to
1172 * scale and reorder the source from AmbiX.
1174 std::vector
<double> mtx(size_t{64}*64, 0.0);
1175 mtx
[0 + 0*64] = std::sqrt(0.5);
1176 mtx
[3 + 1*64] = 1.0;
1177 mtx
[1 + 2*64] = 1.0;
1178 mtx
[2 + 3*64] = 1.0;
1179 swr_set_matrix(mSwresCtx
.get(), mtx
.data(), 64);
1184 ChannelLayout layout
{};
1185 av_channel_layout_from_mask(&layout
, mDstChanLayout
);
1188 int err
{swr_alloc_set_opts2(&ps
, &layout
, mDstSampleFmt
, mCodecCtx
->sample_rate
,
1189 &mCodecCtx
->ch_layout
, mCodecCtx
->sample_fmt
, mCodecCtx
->sample_rate
, 0, nullptr)};
1190 mSwresCtx
.reset(ps
);
1193 std::array
<char,AV_ERROR_MAX_STRING_SIZE
> errstr
{};
1194 std::cerr
<< "Failed to allocate SwrContext: "
1195 <<av_make_error_string(errstr
.data(), AV_ERROR_MAX_STRING_SIZE
, err
) <<std::endl
;
1199 if(int err
{swr_init(mSwresCtx
.get())})
1201 std::array
<char,AV_ERROR_MAX_STRING_SIZE
> errstr
{};
1202 std::cerr
<< "Failed to initialize audio converter: "
1203 <<av_make_error_string(errstr
.data(), AV_ERROR_MAX_STRING_SIZE
, err
) <<std::endl
;
1207 alGenBuffers(static_cast<ALsizei
>(mBuffers
.size()), mBuffers
.data());
1208 alGenSources(1, &mSource
);
1211 alSourcei(mSource
, AL_DIRECT_CHANNELS_SOFT
, DirectOutMode
);
1212 if(EnableWideStereo
)
1214 const std::array angles
{static_cast<float>(M_PI
/ 3.0), static_cast<float>(-M_PI
/ 3.0)};
1215 alSourcefv(mSource
, AL_STEREO_ANGLES
, angles
.data());
1219 for(ALuint bufid
: mBuffers
)
1221 alBufferi(bufid
, AL_AMBISONIC_LAYOUT_SOFT
, ambi_layout
);
1222 alBufferi(bufid
, AL_AMBISONIC_SCALING_SOFT
, ambi_scale
);
1226 if(EnableSuperStereo
)
1227 alSourcei(mSource
, AL_STEREO_MODE_SOFT
, AL_SUPER_STEREO_SOFT
);
1230 if(alGetError() != AL_NO_ERROR
)
1233 bool callback_ok
{false};
1234 if(alBufferCallbackSOFT
)
1236 alBufferCallbackSOFT(mBuffers
[0], mFormat
, mCodecCtx
->sample_rate
, bufferCallbackC
, this);
1237 alSourcei(mSource
, AL_BUFFER
, static_cast<ALint
>(mBuffers
[0]));
1238 if(alGetError() != AL_NO_ERROR
)
1240 fprintf(stderr
, "Failed to set buffer callback\n");
1241 alSourcei(mSource
, AL_BUFFER
, 0);
1245 mBufferData
.resize(static_cast<size_t>(duration_cast
<seconds
>(mCodecCtx
->sample_rate
*
1246 AudioBufferTotalTime
).count()) * mFrameSize
);
1247 std::fill(mBufferData
.begin(), mBufferData
.end(), uint8_t{});
1249 mReadPos
.store(0, std::memory_order_relaxed
);
1250 mWritePos
.store(mBufferData
.size()/mFrameSize
/2*mFrameSize
, std::memory_order_relaxed
);
1253 alcGetIntegerv(alcGetContextsDevice(alcGetCurrentContext()), ALC_REFRESH
, 1, &refresh
);
1254 sleep_time
= milliseconds
{seconds
{1}} / refresh
;
1259 buffer_len
= static_cast<int>(duration_cast
<seconds
>(mCodecCtx
->sample_rate
*
1260 AudioBufferTime
).count() * mFrameSize
);
1262 samples
.resize(static_cast<ALuint
>(buffer_len
));
1264 /* Prefill the codec buffer. */
1265 auto packet_sender
= [this]()
1269 const int ret
{mQueue
.sendPacket(mCodecCtx
.get())};
1270 if(ret
== AVErrorEOF
) break;
1273 auto sender
= std::async(std::launch::async
, packet_sender
);
1276 if(alcGetInteger64vSOFT
)
1279 alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()), ALC_DEVICE_CLOCK_SOFT
,
1281 mDeviceStartTime
= nanoseconds
{devtime
} - mCurrentPts
;
1284 mSamplesLen
= decodeFrame();
1287 mSamplesPos
= std::min(mSamplesLen
, getSync());
1289 auto skip
= nanoseconds
{seconds
{mSamplesPos
}} / mCodecCtx
->sample_rate
;
1290 mDeviceStartTime
-= skip
;
1291 mCurrentPts
+= skip
;
1296 if(mMovie
.mQuit
.load(std::memory_order_relaxed
))
1298 /* If mQuit is set, drain frames until we can't get more audio,
1299 * indicating we've reached the flush packet and the packet sender
1303 mSamplesLen
= decodeFrame();
1304 mSamplesPos
= mSamplesLen
;
1305 } while(mSamplesLen
> 0);
1310 if(!mBufferData
.empty())
1312 alGetSourcei(mSource
, AL_SOURCE_STATE
, &state
);
1314 /* If mQuit is not set, don't quit even if there's no more audio,
1315 * so what's buffered has a chance to play to the real end.
1317 readAudio(getSync());
1321 ALint processed
, queued
;
1323 /* First remove any processed buffers. */
1324 alGetSourcei(mSource
, AL_BUFFERS_PROCESSED
, &processed
);
1325 while(processed
> 0)
1328 alSourceUnqueueBuffers(mSource
, 1, &bid
);
1332 /* Refill the buffer queue. */
1333 int sync_skip
{getSync()};
1334 alGetSourcei(mSource
, AL_BUFFERS_QUEUED
, &queued
);
1335 while(static_cast<ALuint
>(queued
) < mBuffers
.size())
1337 /* Read the next chunk of data, filling the buffer, and queue
1340 if(!readAudio(samples
, static_cast<ALuint
>(buffer_len
), sync_skip
))
1343 const ALuint bufid
{mBuffers
[mBufferIdx
]};
1344 mBufferIdx
= static_cast<ALuint
>((mBufferIdx
+1) % mBuffers
.size());
1346 alBufferData(bufid
, mFormat
, samples
.data(), buffer_len
, mCodecCtx
->sample_rate
);
1347 alSourceQueueBuffers(mSource
, 1, &bufid
);
1351 /* Check that the source is playing. */
1352 alGetSourcei(mSource
, AL_SOURCE_STATE
, &state
);
1353 if(state
== AL_STOPPED
)
1355 /* AL_STOPPED means there was an underrun. Clear the buffer
1356 * queue since this likely means we're late, and rewind the
1357 * source to get it back into an AL_INITIAL state.
1359 alSourceRewind(mSource
);
1360 alSourcei(mSource
, AL_BUFFER
, 0);
1361 if(alcGetInteger64vSOFT
)
1363 /* Also update the device start time with the current
1364 * device clock, so the decoder knows we're running behind.
1367 alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()),
1368 ALC_DEVICE_CLOCK_SOFT
, 1, &devtime
);
1369 mDeviceStartTime
= nanoseconds
{devtime
} - mCurrentPts
;
1375 /* (re)start the source if needed, and wait for a buffer to finish */
1376 if(state
!= AL_PLAYING
&& state
!= AL_PAUSED
)
1378 if(!startPlayback())
1381 if(ALenum err
{alGetError()})
1382 std::cerr
<< "Got AL error: 0x"<<std::hex
<<err
<<std::dec
1383 << " ("<<alGetString(err
)<<")" <<std::endl
;
1385 mSrcCond
.wait_for(srclock
, sleep_time
);
1388 alSourceRewind(mSource
);
1389 alSourcei(mSource
, AL_BUFFER
, 0);
1396 nanoseconds
VideoState::getClock()
1398 /* NOTE: This returns incorrect times while not playing. */
1399 std::lock_guard
<std::mutex
> displock
{mDispPtsMutex
};
1400 if(mDisplayPtsTime
== microseconds::min())
1401 return nanoseconds::zero();
1402 auto delta
= get_avtime() - mDisplayPtsTime
;
1403 return mDisplayPts
+ delta
;
1406 /* Called by VideoState::updateVideo to display the next video frame. */
1407 void VideoState::display(SDL_Window
*screen
, SDL_Renderer
*renderer
, AVFrame
*frame
) const
1412 double aspect_ratio
;
1416 int frame_width
{frame
->width
- static_cast<int>(frame
->crop_left
+ frame
->crop_right
)};
1417 int frame_height
{frame
->height
- static_cast<int>(frame
->crop_top
+ frame
->crop_bottom
)};
1418 if(frame
->sample_aspect_ratio
.num
== 0)
1422 aspect_ratio
= av_q2d(frame
->sample_aspect_ratio
) * frame_width
/
1425 if(aspect_ratio
<= 0.0)
1426 aspect_ratio
= static_cast<double>(frame_width
) / frame_height
;
1428 SDL_GetWindowSize(screen
, &win_w
, &win_h
);
1430 w
= (static_cast<int>(std::rint(h
* aspect_ratio
)) + 3) & ~3;
1434 h
= (static_cast<int>(std::rint(w
/ aspect_ratio
)) + 3) & ~3;
1436 x
= (win_w
- w
) / 2;
1437 y
= (win_h
- h
) / 2;
1439 SDL_Rect src_rect
{ static_cast<int>(frame
->crop_left
), static_cast<int>(frame
->crop_top
),
1440 frame_width
, frame_height
};
1441 SDL_Rect dst_rect
{ x
, y
, w
, h
};
1442 SDL_RenderCopy(renderer
, mImage
, &src_rect
, &dst_rect
);
1443 SDL_RenderPresent(renderer
);
/* Called regularly on the main thread where the SDL_Renderer was created. It
 * handles updating the textures of decoded frames and displaying the latest
 * frame.
 */
void VideoState::updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw)
{
    size_t read_idx{mPictQRead.load(std::memory_order_relaxed)};
    Picture *vp{&mPictQ[read_idx]};

    auto clocktime = mMovie.getMasterClock();
    bool updated{false};
    /* Walk the picture queue forward, consuming every frame whose PTS has
     * already passed so only the most current frame gets displayed.
     */
    while(1)
    {
        size_t next_idx{(read_idx+1)%mPictQ.size()};
        if(next_idx == mPictQWrite.load(std::memory_order_acquire))
            break;
        Picture *nextvp{&mPictQ[next_idx]};
        if(clocktime < nextvp->mPts && !mMovie.mQuit.load(std::memory_order_relaxed))
        {
            /* For the first update, ensure the first frame gets shown. */
            if(!mFirstUpdate || updated)
                break;
        }

        vp = nextvp;
        updated = true;
        read_idx = next_idx;
    }
    if(mMovie.mQuit.load(std::memory_order_relaxed))
    {
        /* Shutting down: release the queue slot and wake the decoder thread
         * so it can finish.
         */
        if(mEOS)
            mFinalUpdate = true;
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();
        return;
    }

    AVFrame *frame{vp->mFrame.get()};
    if(updated)
    {
        /* Publish the new read position and wake the decoder waiting for a
         * free queue slot.
         */
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();

        /* allocate or resize the buffer! */
        bool fmt_updated{false};
        if(!mImage || mWidth != frame->width || mHeight != frame->height)
        {
            fmt_updated = true;
            if(mImage)
                SDL_DestroyTexture(mImage);
            mImage = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
                frame->width, frame->height);
            if(!mImage)
                std::cerr<< "Failed to create YV12 texture!" <<std::endl;
            mWidth = frame->width;
            mHeight = frame->height;
        }

        int frame_width{frame->width - static_cast<int>(frame->crop_left + frame->crop_right)};
        int frame_height{frame->height - static_cast<int>(frame->crop_top + frame->crop_bottom)};
        if(mFirstUpdate && frame_width > 0 && frame_height > 0)
        {
            /* For the first update, set the window size to the video size. */
            mFirstUpdate = false;

            if(frame->sample_aspect_ratio.den != 0)
            {
                double aspect_ratio = av_q2d(frame->sample_aspect_ratio);
                if(aspect_ratio >= 1.0)
                    frame_width = static_cast<int>(std::lround(frame_width * aspect_ratio));
                else if(aspect_ratio > 0.0)
                    frame_height = static_cast<int>(std::lround(frame_height / aspect_ratio));
            }
            SDL_SetWindowSize(screen, frame_width, frame_height);
        }

        if(mImage)
        {
            void *pixels{nullptr};
            int pitch{0};

            /* YUV420P frames can be uploaded to the IYUV texture directly;
             * anything else goes through swscale below.
             */
            if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
                SDL_UpdateYUVTexture(mImage, nullptr,
                    frame->data[0], frame->linesize[0],
                    frame->data[1], frame->linesize[1],
                    frame->data[2], frame->linesize[2]
                );
            else if(SDL_LockTexture(mImage, nullptr, &pixels, &pitch) != 0)
                std::cerr<< "Failed to lock texture" <<std::endl;
            else
            {
                // Convert the image into YUV format that SDL uses
                int w{frame->width};
                int h{frame->height};
                if(!mSwscaleCtx || fmt_updated)
                {
                    mSwscaleCtx.reset(sws_getContext(
                        w, h, mCodecCtx->pix_fmt,
                        w, h, AV_PIX_FMT_YUV420P, 0,
                        nullptr, nullptr, nullptr));
                }

                /* point pict at the queue */
                const auto framesize = static_cast<size_t>(w)*static_cast<size_t>(h);
                const auto pixelspan = al::span{static_cast<uint8_t*>(pixels), framesize*3/2};
                const std::array pict_data{
                    al::to_address(pixelspan.begin()),
                    al::to_address(pixelspan.begin() + ptrdiff_t{w}*h),
                    al::to_address(pixelspan.begin() + ptrdiff_t{w}*h + ptrdiff_t{w}*h/4)
                };
                const std::array pict_linesize{pitch, pitch/2, pitch/2};

                sws_scale(mSwscaleCtx.get(), std::data(frame->data), std::data(frame->linesize),
                    0, h, pict_data.data(), pict_linesize.data());
                SDL_UnlockTexture(mImage);
            }

            redraw = true;
        }
    }

    if(redraw)
    {
        /* Show the picture! */
        display(screen, renderer, frame);
    }

    if(updated)
    {
        /* Record when this frame's PTS was shown, for getClock(). */
        auto disp_time = get_avtime();

        std::lock_guard<std::mutex> displock{mDispPtsMutex};
        mDisplayPts = vp->mPts;
        mDisplayPtsTime = disp_time;
    }
    if(mEOS.load(std::memory_order_acquire))
    {
        /* The stream ended; once the queue is drained, signal the decoder
         * thread that the final frame has been consumed.
         */
        if((read_idx+1)%mPictQ.size() == mPictQWrite.load(std::memory_order_acquire))
        {
            mFinalUpdate = true;
            std::unique_lock<std::mutex>{mPictQMutex}.unlock();
            mPictQCond.notify_one();
        }
    }
}
/* Video decoding thread: receives frames from the codec, timestamps them, and
 * pushes them onto the picture queue for the main thread to display.
 */
int VideoState::handler()
{
    /* Pre-allocate the AVFrames the picture queue entries will reuse. */
    std::for_each(mPictQ.begin(), mPictQ.end(),
        [](Picture &pict) -> void
        { pict.mFrame = AVFramePtr{av_frame_alloc()}; });

    /* Prefill the codec buffer. */
    auto packet_sender = [this]()
    {
        while(1)
        {
            const int ret{mQueue.sendPacket(mCodecCtx.get())};
            if(ret == AVErrorEOF) break;
        }
    };
    auto sender = std::async(std::launch::async, packet_sender);

    {
        std::lock_guard<std::mutex> displock{mDispPtsMutex};
        mDisplayPtsTime = get_avtime();
    }

    auto current_pts = nanoseconds::zero();
    while(1)
    {
        size_t write_idx{mPictQWrite.load(std::memory_order_relaxed)};
        Picture *vp{&mPictQ[write_idx]};

        /* Retrieve video frame. */
        AVFrame *decoded_frame{vp->mFrame.get()};
        while(int ret{mQueue.receiveFrame(mCodecCtx.get(), decoded_frame)})
        {
            if(ret == AVErrorEOF) goto finish;
            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
        }

        /* Get the PTS for this frame. */
        if(decoded_frame->best_effort_timestamp != AVNoPtsValue)
            current_pts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
                static_cast<double>(decoded_frame->best_effort_timestamp)});
        vp->mPts = current_pts;

        /* Update the video clock to the next expected PTS. */
        auto frame_delay = av_q2d(mCodecCtx->time_base);
        frame_delay += decoded_frame->repeat_pict * (frame_delay * 0.5);
        current_pts += duration_cast<nanoseconds>(seconds_d64{frame_delay});

        /* Put the frame in the queue to be loaded into a texture and displayed
         * by the rendering thread.
         */
        write_idx = (write_idx+1)%mPictQ.size();
        mPictQWrite.store(write_idx, std::memory_order_release);

        if(write_idx == mPictQRead.load(std::memory_order_acquire))
        {
            /* Wait until we have space for a new pic */
            std::unique_lock<std::mutex> lock{mPictQMutex};
            while(write_idx == mPictQRead.load(std::memory_order_acquire))
                mPictQCond.wait(lock);
        }
    }
finish:
    /* End of stream: mark EOS and wait for the display thread to consume the
     * remaining queued pictures.
     */
    mEOS = true;

    std::unique_lock<std::mutex> lock{mPictQMutex};
    while(!mFinalUpdate) mPictQCond.wait(lock);

    return 0;
}
1666 int MovieState::decode_interrupt_cb(void *ctx
)
1668 return static_cast<MovieState
*>(ctx
)->mQuit
.load(std::memory_order_relaxed
);
1671 bool MovieState::prepare()
1673 AVIOContext
*avioctx
{nullptr};
1674 AVIOInterruptCB intcb
{decode_interrupt_cb
, this};
1675 if(avio_open2(&avioctx
, mFilename
.c_str(), AVIO_FLAG_READ
, &intcb
, nullptr))
1677 std::cerr
<< "Failed to open "<<mFilename
<<std::endl
;
1680 mIOContext
.reset(avioctx
);
1682 /* Open movie file. If avformat_open_input fails it will automatically free
1683 * this context, so don't set it onto a smart pointer yet.
1685 AVFormatContext
*fmtctx
{avformat_alloc_context()};
1686 fmtctx
->pb
= mIOContext
.get();
1687 fmtctx
->interrupt_callback
= intcb
;
1688 if(avformat_open_input(&fmtctx
, mFilename
.c_str(), nullptr, nullptr) != 0)
1690 std::cerr
<< "Failed to open "<<mFilename
<<std::endl
;
1693 mFormatCtx
.reset(fmtctx
);
1695 /* Retrieve stream information */
1696 if(avformat_find_stream_info(mFormatCtx
.get(), nullptr) < 0)
1698 std::cerr
<< mFilename
<<": failed to find stream info" <<std::endl
;
1702 /* Dump information about file onto standard error */
1703 av_dump_format(mFormatCtx
.get(), 0, mFilename
.c_str(), 0);
1705 mParseThread
= std::thread
{std::mem_fn(&MovieState::parse_handler
), this};
1707 std::unique_lock
<std::mutex
> slock
{mStartupMutex
};
1708 while(!mStartupDone
) mStartupCond
.wait(slock
);
1712 void MovieState::setTitle(SDL_Window
*window
) const
1714 auto pos1
= mFilename
.rfind('/');
1715 auto pos2
= mFilename
.rfind('\\');
1716 auto fpos
= ((pos1
== std::string::npos
) ? pos2
:
1717 (pos2
== std::string::npos
) ? pos1
:
1718 std::max(pos1
, pos2
)) + 1;
1719 SDL_SetWindowTitle(window
, (mFilename
.substr(fpos
)+" - "+AppName
).c_str());
1722 nanoseconds
MovieState::getClock() const
1724 if(mClockBase
== microseconds::min())
1725 return nanoseconds::zero();
1726 return get_avtime() - mClockBase
;
/* Returns the clock other streams should sync to: the video or audio stream's
 * clock when that stream is the designated master and present, otherwise the
 * external clock.
 */
nanoseconds MovieState::getMasterClock()
{
    if(mAVSyncType == SyncMaster::Video && mVideo.mStream)
        return mVideo.getClock();
    if(mAVSyncType == SyncMaster::Audio && mAudio.mStream)
        return mAudio.getClock();
    /* Fall back to the external clock. */
    return getClock();
}
1738 nanoseconds
MovieState::getDuration() const
1739 { return std::chrono::duration
<int64_t,std::ratio
<1,AV_TIME_BASE
>>(mFormatCtx
->duration
); }
/* Opens a decoder for the given stream and hands it to the matching media
 * handler (audio or video). Returns false if the codec can't be opened or the
 * stream is neither audio nor video.
 */
bool MovieState::streamComponentOpen(AVStream *stream)
{
    /* Get a pointer to the codec context for the stream, and open the
     * associated codec.
     */
    AVCodecCtxPtr avctx{avcodec_alloc_context3(nullptr)};
    if(!avctx) return false;

    if(avcodec_parameters_to_context(avctx.get(), stream->codecpar))
        return false;

    const AVCodec *codec{avcodec_find_decoder(avctx->codec_id)};
    if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
    {
        std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
            << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
        return false;
    }

    /* Initialize and start the media type handler */
    switch(avctx->codec_type)
    {
    case AVMEDIA_TYPE_AUDIO:
        mAudio.mStream = stream;
        mAudio.mCodecCtx = std::move(avctx);
        break;

    case AVMEDIA_TYPE_VIDEO:
        mVideo.mStream = stream;
        mVideo.mCodecCtx = std::move(avctx);
        break;

    default:
        /* Not a stream type we handle. */
        return false;
    }

    return true;
}
/* Demuxer thread: finds the A/V streams, spawns their handler threads, and
 * dispatches packets from the container into each stream's queue until EOF or
 * quit is requested.
 */
int MovieState::parse_handler()
{
    auto &audio_queue = mAudio.mQueue;
    auto &video_queue = mVideo.mQueue;

    int video_index{-1};
    int audio_index{-1};

    /* Find the first video and audio streams */
    const auto ctxstreams = al::span{mFormatCtx->streams, mFormatCtx->nb_streams};
    for(size_t i{0};i < ctxstreams.size();++i)
    {
        auto codecpar = ctxstreams[i]->codecpar;
        if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !DisableVideo && video_index < 0
            && streamComponentOpen(ctxstreams[i]))
                video_index = static_cast<int>(i);
        else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0
            && streamComponentOpen(ctxstreams[i]))
                audio_index = static_cast<int>(i);
    }

    /* Let the caller in prepare() know startup is finished. */
    {
        std::unique_lock<std::mutex> slock{mStartupMutex};
        mStartupDone = true;
    }
    mStartupCond.notify_all();

    if(video_index < 0 && audio_index < 0)
    {
        std::cerr<< mFilename<<": could not open codecs" <<std::endl;
        mQuit = true;
    }

    /* Set the base time 750ms ahead of the current av time. */
    mClockBase = get_avtime() + milliseconds{750};

    if(audio_index >= 0)
        mAudioThread = std::thread{std::mem_fn(&AudioState::handler), &mAudio};
    if(video_index >= 0)
        mVideoThread = std::thread{std::mem_fn(&VideoState::handler), &mVideo};

    /* Main packet reading/dispatching loop */
    AVPacketPtr packet{av_packet_alloc()};
    while(!mQuit.load(std::memory_order_relaxed))
    {
        if(av_read_frame(mFormatCtx.get(), packet.get()) < 0)
            break;

        /* Copy the packet into the queue it's meant for. */
        if(packet->stream_index == video_index)
        {
            /* Back off while the queue is full, unless quitting. */
            while(!mQuit.load(std::memory_order_acquire) && !video_queue.put(packet.get()))
                std::this_thread::sleep_for(milliseconds{100});
        }
        else if(packet->stream_index == audio_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !audio_queue.put(packet.get()))
                std::this_thread::sleep_for(milliseconds{100});
        }

        av_packet_unref(packet.get());
    }
    /* Finish the queues so the receivers know nothing more is coming. */
    video_queue.setFinished();
    audio_queue.setFinished();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();

    /* Wait for the display thread to consume the last queued pictures. */
    mVideo.mEOS = true;
    std::unique_lock<std::mutex> lock{mVideo.mPictQMutex};
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);
    lock.unlock();

    /* Tell the main loop this movie is done. */
    SDL_Event evt{};
    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);

    return 0;
}
/* Requests playback shutdown: flags all threads to quit and flushes the
 * packet queues so blocked producers/consumers wake up.
 */
void MovieState::stop()
{
    mQuit = true;
    mAudio.mQueue.flush();
    mVideo.mQueue.flush();
}
// Helper class+method to print the time with human-readable formatting.
struct PrettyTime {
    seconds mTime;
};

std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using hours = std::chrono::hours;
    using minutes = std::chrono::minutes;

    seconds t{rhs.mTime};
    // Print negative times as "-" followed by the absolute value.
    if(t.count() < 0)
    {
        os << '-';
        t *= -1;
    }

    // Only handle up to hour formatting
    if(t >= hours{1})
        os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
            << (duration_cast<minutes>(t).count() % 60) << 'm';
    else
        os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
    // Always show two-digit seconds, then restore the stream's fill/width.
    os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
        << std::setfill(' ');
    return os;
}
/* Program entry point (span form): sets up SDL video, the OpenAL device and
 * extensions, parses options, then runs the event/display loop over the
 * playlist of files.
 */
int main(al::span<std::string_view> args)
{
    SDL_SetMainReady();

    std::unique_ptr<MovieState> movState;

    if(args.size() < 2)
    {
        std::cerr<< "Usage: "<<args[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
        return 1;
    }
    /* Register all formats and codecs */
#if !(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58, 9, 100))
    av_register_all();
#endif
    /* Initialize networking protocols */
    avformat_network_init();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS))
    {
        std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
        return 1;
    }

    /* Make a window to put our video */
    SDL_Window *screen{SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE)};
    if(!screen)
    {
        std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
        return 1;
    }
    /* Make a renderer to handle the texture image surface and rendering. */
    Uint32 render_flags{SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC};
    SDL_Renderer *renderer{SDL_CreateRenderer(screen, -1, render_flags)};
    if(renderer)
    {
        SDL_RendererInfo rinf{};
        bool ok{false};

        /* Make sure the renderer supports IYUV textures. If not, fallback to a
         * software renderer. */
        if(SDL_GetRendererInfo(renderer, &rinf) == 0)
        {
            for(Uint32 i{0u};!ok && i < rinf.num_texture_formats;i++)
                ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
        }
        if(!ok)
        {
            std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
            SDL_DestroyRenderer(renderer);
            renderer = nullptr;
        }
    }
    if(!renderer)
    {
        /* Hardware renderer unusable; retry with the software renderer. */
        render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
        renderer = SDL_CreateRenderer(screen, -1, render_flags);
    }
    if(!renderer)
    {
        std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
        return 1;
    }
    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderFillRect(renderer, nullptr);
    SDL_RenderPresent(renderer);

    /* Open an audio device */
    args = args.subspan(1);
    if(InitAL(args) != 0)
        return 1;

    {
        ALCdevice *device{alcGetContextsDevice(alcGetCurrentContext())};
        if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
        {
            std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
            alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
                alcGetProcAddress(device, "alcGetInteger64vSOFT"));
        }
    }

    if(alIsExtensionPresent("AL_SOFT_source_latency"))
    {
        std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
        alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
            alGetProcAddress("alGetSourcei64vSOFT"));
    }
    if(alIsExtensionPresent("AL_SOFT_events"))
    {
        std::cout<< "Found AL_SOFT_events" <<std::endl;
        alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
            alGetProcAddress("alEventControlSOFT"));
        alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
            alGetProcAddress("alEventCallbackSOFT"));
    }
    if(alIsExtensionPresent("AL_SOFT_callback_buffer"))
    {
        std::cout<< "Found AL_SOFT_callback_buffer" <<std::endl;
        alBufferCallbackSOFT = reinterpret_cast<LPALBUFFERCALLBACKSOFT>(
            alGetProcAddress("alBufferCallbackSOFT"));
    }

    /* Consume leading option arguments; stops at the first filename. */
    size_t fileidx{0};
    for(;fileidx < args.size();++fileidx)
    {
        if(args[fileidx] == "-direct")
        {
            if(alIsExtensionPresent("AL_SOFT_direct_channels_remix"))
            {
                std::cout<< "Found AL_SOFT_direct_channels_remix" <<std::endl;
                DirectOutMode = AL_REMIX_UNMATCHED_SOFT;
            }
            else if(alIsExtensionPresent("AL_SOFT_direct_channels"))
            {
                std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
                DirectOutMode = AL_DROP_UNMATCHED_SOFT;
            }
            else
                std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
        }
        else if(args[fileidx] == "-wide")
        {
            if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
                std::cerr<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl;
            else
            {
                std::cout<< "Found AL_EXT_STEREO_ANGLES" <<std::endl;
                EnableWideStereo = true;
            }
        }
        else if(args[fileidx] == "-uhj")
        {
            if(!alIsExtensionPresent("AL_SOFT_UHJ"))
                std::cerr<< "AL_SOFT_UHJ not supported for UHJ decoding" <<std::endl;
            else
            {
                std::cout<< "Found AL_SOFT_UHJ" <<std::endl;
                EnableUhj = true;
            }
        }
        else if(args[fileidx] == "-superstereo")
        {
            if(!alIsExtensionPresent("AL_SOFT_UHJ"))
                std::cerr<< "AL_SOFT_UHJ not supported for Super Stereo decoding" <<std::endl;
            else
            {
                std::cout<< "Found AL_SOFT_UHJ (Super Stereo)" <<std::endl;
                EnableSuperStereo = true;
            }
        }
        else if(args[fileidx] == "-novideo")
            DisableVideo = true;
        else
            break;
    }

    /* Open the first file that successfully prepares. */
    while(fileidx < args.size() && !movState)
    {
        movState = std::make_unique<MovieState>(args[fileidx++]);
        if(!movState->prepare()) movState = nullptr;
    }
    if(!movState)
    {
        std::cerr<< "Could not start a video" <<std::endl;
        return 1;
    }
    movState->setTitle(screen);

    /* Default to going to the next movie at the end of one. */
    enum class EomAction {
        Next, Quit
    } eom_action{EomAction::Next};
    seconds last_time{seconds::min()};
    while(1)
    {
        /* SDL_WaitEventTimeout is broken, just force a 10ms sleep. */
        std::this_thread::sleep_for(milliseconds{10});

        /* Update the on-console position readout once per second. */
        auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
        if(cur_time != last_time)
        {
            auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
            std::cout<< "    \r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
            last_time = cur_time;
        }

        bool force_redraw{false};
        SDL_Event event{};
        while(SDL_PollEvent(&event) != 0)
        {
            switch(event.type)
            {
            case SDL_KEYDOWN:
                switch(event.key.keysym.sym)
                {
                case SDLK_ESCAPE:
                    /* Escape quits after stopping the current movie. */
                    movState->stop();
                    eom_action = EomAction::Quit;
                    break;

                case SDLK_n:
                    /* 'n' skips to the next movie in the playlist. */
                    movState->stop();
                    eom_action = EomAction::Next;
                    break;

                default:
                    break;
                }
                break;

            case SDL_WINDOWEVENT:
                switch(event.window.event)
                {
                case SDL_WINDOWEVENT_RESIZED:
                    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
                    SDL_RenderFillRect(renderer, nullptr);
                    force_redraw = true;
                    break;

                case SDL_WINDOWEVENT_EXPOSED:
                    force_redraw = true;
                    break;

                default:
                    break;
                }
                break;

            case SDL_QUIT:
                movState->stop();
                eom_action = EomAction::Quit;
                break;

            case FF_MOVIE_DONE_EVENT:
                std::cout<<'\n';
                last_time = seconds::min();
                if(eom_action != EomAction::Quit)
                {
                    /* Try the next playlist entry. */
                    movState = nullptr;
                    while(fileidx < args.size() && !movState)
                    {
                        movState = std::make_unique<MovieState>(args[fileidx++]);
                        if(!movState->prepare()) movState = nullptr;
                    }
                    if(movState)
                    {
                        movState->setTitle(screen);
                        break;
                    }
                }

                /* Nothing more to play. Shut everything down and quit. */
                movState = nullptr;

                CloseAL();

                SDL_DestroyRenderer(renderer);
                renderer = nullptr;
                SDL_DestroyWindow(screen);
                screen = nullptr;

                SDL_Quit();
                exit(0);

            default:
                break;
            }
        }

        movState->mVideo.updateVideo(screen, renderer, force_redraw);
    }

    std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
    return 1;
}
2180 int main(int argc
, char *argv
[])
2183 auto args
= std::vector
<std::string_view
>(static_cast<unsigned int>(argc
));
2184 std::copy_n(argv
, args
.size(), args
.begin());
2185 return main(al::span
{args
});