2 * An example showing how to play a stream sync'd to video, using ffmpeg.
7 #include <condition_variable>
23 #include "libavcodec/avcodec.h"
24 #include "libavformat/avformat.h"
25 #include "libavformat/avio.h"
26 #include "libavutil/time.h"
27 #include "libavutil/pixfmt.h"
28 #include "libavutil/avstring.h"
29 #include "libavutil/channel_layout.h"
30 #include "libswscale/swscale.h"
31 #include "libswresample/swresample.h"
41 #ifndef ALC_SOFT_device_clock
42 #define ALC_SOFT_device_clock 1
43 typedef int64_t ALCint64SOFT
;
44 typedef uint64_t ALCuint64SOFT
;
45 #define ALC_DEVICE_CLOCK_SOFT 0x1600
46 #define ALC_DEVICE_LATENCY_SOFT 0x1601
47 #define ALC_DEVICE_CLOCK_LATENCY_SOFT 0x1602
48 #define AL_SAMPLE_OFFSET_CLOCK_SOFT 0x1202
49 #define AL_SEC_OFFSET_CLOCK_SOFT 0x1203
50 typedef void (ALC_APIENTRY
*LPALCGETINTEGER64VSOFT
)(ALCdevice
*device
, ALCenum pname
, ALsizei size
, ALCint64SOFT
*values
);
/* Shorthand aliases for the std::chrono duration types used throughout. */
using nanoseconds = std::chrono::nanoseconds;
using microseconds = std::chrono::microseconds;
using milliseconds = std::chrono::milliseconds;
using seconds = std::chrono::seconds;
/* Double-precision seconds, for fractional time arithmetic. */
using seconds_d64 = std::chrono::duration<double>;
62 const std::string
AppName("alffplay");
64 bool EnableDirectOut
= false;
65 LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT
;
66 LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT
;
68 const seconds
AVNoSyncThreshold(10);
70 const milliseconds
VideoSyncThreshold(10);
71 #define VIDEO_PICTURE_QUEUE_SIZE 16
73 const seconds_d64
AudioSyncThreshold(0.03);
74 const milliseconds
AudioSampleCorrectionMax(50);
75 /* Averaging filter coefficient for audio sync. */
76 #define AUDIO_DIFF_AVG_NB 20
77 const double AudioAvgFilterCoeff
= std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB
);
78 /* Per-buffer size, in time */
79 const milliseconds
AudioBufferTime(20);
80 /* Buffer total size, in time (should be divisible by the buffer time) */
81 const milliseconds
AudioBufferTotalTime(800);
83 #define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
86 FF_UPDATE_EVENT
= SDL_USEREVENT
,
91 enum class SyncMaster
{
100 inline microseconds
get_avtime()
101 { return microseconds(av_gettime()); }
103 /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
104 struct AVIOContextDeleter
{
105 void operator()(AVIOContext
*ptr
) { avio_closep(&ptr
); }
107 using AVIOContextPtr
= std::unique_ptr
<AVIOContext
,AVIOContextDeleter
>;
109 struct AVFormatCtxDeleter
{
110 void operator()(AVFormatContext
*ptr
) { avformat_close_input(&ptr
); }
112 using AVFormatCtxPtr
= std::unique_ptr
<AVFormatContext
,AVFormatCtxDeleter
>;
114 struct AVCodecCtxDeleter
{
115 void operator()(AVCodecContext
*ptr
) { avcodec_free_context(&ptr
); }
117 using AVCodecCtxPtr
= std::unique_ptr
<AVCodecContext
,AVCodecCtxDeleter
>;
119 struct AVFrameDeleter
{
120 void operator()(AVFrame
*ptr
) { av_frame_free(&ptr
); }
122 using AVFramePtr
= std::unique_ptr
<AVFrame
,AVFrameDeleter
>;
124 struct SwrContextDeleter
{
125 void operator()(SwrContext
*ptr
) { swr_free(&ptr
); }
127 using SwrContextPtr
= std::unique_ptr
<SwrContext
,SwrContextDeleter
>;
129 struct SwsContextDeleter
{
130 void operator()(SwsContext
*ptr
) { sws_freeContext(ptr
); }
132 using SwsContextPtr
= std::unique_ptr
<SwsContext
,SwsContextDeleter
>;
136 std::deque
<AVPacket
> mPackets
;
137 size_t mTotalSize
{0};
140 ~PacketQueue() { clear(); }
142 bool empty() const noexcept
{ return mPackets
.empty(); }
143 size_t totalSize() const noexcept
{ return mTotalSize
; }
145 void put(const AVPacket
*pkt
)
147 mPackets
.push_back(AVPacket
{});
148 if(av_packet_ref(&mPackets
.back(), pkt
) != 0)
151 mTotalSize
+= mPackets
.back().size
;
154 AVPacket
*front() noexcept
155 { return &mPackets
.front(); }
159 AVPacket
*pkt
= &mPackets
.front();
160 mTotalSize
-= pkt
->size
;
161 av_packet_unref(pkt
);
162 mPackets
.pop_front();
167 for(AVPacket
&pkt
: mPackets
)
168 av_packet_unref(&pkt
);
180 AVStream
*mStream
{nullptr};
181 AVCodecCtxPtr mCodecCtx
;
183 std::mutex mQueueMtx
;
184 std::condition_variable mQueueCond
;
186 /* Used for clock difference average computation */
187 seconds_d64 mClockDiffAvg
{0};
189 /* Time of the next sample to be buffered */
190 nanoseconds mCurrentPts
{0};
192 /* Device clock time that the stream started at. */
193 nanoseconds mDeviceStartTime
{nanoseconds::min()};
195 /* Decompressed sample frame, and swresample context for conversion */
196 AVFramePtr mDecodedFrame
;
197 SwrContextPtr mSwresCtx
;
199 /* Conversion format, for what gets fed to OpenAL */
200 int mDstChanLayout
{0};
201 AVSampleFormat mDstSampleFmt
{AV_SAMPLE_FMT_NONE
};
203 /* Storage of converted samples */
204 uint8_t *mSamples
{nullptr};
205 int mSamplesLen
{0}; /* In samples */
210 ALenum mFormat
{AL_NONE
};
211 ALsizei mFrameSize
{0};
213 std::mutex mSrcMutex
;
215 std::vector
<ALuint
> mBuffers
;
216 ALsizei mBufferIdx
{0};
218 AudioState(MovieState
&movie
) : mMovie(movie
)
223 alDeleteSources(1, &mSource
);
224 if(!mBuffers
.empty())
225 alDeleteBuffers(mBuffers
.size(), mBuffers
.data());
230 nanoseconds
getClockNoLock();
231 nanoseconds
getClock()
233 std::lock_guard
<std::mutex
> lock(mSrcMutex
);
234 return getClockNoLock();
237 bool isBufferFilled();
238 void startPlayback();
242 int readAudio(uint8_t *samples
, int length
);
250 AVStream
*mStream
{nullptr};
251 AVCodecCtxPtr mCodecCtx
;
253 std::mutex mQueueMtx
;
254 std::condition_variable mQueueCond
;
256 nanoseconds mClock
{0};
257 nanoseconds mFrameTimer
{0};
258 nanoseconds mFrameLastPts
{0};
259 nanoseconds mFrameLastDelay
{0};
260 nanoseconds mCurrentPts
{0};
261 /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
262 microseconds mCurrentPtsTime
{0};
264 /* Decompressed video frame, and swscale context for conversion */
265 AVFramePtr mDecodedFrame
;
266 SwsContextPtr mSwscaleCtx
;
269 SDL_Texture
*mImage
{nullptr};
270 int mWidth
{0}, mHeight
{0}; /* Logical image size (actual size may be larger) */
271 std::atomic
<bool> mUpdated
{false};
277 SDL_DestroyTexture(mImage
);
281 std::array
<Picture
,VIDEO_PICTURE_QUEUE_SIZE
> mPictQ
;
282 size_t mPictQSize
{0}, mPictQRead
{0}, mPictQWrite
{0};
283 std::mutex mPictQMutex
;
284 std::condition_variable mPictQCond
;
285 bool mFirstUpdate
{true};
286 std::atomic
<bool> mEOS
{false};
287 std::atomic
<bool> mFinalUpdate
{false};
289 VideoState(MovieState
&movie
) : mMovie(movie
) { }
291 nanoseconds
getClock();
292 bool isBufferFilled();
294 static Uint32 SDLCALL
sdl_refresh_timer_cb(Uint32 interval
, void *opaque
);
295 void schedRefresh(milliseconds delay
);
296 void display(SDL_Window
*screen
, SDL_Renderer
*renderer
);
297 void refreshTimer(SDL_Window
*screen
, SDL_Renderer
*renderer
);
298 void updatePicture(SDL_Window
*screen
, SDL_Renderer
*renderer
);
299 int queuePicture(nanoseconds pts
);
304 AVIOContextPtr mIOContext
;
305 AVFormatCtxPtr mFormatCtx
;
307 SyncMaster mAVSyncType
{SyncMaster::Default
};
309 microseconds mClockBase
{0};
310 std::atomic
<bool> mPlaying
{false};
313 std::condition_variable mSendCond
;
314 /* NOTE: false/clear = need data, true/set = no data needed */
315 std::atomic_flag mSendDataGood
;
317 std::atomic
<bool> mQuit
{false};
322 std::thread mParseThread
;
323 std::thread mAudioThread
;
324 std::thread mVideoThread
;
326 std::string mFilename
;
328 MovieState(std::string fname
)
329 : mAudio(*this), mVideo(*this), mFilename(std::move(fname
))
334 if(mParseThread
.joinable())
338 static int decode_interrupt_cb(void *ctx
);
340 void setTitle(SDL_Window
*window
);
342 nanoseconds
getClock();
344 nanoseconds
getMasterClock();
346 nanoseconds
getDuration();
348 int streamComponentOpen(int stream_index
);
353 nanoseconds
AudioState::getClockNoLock()
355 // The audio clock is the timestamp of the sample currently being heard.
356 if(alcGetInteger64vSOFT
)
358 // If device start time = min, we aren't playing yet.
359 if(mDeviceStartTime
== nanoseconds::min())
360 return nanoseconds::zero();
362 // Get the current device clock time and latency.
363 auto device
= alcGetContextsDevice(alcGetCurrentContext());
364 ALCint64SOFT devtimes
[2] = {0,0};
365 alcGetInteger64vSOFT(device
, ALC_DEVICE_CLOCK_LATENCY_SOFT
, 2, devtimes
);
366 auto latency
= nanoseconds(devtimes
[1]);
367 auto device_time
= nanoseconds(devtimes
[0]);
369 // The clock is simply the current device time relative to the recorded
370 // start time. We can also subtract the latency to get more a accurate
371 // position of where the audio device actually is in the output stream.
372 return device_time
- mDeviceStartTime
- latency
;
375 /* The source-based clock is based on 4 components:
376 * 1 - The timestamp of the next sample to buffer (mCurrentPts)
377 * 2 - The length of the source's buffer queue
378 * (AudioBufferTime*AL_BUFFERS_QUEUED)
379 * 3 - The offset OpenAL is currently at in the source (the first value
380 * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
381 * 4 - The latency between OpenAL and the DAC (the second value from
382 * AL_SAMPLE_OFFSET_LATENCY_SOFT)
384 * Subtracting the length of the source queue from the next sample's
385 * timestamp gives the timestamp of the sample at the start of the source
386 * queue. Adding the source offset to that results in the timestamp for the
387 * sample at OpenAL's current position, and subtracting the source latency
388 * from that gives the timestamp of the sample currently at the DAC.
390 nanoseconds pts
= mCurrentPts
;
393 ALint64SOFT offset
[2];
397 /* NOTE: The source state must be checked last, in case an underrun
398 * occurs and the source stops between retrieving the offset+latency
399 * and getting the state. */
400 if(alGetSourcei64vSOFT
)
401 alGetSourcei64vSOFT(mSource
, AL_SAMPLE_OFFSET_LATENCY_SOFT
, offset
);
405 alGetSourcei(mSource
, AL_SAMPLE_OFFSET
, &ioffset
);
406 offset
[0] = (ALint64SOFT
)ioffset
<< 32;
409 alGetSourcei(mSource
, AL_BUFFERS_QUEUED
, &queued
);
410 alGetSourcei(mSource
, AL_SOURCE_STATE
, &status
);
412 /* If the source is AL_STOPPED, then there was an underrun and all
413 * buffers are processed, so ignore the source queue. The audio thread
414 * will put the source into an AL_INITIAL state and clear the queue
415 * when it starts recovery. */
416 if(status
!= AL_STOPPED
)
418 using fixed32
= std::chrono::duration
<int64_t,std::ratio
<1,(1ll<<32)>>;
420 pts
-= AudioBufferTime
*queued
;
421 pts
+= std::chrono::duration_cast
<nanoseconds
>(
422 fixed32(offset
[0] / mCodecCtx
->sample_rate
)
425 /* Don't offset by the latency if the source isn't playing. */
426 if(status
== AL_PLAYING
)
427 pts
-= nanoseconds(offset
[1]);
430 return std::max(pts
, nanoseconds::zero());
433 bool AudioState::isBufferFilled()
435 /* All of OpenAL's buffer queueing happens under the mSrcMutex lock, as
436 * does the source gen. So when we're able to grab the lock and the source
437 * is valid, the queue must be full.
439 std::lock_guard
<std::mutex
> lock(mSrcMutex
);
443 void AudioState::startPlayback()
445 alSourcePlay(mSource
);
446 if(alcGetInteger64vSOFT
)
448 using fixed32
= std::chrono::duration
<int64_t,std::ratio
<1,(1ll<<32)>>;
450 // Subtract the total buffer queue time from the current pts to get the
451 // pts of the start of the queue.
452 nanoseconds startpts
= mCurrentPts
- AudioBufferTotalTime
;
453 int64_t srctimes
[2]={0,0};
454 alGetSourcei64vSOFT(mSource
, AL_SAMPLE_OFFSET_CLOCK_SOFT
, srctimes
);
455 auto device_time
= nanoseconds(srctimes
[1]);
456 auto src_offset
= std::chrono::duration_cast
<nanoseconds
>(fixed32(srctimes
[0])) /
457 mCodecCtx
->sample_rate
;
459 // The mixer may have ticked and incremented the device time and sample
460 // offset, so subtract the source offset from the device time to get
461 // the device time the source started at. Also subtract startpts to get
462 // the device time the stream would have started at to reach where it
464 mDeviceStartTime
= device_time
- src_offset
- startpts
;
468 int AudioState::getSync()
470 if(mMovie
.mAVSyncType
== SyncMaster::Audio
)
473 auto ref_clock
= mMovie
.getMasterClock();
474 auto diff
= ref_clock
- getClockNoLock();
476 if(!(diff
< AVNoSyncThreshold
&& diff
> -AVNoSyncThreshold
))
478 /* Difference is TOO big; reset accumulated average */
479 mClockDiffAvg
= seconds_d64::zero();
483 /* Accumulate the diffs */
484 mClockDiffAvg
= mClockDiffAvg
*AudioAvgFilterCoeff
+ diff
;
485 auto avg_diff
= mClockDiffAvg
*(1.0 - AudioAvgFilterCoeff
);
486 if(avg_diff
< AudioSyncThreshold
/2.0 && avg_diff
> -AudioSyncThreshold
)
489 /* Constrain the per-update difference to avoid exceedingly large skips */
490 diff
= std::min
<nanoseconds
>(std::max
<nanoseconds
>(diff
, -AudioSampleCorrectionMax
),
491 AudioSampleCorrectionMax
);
492 return (int)std::chrono::duration_cast
<seconds
>(diff
*mCodecCtx
->sample_rate
).count();
495 int AudioState::decodeFrame()
497 while(!mMovie
.mQuit
.load(std::memory_order_relaxed
))
499 std::unique_lock
<std::mutex
> lock(mQueueMtx
);
500 int ret
= avcodec_receive_frame(mCodecCtx
.get(), mDecodedFrame
.get());
501 if(ret
== AVERROR(EAGAIN
))
503 mMovie
.mSendDataGood
.clear(std::memory_order_relaxed
);
504 std::unique_lock
<std::mutex
>(mMovie
.mSendMtx
).unlock();
505 mMovie
.mSendCond
.notify_one();
507 mQueueCond
.wait(lock
);
508 ret
= avcodec_receive_frame(mCodecCtx
.get(), mDecodedFrame
.get());
509 } while(ret
== AVERROR(EAGAIN
));
512 if(ret
== AVERROR_EOF
) break;
513 mMovie
.mSendDataGood
.clear(std::memory_order_relaxed
);
514 mMovie
.mSendCond
.notify_one();
517 std::cerr
<< "Failed to decode frame: "<<ret
<<std::endl
;
521 if(mDecodedFrame
->nb_samples
<= 0)
523 av_frame_unref(mDecodedFrame
.get());
527 /* If provided, update w/ pts */
528 if(mDecodedFrame
->best_effort_timestamp
!= AV_NOPTS_VALUE
)
529 mCurrentPts
= std::chrono::duration_cast
<nanoseconds
>(
530 seconds_d64(av_q2d(mStream
->time_base
)*mDecodedFrame
->best_effort_timestamp
)
533 if(mDecodedFrame
->nb_samples
> mSamplesMax
)
537 &mSamples
, nullptr, mCodecCtx
->channels
,
538 mDecodedFrame
->nb_samples
, mDstSampleFmt
, 0
540 mSamplesMax
= mDecodedFrame
->nb_samples
;
542 /* Return the amount of sample frames converted */
543 int data_size
= swr_convert(mSwresCtx
.get(), &mSamples
, mDecodedFrame
->nb_samples
,
544 (const uint8_t**)mDecodedFrame
->data
, mDecodedFrame
->nb_samples
547 av_frame_unref(mDecodedFrame
.get());
/* Duplicates the sample at in to out, count times. The frame size is a
 * multiple of the template type size.
 */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
{
    const T *sample = reinterpret_cast<const T*>(in);
    T *dst = reinterpret_cast<T*>(out);
    if(frame_size == sizeof(T))
        std::fill_n(dst, count, *sample);
    else
    {
        /* NOTE: frame_size is a multiple of sizeof(T). */
        int type_mult = frame_size / sizeof(T);
        int i = 0;
        std::generate_n(dst, count*type_mult,
            [sample,type_mult,&i]() -> T
            {
                /* Cycle through the frame's component values. */
                T ret = sample[i];
                i = (i+1)%type_mult;
                return ret;
            }
        );
    }
}
581 int AudioState::readAudio(uint8_t *samples
, int length
)
583 int sample_skip
= getSync();
586 /* Read the next chunk of data, refill the buffer, and queue it
588 length
/= mFrameSize
;
589 while(audio_size
< length
)
591 if(mSamplesLen
<= 0 || mSamplesPos
>= mSamplesLen
)
593 int frame_len
= decodeFrame();
594 if(frame_len
<= 0) break;
596 mSamplesLen
= frame_len
;
597 mSamplesPos
= std::min(mSamplesLen
, sample_skip
);
598 sample_skip
-= mSamplesPos
;
600 // Adjust the device start time and current pts by the amount we're
601 // skipping/duplicating, so that the clock remains correct for the
602 // current stream position.
603 auto skip
= nanoseconds(seconds(mSamplesPos
)) / mCodecCtx
->sample_rate
;
604 mDeviceStartTime
-= skip
;
609 int rem
= length
- audio_size
;
612 int len
= mSamplesLen
- mSamplesPos
;
613 if(rem
> len
) rem
= len
;
614 memcpy(samples
, mSamples
+ mSamplesPos
*mFrameSize
, rem
*mFrameSize
);
618 rem
= std::min(rem
, -mSamplesPos
);
620 /* Add samples by copying the first sample */
621 if((mFrameSize
&7) == 0)
622 sample_dup
<uint64_t>(samples
, mSamples
, rem
, mFrameSize
);
623 else if((mFrameSize
&3) == 0)
624 sample_dup
<uint32_t>(samples
, mSamples
, rem
, mFrameSize
);
625 else if((mFrameSize
&1) == 0)
626 sample_dup
<uint16_t>(samples
, mSamples
, rem
, mFrameSize
);
628 sample_dup
<uint8_t>(samples
, mSamples
, rem
, mFrameSize
);
632 mCurrentPts
+= nanoseconds(seconds(rem
)) / mCodecCtx
->sample_rate
;
633 samples
+= rem
*mFrameSize
;
637 if(audio_size
< length
&& audio_size
> 0)
639 int rem
= length
- audio_size
;
640 std::fill_n(samples
, rem
*mFrameSize
,
641 (mDstSampleFmt
== AV_SAMPLE_FMT_U8
) ? 0x80 : 0x00);
642 mCurrentPts
+= nanoseconds(seconds(rem
)) / mCodecCtx
->sample_rate
;
646 return audio_size
* mFrameSize
;
650 int AudioState::handler()
652 std::unique_lock
<std::mutex
> lock(mSrcMutex
);
655 /* Find a suitable format for OpenAL. */
657 if(mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_U8
|| mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_U8P
)
659 mDstSampleFmt
= AV_SAMPLE_FMT_U8
;
661 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_7POINT1
&&
662 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
663 (fmt
=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE
&& fmt
!= -1)
665 mDstChanLayout
= mCodecCtx
->channel_layout
;
669 if((mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1
||
670 mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1_BACK
) &&
671 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
672 (fmt
=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE
&& fmt
!= -1)
674 mDstChanLayout
= mCodecCtx
->channel_layout
;
678 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_MONO
)
680 mDstChanLayout
= mCodecCtx
->channel_layout
;
682 mFormat
= AL_FORMAT_MONO8
;
686 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
688 mFormat
= AL_FORMAT_STEREO8
;
691 if((mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_FLT
|| mCodecCtx
->sample_fmt
== AV_SAMPLE_FMT_FLTP
) &&
692 alIsExtensionPresent("AL_EXT_FLOAT32"))
694 mDstSampleFmt
= AV_SAMPLE_FMT_FLT
;
696 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_7POINT1
&&
697 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
698 (fmt
=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE
&& fmt
!= -1)
700 mDstChanLayout
= mCodecCtx
->channel_layout
;
704 if((mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1
||
705 mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1_BACK
) &&
706 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
707 (fmt
=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE
&& fmt
!= -1)
709 mDstChanLayout
= mCodecCtx
->channel_layout
;
713 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_MONO
)
715 mDstChanLayout
= mCodecCtx
->channel_layout
;
717 mFormat
= AL_FORMAT_MONO_FLOAT32
;
721 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
723 mFormat
= AL_FORMAT_STEREO_FLOAT32
;
728 mDstSampleFmt
= AV_SAMPLE_FMT_S16
;
730 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_7POINT1
&&
731 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
732 (fmt
=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE
&& fmt
!= -1)
734 mDstChanLayout
= mCodecCtx
->channel_layout
;
738 if((mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1
||
739 mCodecCtx
->channel_layout
== AV_CH_LAYOUT_5POINT1_BACK
) &&
740 alIsExtensionPresent("AL_EXT_MCFORMATS") &&
741 (fmt
=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE
&& fmt
!= -1)
743 mDstChanLayout
= mCodecCtx
->channel_layout
;
747 if(mCodecCtx
->channel_layout
== AV_CH_LAYOUT_MONO
)
749 mDstChanLayout
= mCodecCtx
->channel_layout
;
751 mFormat
= AL_FORMAT_MONO16
;
755 mDstChanLayout
= AV_CH_LAYOUT_STEREO
;
757 mFormat
= AL_FORMAT_STEREO16
;
760 ALsizei buffer_len
= std::chrono::duration_cast
<std::chrono::duration
<int>>(
761 mCodecCtx
->sample_rate
* AudioBufferTime
).count() * mFrameSize
;
762 void *samples
= av_malloc(buffer_len
);
769 mDecodedFrame
.reset(av_frame_alloc());
772 std::cerr
<< "Failed to allocate audio frame" <<std::endl
;
776 mSwresCtx
.reset(swr_alloc_set_opts(nullptr,
777 mDstChanLayout
, mDstSampleFmt
, mCodecCtx
->sample_rate
,
778 mCodecCtx
->channel_layout
? mCodecCtx
->channel_layout
:
779 (uint64_t)av_get_default_channel_layout(mCodecCtx
->channels
),
780 mCodecCtx
->sample_fmt
, mCodecCtx
->sample_rate
,
783 if(!mSwresCtx
|| swr_init(mSwresCtx
.get()) != 0)
785 std::cerr
<< "Failed to initialize audio converter" <<std::endl
;
789 mBuffers
.assign(AudioBufferTotalTime
/ AudioBufferTime
, 0);
790 alGenBuffers(mBuffers
.size(), mBuffers
.data());
791 alGenSources(1, &mSource
);
794 alSourcei(mSource
, AL_DIRECT_CHANNELS_SOFT
, AL_TRUE
);
796 while(alGetError() == AL_NO_ERROR
&& !mMovie
.mQuit
.load(std::memory_order_relaxed
))
798 /* First remove any processed buffers. */
800 alGetSourcei(mSource
, AL_BUFFERS_PROCESSED
, &processed
);
803 std::array
<ALuint
,4> bids
;
804 alSourceUnqueueBuffers(mSource
, std::min
<ALsizei
>(bids
.size(), processed
),
806 processed
-= std::min
<ALsizei
>(bids
.size(), processed
);
809 /* Refill the buffer queue. */
811 alGetSourcei(mSource
, AL_BUFFERS_QUEUED
, &queued
);
812 while((ALuint
)queued
< mBuffers
.size())
816 /* Read the next chunk of data, fill the buffer, and queue it on
818 audio_size
= readAudio(reinterpret_cast<uint8_t*>(samples
), buffer_len
);
819 if(audio_size
<= 0) break;
821 ALuint bufid
= mBuffers
[mBufferIdx
++];
822 mBufferIdx
%= mBuffers
.size();
824 alBufferData(bufid
, mFormat
, samples
, audio_size
, mCodecCtx
->sample_rate
);
825 alSourceQueueBuffers(mSource
, 1, &bufid
);
831 /* Check that the source is playing. */
833 alGetSourcei(mSource
, AL_SOURCE_STATE
, &state
);
834 if(state
== AL_STOPPED
)
836 /* AL_STOPPED means there was an underrun. Clear the buffer queue
837 * since this likely means we're late, and rewind the source to get
838 * it back into an AL_INITIAL state.
840 alSourceRewind(mSource
);
841 alSourcei(mSource
, AL_BUFFER
, 0);
845 /* (re)start the source if needed, and wait for a buffer to finish */
846 if(state
!= AL_PLAYING
&& state
!= AL_PAUSED
&&
847 mMovie
.mPlaying
.load(std::memory_order_relaxed
))
851 SDL_Delay((AudioBufferTime
/3).count());
855 alSourceRewind(mSource
);
856 alSourcei(mSource
, AL_BUFFER
, 0);
865 nanoseconds
VideoState::getClock()
867 /* NOTE: This returns incorrect times while not playing. */
868 auto delta
= get_avtime() - mCurrentPtsTime
;
869 return mCurrentPts
+ delta
;
872 bool VideoState::isBufferFilled()
874 std::unique_lock
<std::mutex
> lock(mPictQMutex
);
875 return mPictQSize
>= mPictQ
.size();
878 Uint32 SDLCALL
VideoState::sdl_refresh_timer_cb(Uint32
/*interval*/, void *opaque
)
881 evt
.user
.type
= FF_REFRESH_EVENT
;
882 evt
.user
.data1
= opaque
;
884 return 0; /* 0 means stop timer */
887 /* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
888 void VideoState::schedRefresh(milliseconds delay
)
890 SDL_AddTimer(delay
.count(), sdl_refresh_timer_cb
, this);
893 /* Called by VideoState::refreshTimer to display the next video frame. */
894 void VideoState::display(SDL_Window
*screen
, SDL_Renderer
*renderer
)
896 Picture
*vp
= &mPictQ
[mPictQRead
];
905 if(mCodecCtx
->sample_aspect_ratio
.num
== 0)
909 aspect_ratio
= av_q2d(mCodecCtx
->sample_aspect_ratio
) * mCodecCtx
->width
/
912 if(aspect_ratio
<= 0.0f
)
913 aspect_ratio
= (float)mCodecCtx
->width
/ (float)mCodecCtx
->height
;
915 SDL_GetWindowSize(screen
, &win_w
, &win_h
);
917 w
= ((int)rint(h
* aspect_ratio
) + 3) & ~3;
921 h
= ((int)rint(w
/ aspect_ratio
) + 3) & ~3;
926 SDL_Rect src_rect
{ 0, 0, vp
->mWidth
, vp
->mHeight
};
927 SDL_Rect dst_rect
{ x
, y
, w
, h
};
928 SDL_RenderCopy(renderer
, vp
->mImage
, &src_rect
, &dst_rect
);
929 SDL_RenderPresent(renderer
);
932 /* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
933 * was created. It handles the display of the next decoded video frame (if not
934 * falling behind), and sets up the timer for the following video frame.
936 void VideoState::refreshTimer(SDL_Window
*screen
, SDL_Renderer
*renderer
)
943 std::unique_lock
<std::mutex
>(mPictQMutex
).unlock();
944 mPictQCond
.notify_all();
947 schedRefresh(milliseconds(100));
950 if(!mMovie
.mPlaying
.load(std::memory_order_relaxed
))
952 schedRefresh(milliseconds(1));
956 std::unique_lock
<std::mutex
> lock(mPictQMutex
);
963 schedRefresh(milliseconds(1));
965 mPictQCond
.notify_all();
969 Picture
*vp
= &mPictQ
[mPictQRead
];
970 mCurrentPts
= vp
->mPts
;
971 mCurrentPtsTime
= get_avtime();
973 /* Get delay using the frame pts and the pts from last frame. */
974 auto delay
= vp
->mPts
- mFrameLastPts
;
975 if(delay
<= seconds::zero() || delay
>= seconds(1))
977 /* If incorrect delay, use previous one. */
978 delay
= mFrameLastDelay
;
980 /* Save for next frame. */
981 mFrameLastDelay
= delay
;
982 mFrameLastPts
= vp
->mPts
;
984 /* Update delay to sync to clock if not master source. */
985 if(mMovie
.mAVSyncType
!= SyncMaster::Video
)
987 auto ref_clock
= mMovie
.getMasterClock();
988 auto diff
= vp
->mPts
- ref_clock
;
990 /* Skip or repeat the frame. Take delay into account. */
991 auto sync_threshold
= std::min
<nanoseconds
>(delay
, VideoSyncThreshold
);
992 if(!(diff
< AVNoSyncThreshold
&& diff
> -AVNoSyncThreshold
))
994 if(diff
<= -sync_threshold
)
995 delay
= nanoseconds::zero();
996 else if(diff
>= sync_threshold
)
1001 mFrameTimer
+= delay
;
1002 /* Compute the REAL delay. */
1003 auto actual_delay
= mFrameTimer
- get_avtime();
1004 if(!(actual_delay
>= VideoSyncThreshold
))
1006 /* We don't have time to handle this picture, just skip to the next one. */
1007 mPictQRead
= (mPictQRead
+1)%mPictQ
.size();
1011 schedRefresh(std::chrono::duration_cast
<milliseconds
>(actual_delay
));
1013 /* Show the picture! */
1014 display(screen
, renderer
);
1016 /* Update queue for next picture. */
1017 mPictQRead
= (mPictQRead
+1)%mPictQ
.size();
1020 mPictQCond
.notify_all();
1023 /* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
1024 * main thread where the renderer was created.
1026 void VideoState::updatePicture(SDL_Window
*screen
, SDL_Renderer
*renderer
)
1028 Picture
*vp
= &mPictQ
[mPictQWrite
];
1029 bool fmt_updated
= false;
1031 /* allocate or resize the buffer! */
1032 if(!vp
->mImage
|| vp
->mWidth
!= mCodecCtx
->width
|| vp
->mHeight
!= mCodecCtx
->height
)
1036 SDL_DestroyTexture(vp
->mImage
);
1037 vp
->mImage
= SDL_CreateTexture(
1038 renderer
, SDL_PIXELFORMAT_IYUV
, SDL_TEXTUREACCESS_STREAMING
,
1039 mCodecCtx
->coded_width
, mCodecCtx
->coded_height
1042 std::cerr
<< "Failed to create YV12 texture!" <<std::endl
;
1043 vp
->mWidth
= mCodecCtx
->width
;
1044 vp
->mHeight
= mCodecCtx
->height
;
1046 if(mFirstUpdate
&& vp
->mWidth
> 0 && vp
->mHeight
> 0)
1048 /* For the first update, set the window size to the video size. */
1049 mFirstUpdate
= false;
1052 int h
= vp
->mHeight
;
1053 if(mCodecCtx
->sample_aspect_ratio
.den
!= 0)
1055 double aspect_ratio
= av_q2d(mCodecCtx
->sample_aspect_ratio
);
1056 if(aspect_ratio
>= 1.0)
1057 w
= (int)(w
*aspect_ratio
+ 0.5);
1058 else if(aspect_ratio
> 0.0)
1059 h
= (int)(h
/aspect_ratio
+ 0.5);
1061 SDL_SetWindowSize(screen
, w
, h
);
1067 AVFrame
*frame
= mDecodedFrame
.get();
1068 void *pixels
= nullptr;
1071 if(mCodecCtx
->pix_fmt
== AV_PIX_FMT_YUV420P
)
1072 SDL_UpdateYUVTexture(vp
->mImage
, nullptr,
1073 frame
->data
[0], frame
->linesize
[0],
1074 frame
->data
[1], frame
->linesize
[1],
1075 frame
->data
[2], frame
->linesize
[2]
1077 else if(SDL_LockTexture(vp
->mImage
, nullptr, &pixels
, &pitch
) != 0)
1078 std::cerr
<< "Failed to lock texture" <<std::endl
;
1081 // Convert the image into YUV format that SDL uses
1082 int coded_w
= mCodecCtx
->coded_width
;
1083 int coded_h
= mCodecCtx
->coded_height
;
1084 int w
= mCodecCtx
->width
;
1085 int h
= mCodecCtx
->height
;
1086 if(!mSwscaleCtx
|| fmt_updated
)
1088 mSwscaleCtx
.reset(sws_getContext(
1089 w
, h
, mCodecCtx
->pix_fmt
,
1090 w
, h
, AV_PIX_FMT_YUV420P
, 0,
1091 nullptr, nullptr, nullptr
1095 /* point pict at the queue */
1096 uint8_t *pict_data
[3];
1097 pict_data
[0] = reinterpret_cast<uint8_t*>(pixels
);
1098 pict_data
[1] = pict_data
[0] + coded_w
*coded_h
;
1099 pict_data
[2] = pict_data
[1] + coded_w
*coded_h
/4;
1101 int pict_linesize
[3];
1102 pict_linesize
[0] = pitch
;
1103 pict_linesize
[1] = pitch
/ 2;
1104 pict_linesize
[2] = pitch
/ 2;
1106 sws_scale(mSwscaleCtx
.get(), (const uint8_t**)frame
->data
,
1107 frame
->linesize
, 0, h
, pict_data
, pict_linesize
);
1108 SDL_UnlockTexture(vp
->mImage
);
1112 vp
->mUpdated
.store(true, std::memory_order_release
);
1113 std::unique_lock
<std::mutex
>(mPictQMutex
).unlock();
1114 mPictQCond
.notify_one();
1117 int VideoState::queuePicture(nanoseconds pts
)
1119 /* Wait until we have space for a new pic */
1120 std::unique_lock
<std::mutex
> lock(mPictQMutex
);
1121 while(mPictQSize
>= mPictQ
.size() && !mMovie
.mQuit
.load(std::memory_order_relaxed
))
1122 mPictQCond
.wait(lock
);
1125 if(mMovie
.mQuit
.load(std::memory_order_relaxed
))
1128 Picture
*vp
= &mPictQ
[mPictQWrite
];
1130 /* We have to create/update the picture in the main thread */
1131 vp
->mUpdated
.store(false, std::memory_order_relaxed
);
1133 evt
.user
.type
= FF_UPDATE_EVENT
;
1134 evt
.user
.data1
= this;
1135 SDL_PushEvent(&evt
);
1137 /* Wait until the picture is updated. */
1139 while(!vp
->mUpdated
.load(std::memory_order_relaxed
))
1141 if(mMovie
.mQuit
.load(std::memory_order_relaxed
))
1143 mPictQCond
.wait(lock
);
1145 if(mMovie
.mQuit
.load(std::memory_order_relaxed
))
1149 mPictQWrite
= (mPictQWrite
+1)%mPictQ
.size();
1156 int VideoState::handler()
1158 mDecodedFrame
.reset(av_frame_alloc());
1159 while(!mMovie
.mQuit
.load(std::memory_order_relaxed
))
1161 std::unique_lock
<std::mutex
> lock(mQueueMtx
);
1162 /* Decode video frame */
1163 int ret
= avcodec_receive_frame(mCodecCtx
.get(), mDecodedFrame
.get());
1164 if(ret
== AVERROR(EAGAIN
))
1166 mMovie
.mSendDataGood
.clear(std::memory_order_relaxed
);
1167 std::unique_lock
<std::mutex
>(mMovie
.mSendMtx
).unlock();
1168 mMovie
.mSendCond
.notify_one();
1170 mQueueCond
.wait(lock
);
1171 ret
= avcodec_receive_frame(mCodecCtx
.get(), mDecodedFrame
.get());
1172 } while(ret
== AVERROR(EAGAIN
));
1175 if(ret
== AVERROR_EOF
) break;
1176 mMovie
.mSendDataGood
.clear(std::memory_order_relaxed
);
1177 mMovie
.mSendCond
.notify_one();
1180 std::cerr
<< "Failed to decode frame: "<<ret
<<std::endl
;
1184 /* Get the PTS for this frame. */
1186 if(mDecodedFrame
->best_effort_timestamp
!= AV_NOPTS_VALUE
)
1187 mClock
= std::chrono::duration_cast
<nanoseconds
>(
1188 seconds_d64(av_q2d(mStream
->time_base
)*mDecodedFrame
->best_effort_timestamp
)
1192 /* Update the video clock to the next expected PTS. */
1193 auto frame_delay
= av_q2d(mCodecCtx
->time_base
);
1194 frame_delay
+= mDecodedFrame
->repeat_pict
* (frame_delay
* 0.5);
1195 mClock
+= std::chrono::duration_cast
<nanoseconds
>(seconds_d64(frame_delay
));
1197 if(queuePicture(pts
) < 0)
1199 av_frame_unref(mDecodedFrame
.get());
1203 std::unique_lock
<std::mutex
> lock(mPictQMutex
);
1204 if(mMovie
.mQuit
.load(std::memory_order_relaxed
))
1210 while(!mFinalUpdate
)
1211 mPictQCond
.wait(lock
);
1217 int MovieState::decode_interrupt_cb(void *ctx
)
1219 return reinterpret_cast<MovieState
*>(ctx
)->mQuit
.load(std::memory_order_relaxed
);
1222 bool MovieState::prepare()
1224 AVIOContext
*avioctx
= nullptr;
1225 AVIOInterruptCB intcb
= { decode_interrupt_cb
, this };
1226 if(avio_open2(&avioctx
, mFilename
.c_str(), AVIO_FLAG_READ
, &intcb
, nullptr))
1228 std::cerr
<< "Failed to open "<<mFilename
<<std::endl
;
1231 mIOContext
.reset(avioctx
);
1233 /* Open movie file. If avformat_open_input fails it will automatically free
1234 * this context, so don't set it onto a smart pointer yet.
1236 AVFormatContext
*fmtctx
= avformat_alloc_context();
1237 fmtctx
->pb
= mIOContext
.get();
1238 fmtctx
->interrupt_callback
= intcb
;
1239 if(avformat_open_input(&fmtctx
, mFilename
.c_str(), nullptr, nullptr) != 0)
1241 std::cerr
<< "Failed to open "<<mFilename
<<std::endl
;
1244 mFormatCtx
.reset(fmtctx
);
1246 /* Retrieve stream information */
1247 if(avformat_find_stream_info(mFormatCtx
.get(), nullptr) < 0)
1249 std::cerr
<< mFilename
<<": failed to find stream info" <<std::endl
;
1253 mVideo
.schedRefresh(milliseconds(40));
1255 mParseThread
= std::thread(std::mem_fn(&MovieState::parse_handler
), this);
1259 void MovieState::setTitle(SDL_Window
*window
)
1261 auto pos1
= mFilename
.rfind('/');
1262 auto pos2
= mFilename
.rfind('\\');
1263 auto fpos
= ((pos1
== std::string::npos
) ? pos2
:
1264 (pos2
== std::string::npos
) ? pos1
:
1265 std::max(pos1
, pos2
)) + 1;
1266 SDL_SetWindowTitle(window
, (mFilename
.substr(fpos
)+" - "+AppName
).c_str());
1269 nanoseconds
MovieState::getClock()
1271 if(!mPlaying
.load(std::memory_order_relaxed
))
1272 return nanoseconds::zero();
1273 return get_avtime() - mClockBase
;
1276 nanoseconds
MovieState::getMasterClock()
1278 if(mAVSyncType
== SyncMaster::Video
)
1279 return mVideo
.getClock();
1280 if(mAVSyncType
== SyncMaster::Audio
)
1281 return mAudio
.getClock();
1285 nanoseconds
MovieState::getDuration()
1286 { return std::chrono::duration
<int64_t,std::ratio
<1,AV_TIME_BASE
>>(mFormatCtx
->duration
); }
1288 int MovieState::streamComponentOpen(int stream_index
)
1290 if(stream_index
< 0 || (unsigned int)stream_index
>= mFormatCtx
->nb_streams
)
1293 /* Get a pointer to the codec context for the stream, and open the
1296 AVCodecCtxPtr
avctx(avcodec_alloc_context3(nullptr));
1297 if(!avctx
) return -1;
1299 if(avcodec_parameters_to_context(avctx
.get(), mFormatCtx
->streams
[stream_index
]->codecpar
))
1302 AVCodec
*codec
= avcodec_find_decoder(avctx
->codec_id
);
1303 if(!codec
|| avcodec_open2(avctx
.get(), codec
, nullptr) < 0)
1305 std::cerr
<< "Unsupported codec: "<<avcodec_get_name(avctx
->codec_id
)
1306 << " (0x"<<std::hex
<<avctx
->codec_id
<<std::dec
<<")" <<std::endl
;
1310 /* Initialize and start the media type handler */
1311 switch(avctx
->codec_type
)
1313 case AVMEDIA_TYPE_AUDIO
:
1314 mAudio
.mStream
= mFormatCtx
->streams
[stream_index
];
1315 mAudio
.mCodecCtx
= std::move(avctx
);
1317 mAudioThread
= std::thread(std::mem_fn(&AudioState::handler
), &mAudio
);
1320 case AVMEDIA_TYPE_VIDEO
:
1321 mVideo
.mStream
= mFormatCtx
->streams
[stream_index
];
1322 mVideo
.mCodecCtx
= std::move(avctx
);
1324 mVideoThread
= std::thread(std::mem_fn(&VideoState::handler
), &mVideo
);
1331 return stream_index
;
1334 int MovieState::parse_handler()
1336 int video_index
= -1;
1337 int audio_index
= -1;
1339 /* Dump information about file onto standard error */
1340 av_dump_format(mFormatCtx
.get(), 0, mFilename
.c_str(), 0);
1342 /* Find the first video and audio streams */
1343 for(unsigned int i
= 0;i
< mFormatCtx
->nb_streams
;i
++)
1345 auto codecpar
= mFormatCtx
->streams
[i
]->codecpar
;
1346 if(codecpar
->codec_type
== AVMEDIA_TYPE_VIDEO
&& video_index
< 0)
1347 video_index
= streamComponentOpen(i
);
1348 else if(codecpar
->codec_type
== AVMEDIA_TYPE_AUDIO
&& audio_index
< 0)
1349 audio_index
= streamComponentOpen(i
);
1352 if(video_index
< 0 && audio_index
< 0)
1354 std::cerr
<< mFilename
<<": could not open codecs" <<std::endl
;
1358 PacketQueue audio_queue
, video_queue
;
1359 bool input_finished
= false;
1361 /* Main packet reading/dispatching loop */
1362 while(!mQuit
.load(std::memory_order_relaxed
) && !input_finished
)
1365 if(av_read_frame(mFormatCtx
.get(), &packet
) < 0)
1366 input_finished
= true;
1369 /* Copy the packet into the queue it's meant for. */
1370 if(packet
.stream_index
== video_index
)
1371 video_queue
.put(&packet
);
1372 else if(packet
.stream_index
== audio_index
)
1373 audio_queue
.put(&packet
);
1374 av_packet_unref(&packet
);
1378 /* Send whatever queued packets we have. */
1379 if(!audio_queue
.empty())
1381 std::unique_lock
<std::mutex
> lock(mAudio
.mQueueMtx
);
1384 ret
= avcodec_send_packet(mAudio
.mCodecCtx
.get(), audio_queue
.front());
1385 if(ret
!= AVERROR(EAGAIN
)) audio_queue
.pop();
1386 } while(ret
!= AVERROR(EAGAIN
) && !audio_queue
.empty());
1388 mAudio
.mQueueCond
.notify_one();
1390 if(!video_queue
.empty())
1392 std::unique_lock
<std::mutex
> lock(mVideo
.mQueueMtx
);
1395 ret
= avcodec_send_packet(mVideo
.mCodecCtx
.get(), video_queue
.front());
1396 if(ret
!= AVERROR(EAGAIN
)) video_queue
.pop();
1397 } while(ret
!= AVERROR(EAGAIN
) && !video_queue
.empty());
1399 mVideo
.mQueueCond
.notify_one();
1401 /* If the queues are completely empty, or it's not full and there's
1402 * more input to read, go get more.
1404 size_t queue_size
= audio_queue
.totalSize() + video_queue
.totalSize();
1405 if(queue_size
== 0 || (queue_size
< MAX_QUEUE_SIZE
&& !input_finished
))
1408 if(!mPlaying
.load(std::memory_order_relaxed
))
1410 if((!mAudio
.mCodecCtx
|| mAudio
.isBufferFilled()) &&
1411 (!mVideo
.mCodecCtx
|| mVideo
.isBufferFilled()))
1413 /* Set the base time 50ms ahead of the current av time. */
1414 mClockBase
= get_avtime() + milliseconds(50);
1415 mVideo
.mCurrentPtsTime
= mClockBase
;
1416 mVideo
.mFrameTimer
= mVideo
.mCurrentPtsTime
;
1417 mAudio
.startPlayback();
1418 mPlaying
.store(std::memory_order_release
);
1421 /* Nothing to send or get for now, wait a bit and try again. */
1422 { std::unique_lock
<std::mutex
> lock(mSendMtx
);
1423 if(mSendDataGood
.test_and_set(std::memory_order_relaxed
))
1424 mSendCond
.wait_for(lock
, milliseconds(10));
1426 } while(!mQuit
.load(std::memory_order_relaxed
));
1428 /* Pass a null packet to finish the send buffers (the receive functions
1429 * will get AVERROR_EOF when emptied).
1431 if(mVideo
.mCodecCtx
)
1433 { std::lock_guard
<std::mutex
> lock(mVideo
.mQueueMtx
);
1434 avcodec_send_packet(mVideo
.mCodecCtx
.get(), nullptr);
1436 mVideo
.mQueueCond
.notify_one();
1438 if(mAudio
.mCodecCtx
)
1440 { std::lock_guard
<std::mutex
> lock(mAudio
.mQueueMtx
);
1441 avcodec_send_packet(mAudio
.mCodecCtx
.get(), nullptr);
1443 mAudio
.mQueueCond
.notify_one();
1445 video_queue
.clear();
1446 audio_queue
.clear();
1448 /* all done - wait for it */
1449 if(mVideoThread
.joinable())
1450 mVideoThread
.join();
1451 if(mAudioThread
.joinable())
1452 mAudioThread
.join();
1455 std::unique_lock
<std::mutex
> lock(mVideo
.mPictQMutex
);
1456 while(!mVideo
.mFinalUpdate
)
1457 mVideo
.mPictQCond
.wait(lock
);
1461 evt
.user
.type
= FF_MOVIE_DONE_EVENT
;
1462 SDL_PushEvent(&evt
);
// Helper class+method to print the time with human-readable formatting.
struct PrettyTime {
    std::chrono::seconds mTime;
};
inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using hours = std::chrono::hours;
    using minutes = std::chrono::minutes;
    using std::chrono::duration_cast;

    std::chrono::seconds t = rhs.mTime;
    /* Print a leading sign and format the magnitude for negative times. */
    if(t.count() < 0)
    {
        os << '-';
        t *= -1;
    }

    // Only handle up to hour formatting
    if(t >= hours(1))
        os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
           << (duration_cast<minutes>(t).count() % 60) << 'm';
    else
        os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
    /* Seconds are always two digits; restore the stream's fill/width after. */
    os << std::setw(2) << (t.count() % 60) << 's' << std::setw(0)
       << std::setfill(' ');
    return os;
}
1499 int main(int argc
, char *argv
[])
1501 std::unique_ptr
<MovieState
> movState
;
1505 std::cerr
<< "Usage: "<<argv
[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl
;
1508 /* Register all formats and codecs */
1510 /* Initialize networking protocols */
1511 avformat_network_init();
1513 if(SDL_Init(SDL_INIT_VIDEO
| SDL_INIT_TIMER
))
1515 std::cerr
<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl
;
1519 /* Make a window to put our video */
1520 SDL_Window
*screen
= SDL_CreateWindow(AppName
.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE
);
1523 std::cerr
<< "SDL: could not set video mode - exiting" <<std::endl
;
1526 /* Make a renderer to handle the texture image surface and rendering. */
1527 Uint32 render_flags
= SDL_RENDERER_ACCELERATED
| SDL_RENDERER_PRESENTVSYNC
;
1528 SDL_Renderer
*renderer
= SDL_CreateRenderer(screen
, -1, render_flags
);
1531 SDL_RendererInfo rinf
{};
1534 /* Make sure the renderer supports IYUV textures. If not, fallback to a
1535 * software renderer. */
1536 if(SDL_GetRendererInfo(renderer
, &rinf
) == 0)
1538 for(Uint32 i
= 0;!ok
&& i
< rinf
.num_texture_formats
;i
++)
1539 ok
= (rinf
.texture_formats
[i
] == SDL_PIXELFORMAT_IYUV
);
1543 std::cerr
<< "IYUV pixelformat textures not supported on renderer "<<rinf
.name
<<std::endl
;
1544 SDL_DestroyRenderer(renderer
);
1550 render_flags
= SDL_RENDERER_SOFTWARE
| SDL_RENDERER_PRESENTVSYNC
;
1551 renderer
= SDL_CreateRenderer(screen
, -1, render_flags
);
1555 std::cerr
<< "SDL: could not create renderer - exiting" <<std::endl
;
1558 SDL_SetRenderDrawColor(renderer
, 0, 0, 0, 255);
1559 SDL_RenderFillRect(renderer
, nullptr);
1560 SDL_RenderPresent(renderer
);
1562 /* Open an audio device */
1564 ALCdevice
*device
= [argc
,argv
,&fileidx
]() -> ALCdevice
*
1566 ALCdevice
*dev
= NULL
;
1567 if(argc
> 3 && strcmp(argv
[1], "-device") == 0)
1570 dev
= alcOpenDevice(argv
[2]);
1572 std::cerr
<< "Failed to open \""<<argv
[2]<<"\" - trying default" <<std::endl
;
1574 return alcOpenDevice(nullptr);
1576 ALCcontext
*context
= alcCreateContext(device
, nullptr);
1577 if(!context
|| alcMakeContextCurrent(context
) == ALC_FALSE
)
1579 std::cerr
<< "Failed to set up audio device" <<std::endl
;
1581 alcDestroyContext(context
);
1585 const ALCchar
*name
= nullptr;
1586 if(alcIsExtensionPresent(device
, "ALC_ENUMERATE_ALL_EXT"))
1587 name
= alcGetString(device
, ALC_ALL_DEVICES_SPECIFIER
);
1588 if(!name
|| alcGetError(device
) != AL_NO_ERROR
)
1589 name
= alcGetString(device
, ALC_DEVICE_SPECIFIER
);
1590 std::cout
<< "Opened \""<<name
<<"\"" <<std::endl
;
1592 if(alcIsExtensionPresent(device
, "ALC_SOFT_device_clock"))
1594 std::cout
<< "Found ALC_SOFT_device_clock" <<std::endl
;
1595 alcGetInteger64vSOFT
= reinterpret_cast<LPALCGETINTEGER64VSOFT
>(
1596 alcGetProcAddress(device
, "alcGetInteger64vSOFT")
1600 if(alIsExtensionPresent("AL_SOFT_source_latency"))
1602 std::cout
<< "Found AL_SOFT_source_latency" <<std::endl
;
1603 alGetSourcei64vSOFT
= reinterpret_cast<LPALGETSOURCEI64VSOFT
>(
1604 alGetProcAddress("alGetSourcei64vSOFT")
1608 if(fileidx
< argc
&& strcmp(argv
[fileidx
], "-direct") == 0)
1611 if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
1612 std::cerr
<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl
;
1615 std::cout
<< "Found AL_SOFT_direct_channels" <<std::endl
;
1616 EnableDirectOut
= true;
1620 while(fileidx
< argc
&& !movState
)
1622 movState
= std::unique_ptr
<MovieState
>(new MovieState(argv
[fileidx
++]));
1623 if(!movState
->prepare()) movState
= nullptr;
1627 std::cerr
<< "Could not start a video" <<std::endl
;
1630 movState
->setTitle(screen
);
1632 /* Default to going to the next movie at the end of one. */
1633 enum class EomAction
{
1635 } eom_action
= EomAction::Next
;
1636 seconds
last_time(-1);
1640 int have_evt
= SDL_WaitEventTimeout(&event
, 10);
1642 auto cur_time
= std::chrono::duration_cast
<seconds
>(movState
->getMasterClock());
1643 if(cur_time
!= last_time
)
1645 auto end_time
= std::chrono::duration_cast
<seconds
>(movState
->getDuration());
1646 std::cout
<< "\r "<<PrettyTime
{cur_time
}<<" / "<<PrettyTime
{end_time
} <<std::flush
;
1647 last_time
= cur_time
;
1649 if(!have_evt
) continue;
1654 switch(event
.key
.keysym
.sym
)
1657 movState
->mQuit
= true;
1658 eom_action
= EomAction::Quit
;
1662 movState
->mQuit
= true;
1663 eom_action
= EomAction::Next
;
1671 case SDL_WINDOWEVENT
:
1672 switch(event
.window
.event
)
1674 case SDL_WINDOWEVENT_RESIZED
:
1675 SDL_SetRenderDrawColor(renderer
, 0, 0, 0, 255);
1676 SDL_RenderFillRect(renderer
, nullptr);
1685 movState
->mQuit
= true;
1686 eom_action
= EomAction::Quit
;
1689 case FF_UPDATE_EVENT
:
1690 reinterpret_cast<VideoState
*>(event
.user
.data1
)->updatePicture(
1695 case FF_REFRESH_EVENT
:
1696 reinterpret_cast<VideoState
*>(event
.user
.data1
)->refreshTimer(
1701 case FF_MOVIE_DONE_EVENT
:
1703 last_time
= seconds(-1);
1704 if(eom_action
!= EomAction::Quit
)
1707 while(fileidx
< argc
&& !movState
)
1709 movState
= std::unique_ptr
<MovieState
>(new MovieState(argv
[fileidx
++]));
1710 if(!movState
->prepare()) movState
= nullptr;
1714 movState
->setTitle(screen
);
1719 /* Nothing more to play. Shut everything down and quit. */
1722 alcMakeContextCurrent(nullptr);
1723 alcDestroyContext(context
);
1724 alcCloseDevice(device
);
1726 SDL_DestroyRenderer(renderer
);
1728 SDL_DestroyWindow(screen
);
1739 std::cerr
<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl
;