gecko.git: dom/media/webaudio/AudioNodeExternalInputTrack.cpp
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AlignedTArray.h"
#include "AlignmentUtils.h"
#include "AudioNodeEngine.h"
#include "AudioNodeExternalInputTrack.h"
#include "AudioChannelFormat.h"
#include "mozilla/dom/MediaStreamAudioSourceNode.h"

using namespace mozilla::dom;

namespace mozilla {

AudioNodeExternalInputTrack::AudioNodeExternalInputTrack(
    AudioNodeEngine* aEngine, TrackRate aSampleRate)
    : AudioNodeTrack(aEngine, NO_TRACK_FLAGS, aSampleRate) {
  MOZ_COUNT_CTOR(AudioNodeExternalInputTrack);
}

AudioNodeExternalInputTrack::~AudioNodeExternalInputTrack() {
  MOZ_COUNT_DTOR(AudioNodeExternalInputTrack);
}

/* static */
already_AddRefed<AudioNodeExternalInputTrack>
AudioNodeExternalInputTrack::Create(MediaTrackGraph* aGraph,
                                    AudioNodeEngine* aEngine) {
  AudioContext* ctx = aEngine->NodeMainThread()->Context();
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aGraph == ctx->Graph());

  RefPtr<AudioNodeExternalInputTrack> track =
      new AudioNodeExternalInputTrack(aEngine, aGraph->GraphRate());
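  // If the AudioContext reports that newly created tracks should start
  // suspended, bump the suspended count so this track begins suspended.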
  track->mSuspendedCount += ctx->ShouldSuspendNewTrack();
  aGraph->AddTrack(track);
  return track.forget();
}

/**
 * Copies the data in aInput to aOffsetInBlock within aBlock.
 * aBlock must have been allocated with AllocateInputBlock and have a channel
 * count that's a superset of the channels in aInput.
 */
template <typename T>
static void CopyChunkToBlock(AudioChunk& aInput, AudioBlock* aBlock,
                             uint32_t aOffsetInBlock) {
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*, 2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

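  // Write each channel into the block at the requested offset, converting the
  // samples to float and applying the chunk's volume; channels without source
  // data are zero-filled.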
  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData,
                                   aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}

/**
 * Converts the data in aSegment to a single chunk aBlock. aSegment must have
 * duration WEBAUDIO_BLOCK_SIZE. aFallbackChannelCount is a superset of the
 * channels in every chunk of aSegment. aBlock must be float format or null.
 */
static void ConvertSegmentToAudioBlock(AudioSegment* aSegment,
                                       AudioBlock* aBlock,
                                       int32_t aFallbackChannelCount) {
  NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE,
               "Bad segment duration");

  {
    AudioSegment::ChunkIterator ci(*aSegment);
    NS_ASSERTION(!ci.IsEnded(), "Should be at least one chunk!");
    if (ci->GetDuration() == WEBAUDIO_BLOCK_SIZE &&
        (ci->IsNull() || ci->mBufferFormat == AUDIO_FORMAT_FLOAT32)) {
      bool aligned = true;
      for (size_t i = 0; i < ci->mChannelData.Length(); ++i) {
        if (!IS_ALIGNED16(ci->mChannelData[i])) {
          aligned = false;
          break;
        }
      }

      // Return this chunk directly to avoid copying data.
      if (aligned) {
        *aBlock = *ci;
        return;
      }
    }
  }

  aBlock->AllocateChannels(aFallbackChannelCount);

  uint32_t duration = 0;
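  // Copy every chunk into the block at its running offset, converting 16-bit
  // and silent chunks to float along the way.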
  for (AudioSegment::ChunkIterator ci(*aSegment); !ci.IsEnded(); ci.Next()) {
    switch (ci->mBufferFormat) {
      case AUDIO_FORMAT_S16: {
        CopyChunkToBlock<int16_t>(*ci, aBlock, duration);
        break;
      }
      case AUDIO_FORMAT_FLOAT32: {
        CopyChunkToBlock<float>(*ci, aBlock, duration);
        break;
      }
      case AUDIO_FORMAT_SILENCE: {
        // The actual type of the sample does not matter here, but we still
        // need to send some audio to the graph.
        CopyChunkToBlock<float>(*ci, aBlock, duration);
        break;
      }
    }
    duration += ci->GetDuration();
  }
}
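
// Pulls audio from the single external input for [aFrom, aTo), converts it to
// a float block and accumulates it into this track's single output chunk.
// Produces silence when the track is disabled, has no input, or is in
// pass-through mode.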
void AudioNodeExternalInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                               uint32_t aFlags) {
  // According to spec, number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input track being destroyed before this track.
  // Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaTrack* source = mInputs[0]->GetSource();
  AutoTArray<AudioSegment, 1> audioSegments;
  uint32_t inputChannels = 0;

  MOZ_ASSERT(source->GetData()->GetType() == MediaSegment::AUDIO,
             "AudioNodeExternalInputTrack shouldn't have a video input");

  const AudioSegment& inputSegment =
      *mInputs[0]->GetSource()->GetData<AudioSegment>();
  if (!inputSegment.IsNull()) {
    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
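    // Walk [aFrom, aTo) in input intervals. For each interval, append either
    // silence (while the input is blocked) or the matching slice of the input
    // segment, padding with silence past the end of the input.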
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval =
          MediaInputPort::GetNextInputInterval(mInputs[0], t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd) {
        break;
      }
      next = interval.mEnd;

      // We know this track does not block during the processing interval ---
      // we're not finished, we don't underrun, and we're not suspended.
      TrackTime outputStart = GraphTimeToTrackTime(interval.mStart);
      TrackTime outputEnd = GraphTimeToTrackTime(interval.mEnd);
      TrackTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // The input track is not blocked in this interval, so no need to call
        // GraphTimeToTrackTimeWithBlocking.
        TrackTime inputStart =
            std::min(inputSegment.GetDuration(),
                     source->GraphTimeToTrackTime(interval.mStart));
        TrackTime inputEnd =
            std::min(inputSegment.GetDuration(),
                     source->GraphTimeToTrackTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track.
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

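    // Grow the output channel count to the superset of every chunk we
    // gathered; the block allocated below must cover all input channels.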
    for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded();
         iter.Next()) {
      inputChannels =
          GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
    }
  }

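  // Convert each gathered segment to a float block and accumulate it into the
  // single output chunk; if nothing was accumulated, output silence.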
  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    DownmixBufferType downmixBuffer;
    ASSERT_ALIGNED16(downmixBuffer.Elements());
    for (auto& audioSegment : audioSegments) {
      AudioBlock tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegment, &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          mLastChunks[0].AllocateChannels(inputChannels);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0],
                             &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
}

bool AudioNodeExternalInputTrack::IsEnabled() {
  return ((MediaStreamAudioSourceNodeEngine*)Engine())->IsEnabled();
}

}  // namespace mozilla