1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "AudioContext.h"
8 #include "nsContentUtils.h"
9 #include "nsPIDOMWindow.h"
10 #include "mozilla/ErrorResult.h"
11 #include "mozilla/dom/AudioContextBinding.h"
12 #include "mozilla/dom/OfflineAudioContextBinding.h"
13 #include "MediaStreamGraph.h"
14 #include "mozilla/dom/AnalyserNode.h"
15 #include "AudioDestinationNode.h"
16 #include "AudioBufferSourceNode.h"
17 #include "AudioBuffer.h"
19 #include "DelayNode.h"
20 #include "PannerNode.h"
21 #include "AudioListener.h"
22 #include "DynamicsCompressorNode.h"
23 #include "BiquadFilterNode.h"
24 #include "ScriptProcessorNode.h"
25 #include "ChannelMergerNode.h"
26 #include "ChannelSplitterNode.h"
27 #include "WaveShaperNode.h"
28 #include "WaveTable.h"
29 #include "nsNetUtil.h"
34 NS_IMPL_CYCLE_COLLECTION_INHERITED_2(AudioContext
, nsDOMEventTargetHelper
,
35 mDestination
, mListener
)
37 NS_IMPL_ADDREF_INHERITED(AudioContext
, nsDOMEventTargetHelper
)
38 NS_IMPL_RELEASE_INHERITED(AudioContext
, nsDOMEventTargetHelper
)
39 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioContext
)
40 NS_INTERFACE_MAP_END_INHERITING(nsDOMEventTargetHelper
)
42 static uint8_t gWebAudioOutputKey
;
44 AudioContext::AudioContext(nsPIDOMWindow
* aWindow
,
46 uint32_t aNumberOfChannels
,
49 : mSampleRate(aIsOffline
? aSampleRate
: IdealAudioRate())
50 , mDestination(new AudioDestinationNode(this, aIsOffline
,
52 aLength
, aSampleRate
))
53 , mIsOffline(aIsOffline
)
55 // Actually play audio
56 mDestination
->Stream()->AddAudioOutput(&gWebAudioOutputKey
);
57 nsDOMEventTargetHelper::BindToOwner(aWindow
);
61 mAudioBufferSourceNodes
.Init();
62 mScriptProcessorNodes
.Init();
65 AudioContext::~AudioContext()
70 AudioContext::WrapObject(JSContext
* aCx
, JS::Handle
<JSObject
*> aScope
)
73 return OfflineAudioContextBinding::Wrap(aCx
, aScope
, this);
75 return AudioContextBinding::Wrap(aCx
, aScope
, this);
79 /* static */ already_AddRefed
<AudioContext
>
80 AudioContext::Constructor(const GlobalObject
& aGlobal
, ErrorResult
& aRv
)
82 nsCOMPtr
<nsPIDOMWindow
> window
= do_QueryInterface(aGlobal
.Get());
84 aRv
.Throw(NS_ERROR_FAILURE
);
88 nsRefPtr
<AudioContext
> object
= new AudioContext(window
, false);
89 window
->AddAudioContext(object
);
90 return object
.forget();
93 /* static */ already_AddRefed
<AudioContext
>
94 AudioContext::Constructor(const GlobalObject
& aGlobal
,
95 uint32_t aNumberOfChannels
,
100 nsCOMPtr
<nsPIDOMWindow
> window
= do_QueryInterface(aGlobal
.Get());
102 aRv
.Throw(NS_ERROR_FAILURE
);
106 if (aNumberOfChannels
== 0 ||
107 aNumberOfChannels
> WebAudioUtils::MaxChannelCount
||
109 aSampleRate
<= 1.0f
||
110 aSampleRate
>= TRACK_RATE_MAX
) {
111 // The DOM binding protects us against infinity and NaN
112 aRv
.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR
);
116 nsRefPtr
<AudioContext
> object
= new AudioContext(window
,
121 window
->AddAudioContext(object
);
122 return object
.forget();
125 already_AddRefed
<AudioBufferSourceNode
>
126 AudioContext::CreateBufferSource()
128 nsRefPtr
<AudioBufferSourceNode
> bufferNode
=
129 new AudioBufferSourceNode(this);
130 mAudioBufferSourceNodes
.PutEntry(bufferNode
);
131 return bufferNode
.forget();
134 already_AddRefed
<AudioBuffer
>
135 AudioContext::CreateBuffer(JSContext
* aJSContext
, uint32_t aNumberOfChannels
,
136 uint32_t aLength
, float aSampleRate
,
139 if (aSampleRate
< 8000 || aSampleRate
> 96000 || !aLength
) {
140 aRv
.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR
);
144 if (aLength
> INT32_MAX
) {
145 aRv
.Throw(NS_ERROR_OUT_OF_MEMORY
);
149 nsRefPtr
<AudioBuffer
> buffer
=
150 new AudioBuffer(this, int32_t(aLength
), aSampleRate
);
151 if (!buffer
->InitializeBuffers(aNumberOfChannels
, aJSContext
)) {
152 aRv
.Throw(NS_ERROR_OUT_OF_MEMORY
);
156 return buffer
.forget();
159 already_AddRefed
<AudioBuffer
>
160 AudioContext::CreateBuffer(JSContext
* aJSContext
, ArrayBuffer
& aBuffer
,
161 bool aMixToMono
, ErrorResult
& aRv
)
163 // Sniff the content of the media.
164 // Failed type sniffing will be handled by SyncDecodeMedia.
165 nsAutoCString contentType
;
166 NS_SniffContent(NS_DATA_SNIFFER_CATEGORY
, nullptr,
167 aBuffer
.Data(), aBuffer
.Length(),
170 WebAudioDecodeJob
job(contentType
, this);
172 if (mDecoder
.SyncDecodeMedia(contentType
.get(),
173 aBuffer
.Data(), aBuffer
.Length(), job
) &&
175 nsRefPtr
<AudioBuffer
> buffer
= job
.mOutput
.forget();
177 buffer
->MixToMono(aJSContext
);
179 return buffer
.forget();
// Returns true when aBufferSize is acceptable for createScriptProcessor:
// 0 (implementation chooses) or a power of two in [256, 16384], per the
// Web Audio API spec.
// NOTE(review): all case labels after `case 0:` were elided in the extracted
// source; reconstructed from the spec's allowed buffer sizes — verify
// against upstream.
bool IsValidBufferSize(uint32_t aBufferSize) {
  switch (aBufferSize) {
  case 0: // let the implementation choose the buffer size
  case 256:
  case 512:
  case 1024:
  case 2048:
  case 4096:
  case 8192:
  case 16384:
    return true;
  default:
    return false;
  }
}
205 already_AddRefed
<ScriptProcessorNode
>
206 AudioContext::CreateScriptProcessor(uint32_t aBufferSize
,
207 uint32_t aNumberOfInputChannels
,
208 uint32_t aNumberOfOutputChannels
,
211 if ((aNumberOfInputChannels
== 0 && aNumberOfOutputChannels
== 0) ||
212 aNumberOfInputChannels
> WebAudioUtils::MaxChannelCount
||
213 aNumberOfOutputChannels
> WebAudioUtils::MaxChannelCount
||
214 !IsValidBufferSize(aBufferSize
)) {
215 aRv
.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR
);
219 nsRefPtr
<ScriptProcessorNode
> scriptProcessor
=
220 new ScriptProcessorNode(this, aBufferSize
, aNumberOfInputChannels
,
221 aNumberOfOutputChannels
);
222 mScriptProcessorNodes
.PutEntry(scriptProcessor
);
223 return scriptProcessor
.forget();
226 already_AddRefed
<AnalyserNode
>
227 AudioContext::CreateAnalyser()
229 nsRefPtr
<AnalyserNode
> analyserNode
= new AnalyserNode(this);
230 return analyserNode
.forget();
233 already_AddRefed
<GainNode
>
234 AudioContext::CreateGain()
236 nsRefPtr
<GainNode
> gainNode
= new GainNode(this);
237 return gainNode
.forget();
240 already_AddRefed
<WaveShaperNode
>
241 AudioContext::CreateWaveShaper()
243 nsRefPtr
<WaveShaperNode
> waveShaperNode
= new WaveShaperNode(this);
244 return waveShaperNode
.forget();
247 already_AddRefed
<DelayNode
>
248 AudioContext::CreateDelay(double aMaxDelayTime
, ErrorResult
& aRv
)
250 if (aMaxDelayTime
> 0. && aMaxDelayTime
< 180.) {
251 nsRefPtr
<DelayNode
> delayNode
= new DelayNode(this, aMaxDelayTime
);
252 return delayNode
.forget();
254 aRv
.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR
);
258 already_AddRefed
<PannerNode
>
259 AudioContext::CreatePanner()
261 nsRefPtr
<PannerNode
> pannerNode
= new PannerNode(this);
262 mPannerNodes
.PutEntry(pannerNode
);
263 return pannerNode
.forget();
266 already_AddRefed
<ChannelSplitterNode
>
267 AudioContext::CreateChannelSplitter(uint32_t aNumberOfOutputs
, ErrorResult
& aRv
)
269 if (aNumberOfOutputs
== 0 ||
270 aNumberOfOutputs
> WebAudioUtils::MaxChannelCount
) {
271 aRv
.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR
);
275 nsRefPtr
<ChannelSplitterNode
> splitterNode
=
276 new ChannelSplitterNode(this, aNumberOfOutputs
);
277 return splitterNode
.forget();
280 already_AddRefed
<ChannelMergerNode
>
281 AudioContext::CreateChannelMerger(uint32_t aNumberOfInputs
, ErrorResult
& aRv
)
283 if (aNumberOfInputs
== 0 ||
284 aNumberOfInputs
> WebAudioUtils::MaxChannelCount
) {
285 aRv
.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR
);
289 nsRefPtr
<ChannelMergerNode
> mergerNode
=
290 new ChannelMergerNode(this, aNumberOfInputs
);
291 return mergerNode
.forget();
294 already_AddRefed
<DynamicsCompressorNode
>
295 AudioContext::CreateDynamicsCompressor()
297 nsRefPtr
<DynamicsCompressorNode
> compressorNode
=
298 new DynamicsCompressorNode(this);
299 return compressorNode
.forget();
302 already_AddRefed
<BiquadFilterNode
>
303 AudioContext::CreateBiquadFilter()
305 nsRefPtr
<BiquadFilterNode
> filterNode
=
306 new BiquadFilterNode(this);
307 return filterNode
.forget();
310 already_AddRefed
<WaveTable
>
311 AudioContext::CreateWaveTable(const Float32Array
& aRealData
,
312 const Float32Array
& aImagData
,
315 if (aRealData
.Length() != aImagData
.Length() ||
316 aRealData
.Length() == 0 ||
317 aRealData
.Length() > 4096) {
318 aRv
.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR
);
322 nsRefPtr
<WaveTable
> waveTable
=
323 new WaveTable(this, aRealData
.Data(), aRealData
.Length(),
324 aImagData
.Data(), aImagData
.Length());
325 return waveTable
.forget();
329 AudioContext::Listener()
332 mListener
= new AudioListener(this);
338 AudioContext::DecodeAudioData(const ArrayBuffer
& aBuffer
,
339 DecodeSuccessCallback
& aSuccessCallback
,
340 const Optional
<OwningNonNull
<DecodeErrorCallback
> >& aFailureCallback
)
342 // Sniff the content of the media.
343 // Failed type sniffing will be handled by AsyncDecodeMedia.
344 nsAutoCString contentType
;
345 NS_SniffContent(NS_DATA_SNIFFER_CATEGORY
, nullptr,
346 aBuffer
.Data(), aBuffer
.Length(),
349 nsCOMPtr
<DecodeErrorCallback
> failureCallback
;
350 if (aFailureCallback
.WasPassed()) {
351 failureCallback
= aFailureCallback
.Value().get();
353 nsAutoPtr
<WebAudioDecodeJob
> job(
354 new WebAudioDecodeJob(contentType
, this,
355 &aSuccessCallback
, failureCallback
));
356 mDecoder
.AsyncDecodeMedia(contentType
.get(),
357 aBuffer
.Data(), aBuffer
.Length(), *job
);
358 // Transfer the ownership to mDecodeJobs
359 mDecodeJobs
.AppendElement(job
.forget());
363 AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob
* aDecodeJob
)
365 mDecodeJobs
.RemoveElement(aDecodeJob
);
369 AudioContext::UnregisterAudioBufferSourceNode(AudioBufferSourceNode
* aNode
)
371 mAudioBufferSourceNodes
.RemoveEntry(aNode
);
372 UpdatePannerSource();
376 AudioContext::UnregisterPannerNode(PannerNode
* aNode
)
378 mPannerNodes
.RemoveEntry(aNode
);
382 AudioContext::UnregisterScriptProcessorNode(ScriptProcessorNode
* aNode
)
384 mScriptProcessorNodes
.RemoveEntry(aNode
);
387 static PLDHashOperator
388 FindConnectedSourcesOn(nsPtrHashKey
<PannerNode
>* aEntry
, void* aData
)
390 aEntry
->GetKey()->FindConnectedSources();
391 return PL_DHASH_NEXT
;
395 AudioContext::UpdatePannerSource()
397 mPannerNodes
.EnumerateEntries(FindConnectedSourcesOn
, nullptr);
401 AudioContext::Graph() const
403 return Destination()->Stream()->Graph();
407 AudioContext::DestinationStream() const
409 return Destination()->Stream();
413 AudioContext::CurrentTime() const
415 return MediaTimeToSeconds(Destination()->Stream()->GetCurrentTime());
419 static PLDHashOperator
420 GetHashtableEntry(nsPtrHashKey
<T
>* aEntry
, void* aData
)
422 nsTArray
<T
*>* array
= static_cast<nsTArray
<T
*>*>(aData
);
423 array
->AppendElement(aEntry
->GetKey());
424 return PL_DHASH_NEXT
;
429 GetHashtableElements(nsTHashtable
<nsPtrHashKey
<T
> >& aHashtable
, nsTArray
<T
*>& aArray
)
431 aHashtable
.EnumerateEntries(&GetHashtableEntry
<T
>, &aArray
);
435 AudioContext::Shutdown()
440 // Stop all audio buffer source nodes, to make sure that they release
441 // their self-references.
442 // We first gather an array of the nodes and then call Stop on each one,
443 // since Stop may delete the object and therefore trigger a re-entrant
444 // hashtable call to remove the pointer from the hashtable, which is
446 nsTArray
<AudioBufferSourceNode
*> sourceNodes
;
447 GetHashtableElements(mAudioBufferSourceNodes
, sourceNodes
);
448 for (uint32_t i
= 0; i
< sourceNodes
.Length(); ++i
) {
450 sourceNodes
[i
]->Stop(0.0, rv
, true);
452 // Stop all script processor nodes, to make sure that they release
453 // their self-references.
454 nsTArray
<ScriptProcessorNode
*> spNodes
;
455 GetHashtableElements(mScriptProcessorNodes
, spNodes
);
456 for (uint32_t i
= 0; i
< spNodes
.Length(); ++i
) {
460 // For offline contexts, we can destroy the MediaStreamGraph at this point.
462 mDestination
->DestroyGraph();
467 AudioContext::Suspend()
469 MediaStream
* ds
= DestinationStream();
471 ds
->ChangeExplicitBlockerCount(1);
476 AudioContext::Resume()
478 MediaStream
* ds
= DestinationStream();
480 ds
->ChangeExplicitBlockerCount(-1);
485 AudioContext::GetJSContext() const
487 MOZ_ASSERT(NS_IsMainThread());
489 nsCOMPtr
<nsIScriptGlobalObject
> scriptGlobal
=
490 do_QueryInterface(GetParentObject());
494 nsIScriptContext
* scriptContext
= scriptGlobal
->GetContext();
495 if (!scriptContext
) {
498 return scriptContext
->GetNativeContext();
502 AudioContext::StartRendering()
504 MOZ_ASSERT(mIsOffline
, "This should only be called on OfflineAudioContext");
506 mDestination
->StartRendering();