Bug 878875 - Import PannerNode tests from Blink. r=ehsan
[gecko.git] / content / media / webaudio / AudioContext.cpp
blobeacef00288cb7a1f329e949f9e81d23a3e8555c9
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "AudioContext.h"
8 #include "nsContentUtils.h"
9 #include "nsPIDOMWindow.h"
10 #include "mozilla/ErrorResult.h"
11 #include "mozilla/dom/AudioContextBinding.h"
12 #include "mozilla/dom/OfflineAudioContextBinding.h"
13 #include "MediaStreamGraph.h"
14 #include "mozilla/dom/AnalyserNode.h"
15 #include "AudioDestinationNode.h"
16 #include "AudioBufferSourceNode.h"
17 #include "AudioBuffer.h"
18 #include "GainNode.h"
19 #include "DelayNode.h"
20 #include "PannerNode.h"
21 #include "AudioListener.h"
22 #include "DynamicsCompressorNode.h"
23 #include "BiquadFilterNode.h"
24 #include "ScriptProcessorNode.h"
25 #include "ChannelMergerNode.h"
26 #include "ChannelSplitterNode.h"
27 #include "WaveShaperNode.h"
28 #include "WaveTable.h"
29 #include "nsNetUtil.h"
31 namespace mozilla {
32 namespace dom {
34 NS_IMPL_CYCLE_COLLECTION_INHERITED_2(AudioContext, nsDOMEventTargetHelper,
35 mDestination, mListener)
37 NS_IMPL_ADDREF_INHERITED(AudioContext, nsDOMEventTargetHelper)
38 NS_IMPL_RELEASE_INHERITED(AudioContext, nsDOMEventTargetHelper)
39 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioContext)
40 NS_INTERFACE_MAP_END_INHERITING(nsDOMEventTargetHelper)
42 static uint8_t gWebAudioOutputKey;
44 AudioContext::AudioContext(nsPIDOMWindow* aWindow,
45 bool aIsOffline,
46 uint32_t aNumberOfChannels,
47 uint32_t aLength,
48 float aSampleRate)
49 : mSampleRate(aIsOffline ? aSampleRate : IdealAudioRate())
50 , mDestination(new AudioDestinationNode(this, aIsOffline,
51 aNumberOfChannels,
52 aLength, aSampleRate))
53 , mIsOffline(aIsOffline)
55 // Actually play audio
56 mDestination->Stream()->AddAudioOutput(&gWebAudioOutputKey);
57 nsDOMEventTargetHelper::BindToOwner(aWindow);
58 SetIsDOMBinding();
60 mPannerNodes.Init();
61 mAudioBufferSourceNodes.Init();
62 mScriptProcessorNodes.Init();
65 AudioContext::~AudioContext()
69 JSObject*
70 AudioContext::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aScope)
72 if (mIsOffline) {
73 return OfflineAudioContextBinding::Wrap(aCx, aScope, this);
74 } else {
75 return AudioContextBinding::Wrap(aCx, aScope, this);
79 /* static */ already_AddRefed<AudioContext>
80 AudioContext::Constructor(const GlobalObject& aGlobal, ErrorResult& aRv)
82 nsCOMPtr<nsPIDOMWindow> window = do_QueryInterface(aGlobal.Get());
83 if (!window) {
84 aRv.Throw(NS_ERROR_FAILURE);
85 return nullptr;
88 nsRefPtr<AudioContext> object = new AudioContext(window, false);
89 window->AddAudioContext(object);
90 return object.forget();
93 /* static */ already_AddRefed<AudioContext>
94 AudioContext::Constructor(const GlobalObject& aGlobal,
95 uint32_t aNumberOfChannels,
96 uint32_t aLength,
97 float aSampleRate,
98 ErrorResult& aRv)
100 nsCOMPtr<nsPIDOMWindow> window = do_QueryInterface(aGlobal.Get());
101 if (!window) {
102 aRv.Throw(NS_ERROR_FAILURE);
103 return nullptr;
106 if (aNumberOfChannels == 0 ||
107 aNumberOfChannels > WebAudioUtils::MaxChannelCount ||
108 aLength == 0 ||
109 aSampleRate <= 1.0f ||
110 aSampleRate >= TRACK_RATE_MAX) {
111 // The DOM binding protects us against infinity and NaN
112 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
113 return nullptr;
116 nsRefPtr<AudioContext> object = new AudioContext(window,
117 true,
118 aNumberOfChannels,
119 aLength,
120 aSampleRate);
121 window->AddAudioContext(object);
122 return object.forget();
125 already_AddRefed<AudioBufferSourceNode>
126 AudioContext::CreateBufferSource()
128 nsRefPtr<AudioBufferSourceNode> bufferNode =
129 new AudioBufferSourceNode(this);
130 mAudioBufferSourceNodes.PutEntry(bufferNode);
131 return bufferNode.forget();
134 already_AddRefed<AudioBuffer>
135 AudioContext::CreateBuffer(JSContext* aJSContext, uint32_t aNumberOfChannels,
136 uint32_t aLength, float aSampleRate,
137 ErrorResult& aRv)
139 if (aSampleRate < 8000 || aSampleRate > 96000 || !aLength) {
140 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
141 return nullptr;
144 if (aLength > INT32_MAX) {
145 aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
146 return nullptr;
149 nsRefPtr<AudioBuffer> buffer =
150 new AudioBuffer(this, int32_t(aLength), aSampleRate);
151 if (!buffer->InitializeBuffers(aNumberOfChannels, aJSContext)) {
152 aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
153 return nullptr;
156 return buffer.forget();
159 already_AddRefed<AudioBuffer>
160 AudioContext::CreateBuffer(JSContext* aJSContext, ArrayBuffer& aBuffer,
161 bool aMixToMono, ErrorResult& aRv)
163 // Sniff the content of the media.
164 // Failed type sniffing will be handled by SyncDecodeMedia.
165 nsAutoCString contentType;
166 NS_SniffContent(NS_DATA_SNIFFER_CATEGORY, nullptr,
167 aBuffer.Data(), aBuffer.Length(),
168 contentType);
170 WebAudioDecodeJob job(contentType, this);
172 if (mDecoder.SyncDecodeMedia(contentType.get(),
173 aBuffer.Data(), aBuffer.Length(), job) &&
174 job.mOutput) {
175 nsRefPtr<AudioBuffer> buffer = job.mOutput.forget();
176 if (aMixToMono) {
177 buffer->MixToMono(aJSContext);
179 return buffer.forget();
182 return nullptr;
namespace {

// Returns true when aBufferSize is an acceptable ScriptProcessorNode buffer
// size: 0 (implementation chooses) or a power of two in [256, 16384].
bool IsValidBufferSize(uint32_t aBufferSize) {
  switch (aBufferSize) {
  case 0:       // let the implementation choose the buffer size
  case 256:
  case 512:
  case 1024:
  case 2048:
  case 4096:
  case 8192:
  case 16384:
    return true;
  default:
    return false;
  }
}

} // anonymous namespace
205 already_AddRefed<ScriptProcessorNode>
206 AudioContext::CreateScriptProcessor(uint32_t aBufferSize,
207 uint32_t aNumberOfInputChannels,
208 uint32_t aNumberOfOutputChannels,
209 ErrorResult& aRv)
211 if ((aNumberOfInputChannels == 0 && aNumberOfOutputChannels == 0) ||
212 aNumberOfInputChannels > WebAudioUtils::MaxChannelCount ||
213 aNumberOfOutputChannels > WebAudioUtils::MaxChannelCount ||
214 !IsValidBufferSize(aBufferSize)) {
215 aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
216 return nullptr;
219 nsRefPtr<ScriptProcessorNode> scriptProcessor =
220 new ScriptProcessorNode(this, aBufferSize, aNumberOfInputChannels,
221 aNumberOfOutputChannels);
222 mScriptProcessorNodes.PutEntry(scriptProcessor);
223 return scriptProcessor.forget();
226 already_AddRefed<AnalyserNode>
227 AudioContext::CreateAnalyser()
229 nsRefPtr<AnalyserNode> analyserNode = new AnalyserNode(this);
230 return analyserNode.forget();
233 already_AddRefed<GainNode>
234 AudioContext::CreateGain()
236 nsRefPtr<GainNode> gainNode = new GainNode(this);
237 return gainNode.forget();
240 already_AddRefed<WaveShaperNode>
241 AudioContext::CreateWaveShaper()
243 nsRefPtr<WaveShaperNode> waveShaperNode = new WaveShaperNode(this);
244 return waveShaperNode.forget();
247 already_AddRefed<DelayNode>
248 AudioContext::CreateDelay(double aMaxDelayTime, ErrorResult& aRv)
250 if (aMaxDelayTime > 0. && aMaxDelayTime < 180.) {
251 nsRefPtr<DelayNode> delayNode = new DelayNode(this, aMaxDelayTime);
252 return delayNode.forget();
254 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
255 return nullptr;
258 already_AddRefed<PannerNode>
259 AudioContext::CreatePanner()
261 nsRefPtr<PannerNode> pannerNode = new PannerNode(this);
262 mPannerNodes.PutEntry(pannerNode);
263 return pannerNode.forget();
266 already_AddRefed<ChannelSplitterNode>
267 AudioContext::CreateChannelSplitter(uint32_t aNumberOfOutputs, ErrorResult& aRv)
269 if (aNumberOfOutputs == 0 ||
270 aNumberOfOutputs > WebAudioUtils::MaxChannelCount) {
271 aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
272 return nullptr;
275 nsRefPtr<ChannelSplitterNode> splitterNode =
276 new ChannelSplitterNode(this, aNumberOfOutputs);
277 return splitterNode.forget();
280 already_AddRefed<ChannelMergerNode>
281 AudioContext::CreateChannelMerger(uint32_t aNumberOfInputs, ErrorResult& aRv)
283 if (aNumberOfInputs == 0 ||
284 aNumberOfInputs > WebAudioUtils::MaxChannelCount) {
285 aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
286 return nullptr;
289 nsRefPtr<ChannelMergerNode> mergerNode =
290 new ChannelMergerNode(this, aNumberOfInputs);
291 return mergerNode.forget();
294 already_AddRefed<DynamicsCompressorNode>
295 AudioContext::CreateDynamicsCompressor()
297 nsRefPtr<DynamicsCompressorNode> compressorNode =
298 new DynamicsCompressorNode(this);
299 return compressorNode.forget();
302 already_AddRefed<BiquadFilterNode>
303 AudioContext::CreateBiquadFilter()
305 nsRefPtr<BiquadFilterNode> filterNode =
306 new BiquadFilterNode(this);
307 return filterNode.forget();
310 already_AddRefed<WaveTable>
311 AudioContext::CreateWaveTable(const Float32Array& aRealData,
312 const Float32Array& aImagData,
313 ErrorResult& aRv)
315 if (aRealData.Length() != aImagData.Length() ||
316 aRealData.Length() == 0 ||
317 aRealData.Length() > 4096) {
318 aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
319 return nullptr;
322 nsRefPtr<WaveTable> waveTable =
323 new WaveTable(this, aRealData.Data(), aRealData.Length(),
324 aImagData.Data(), aImagData.Length());
325 return waveTable.forget();
328 AudioListener*
329 AudioContext::Listener()
331 if (!mListener) {
332 mListener = new AudioListener(this);
334 return mListener;
337 void
338 AudioContext::DecodeAudioData(const ArrayBuffer& aBuffer,
339 DecodeSuccessCallback& aSuccessCallback,
340 const Optional<OwningNonNull<DecodeErrorCallback> >& aFailureCallback)
342 // Sniff the content of the media.
343 // Failed type sniffing will be handled by AsyncDecodeMedia.
344 nsAutoCString contentType;
345 NS_SniffContent(NS_DATA_SNIFFER_CATEGORY, nullptr,
346 aBuffer.Data(), aBuffer.Length(),
347 contentType);
349 nsCOMPtr<DecodeErrorCallback> failureCallback;
350 if (aFailureCallback.WasPassed()) {
351 failureCallback = aFailureCallback.Value().get();
353 nsAutoPtr<WebAudioDecodeJob> job(
354 new WebAudioDecodeJob(contentType, this,
355 &aSuccessCallback, failureCallback));
356 mDecoder.AsyncDecodeMedia(contentType.get(),
357 aBuffer.Data(), aBuffer.Length(), *job);
358 // Transfer the ownership to mDecodeJobs
359 mDecodeJobs.AppendElement(job.forget());
362 void
363 AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob)
365 mDecodeJobs.RemoveElement(aDecodeJob);
368 void
369 AudioContext::UnregisterAudioBufferSourceNode(AudioBufferSourceNode* aNode)
371 mAudioBufferSourceNodes.RemoveEntry(aNode);
372 UpdatePannerSource();
375 void
376 AudioContext::UnregisterPannerNode(PannerNode* aNode)
378 mPannerNodes.RemoveEntry(aNode);
381 void
382 AudioContext::UnregisterScriptProcessorNode(ScriptProcessorNode* aNode)
384 mScriptProcessorNodes.RemoveEntry(aNode);
387 static PLDHashOperator
388 FindConnectedSourcesOn(nsPtrHashKey<PannerNode>* aEntry, void* aData)
390 aEntry->GetKey()->FindConnectedSources();
391 return PL_DHASH_NEXT;
394 void
395 AudioContext::UpdatePannerSource()
397 mPannerNodes.EnumerateEntries(FindConnectedSourcesOn, nullptr);
400 MediaStreamGraph*
401 AudioContext::Graph() const
403 return Destination()->Stream()->Graph();
406 MediaStream*
407 AudioContext::DestinationStream() const
409 return Destination()->Stream();
412 double
413 AudioContext::CurrentTime() const
415 return MediaTimeToSeconds(Destination()->Stream()->GetCurrentTime());
418 template <class T>
419 static PLDHashOperator
420 GetHashtableEntry(nsPtrHashKey<T>* aEntry, void* aData)
422 nsTArray<T*>* array = static_cast<nsTArray<T*>*>(aData);
423 array->AppendElement(aEntry->GetKey());
424 return PL_DHASH_NEXT;
427 template <class T>
428 static void
429 GetHashtableElements(nsTHashtable<nsPtrHashKey<T> >& aHashtable, nsTArray<T*>& aArray)
431 aHashtable.EnumerateEntries(&GetHashtableEntry<T>, &aArray);
434 void
435 AudioContext::Shutdown()
437 Suspend();
438 mDecoder.Shutdown();
440 // Stop all audio buffer source nodes, to make sure that they release
441 // their self-references.
442 // We first gather an array of the nodes and then call Stop on each one,
443 // since Stop may delete the object and therefore trigger a re-entrant
444 // hashtable call to remove the pointer from the hashtable, which is
445 // not safe.
446 nsTArray<AudioBufferSourceNode*> sourceNodes;
447 GetHashtableElements(mAudioBufferSourceNodes, sourceNodes);
448 for (uint32_t i = 0; i < sourceNodes.Length(); ++i) {
449 ErrorResult rv;
450 sourceNodes[i]->Stop(0.0, rv, true);
452 // Stop all script processor nodes, to make sure that they release
453 // their self-references.
454 nsTArray<ScriptProcessorNode*> spNodes;
455 GetHashtableElements(mScriptProcessorNodes, spNodes);
456 for (uint32_t i = 0; i < spNodes.Length(); ++i) {
457 spNodes[i]->Stop();
460 // For offline contexts, we can destroy the MediaStreamGraph at this point.
461 if (mIsOffline) {
462 mDestination->DestroyGraph();
466 void
467 AudioContext::Suspend()
469 MediaStream* ds = DestinationStream();
470 if (ds) {
471 ds->ChangeExplicitBlockerCount(1);
475 void
476 AudioContext::Resume()
478 MediaStream* ds = DestinationStream();
479 if (ds) {
480 ds->ChangeExplicitBlockerCount(-1);
484 JSContext*
485 AudioContext::GetJSContext() const
487 MOZ_ASSERT(NS_IsMainThread());
489 nsCOMPtr<nsIScriptGlobalObject> scriptGlobal =
490 do_QueryInterface(GetParentObject());
491 if (!scriptGlobal) {
492 return nullptr;
494 nsIScriptContext* scriptContext = scriptGlobal->GetContext();
495 if (!scriptContext) {
496 return nullptr;
498 return scriptContext->GetNativeContext();
501 void
502 AudioContext::StartRendering()
504 MOZ_ASSERT(mIsOffline, "This should only be called on OfflineAudioContext");
506 mDestination->StartRendering();