/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "SourceBuffer.h"

#include <algorithm>
#include <cstring>

#include "mozilla/Likely.h"
#include "nsIInputStream.h"
#include "MainThreadUtils.h"
#include "SurfaceCache.h"

using std::max;
using std::min;

namespace mozilla {
namespace image {

//////////////////////////////////////////////////////////////////////////////
// SourceBufferIterator implementation.
//////////////////////////////////////////////////////////////////////////////

SourceBufferIterator::~SourceBufferIterator() {
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }
}

SourceBufferIterator& SourceBufferIterator::operator=(
    SourceBufferIterator&& aOther) {
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }

  mOwner = std::move(aOther.mOwner);
  mState = aOther.mState;
  mData = aOther.mData;
  mChunkCount = aOther.mChunkCount;
  mByteCount = aOther.mByteCount;
  mRemainderToRead = aOther.mRemainderToRead;

  return *this;
}

SourceBufferIterator::State SourceBufferIterator::AdvanceOrScheduleResume(
    size_t aRequestedBytes, IResumable* aConsumer) {
  MOZ_ASSERT(mOwner);

  if (MOZ_UNLIKELY(!HasMore())) {
    MOZ_ASSERT_UNREACHABLE("Should not advance a completed iterator");
    return COMPLETE;
  }

  // The range of data [mOffset, mOffset + mNextReadLength) has just been read
  // by the caller (or at least they don't have any interest in it), so consume
  // that data.
  MOZ_ASSERT(mData.mIterating.mNextReadLength <=
             mData.mIterating.mAvailableLength);
  mData.mIterating.mOffset += mData.mIterating.mNextReadLength;
  mData.mIterating.mAvailableLength -= mData.mIterating.mNextReadLength;

  // An iterator can have a limit imposed on it to read only a subset of a
  // source buffer. If it is present, we need to mimic the same behaviour as
  // the owning SourceBuffer.
  if (MOZ_UNLIKELY(mRemainderToRead != SIZE_MAX)) {
    MOZ_ASSERT(mData.mIterating.mNextReadLength <= mRemainderToRead);
    mRemainderToRead -= mData.mIterating.mNextReadLength;

    if (MOZ_UNLIKELY(mRemainderToRead == 0)) {
      mData.mIterating.mNextReadLength = 0;
      SetComplete(NS_OK);
      return COMPLETE;
    }

    if (MOZ_UNLIKELY(aRequestedBytes > mRemainderToRead)) {
      aRequestedBytes = mRemainderToRead;
    }
  }

  mData.mIterating.mNextReadLength = 0;

  if (MOZ_LIKELY(mState == READY)) {
    // If the caller wants zero bytes of data, that's easy enough; we just
    // configured ourselves for a zero-byte read above! In theory we could do
    // this even in the START state, but it's not important for performance and
    // breaking the ability of callers to assert that the pointer returned by
    // Data() is non-null doesn't seem worth it.
    if (aRequestedBytes == 0) {
      MOZ_ASSERT(mData.mIterating.mNextReadLength == 0);
      return READY;
    }

    // Try to satisfy the request out of our local buffer. This is potentially
    // much faster than requesting data from our owning SourceBuffer because we
    // don't have to take the lock. Note that if we have anything at all in our
    // local buffer, we use it to satisfy the request; @aRequestedBytes is just
    // the *maximum* number of bytes we can return.
    if (mData.mIterating.mAvailableLength > 0) {
      return AdvanceFromLocalBuffer(aRequestedBytes);
    }
  }

  // Our local buffer is empty, so we'll have to request data from our owning
  // SourceBuffer.
  return mOwner->AdvanceIteratorOrScheduleResume(*this, aRequestedBytes,
                                                 aConsumer);
}

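// An illustrative consumer-side sketch of the state machine above (not part
// of the original file): it assumes the READY/WAITING/COMPLETE values of
// SourceBufferIterator::State declared in SourceBuffer.h, and `DecodeSome`
// and `FinishDecode` are hypothetical helpers.
#if 0
void DriveIterator(SourceBufferIterator& aIterator, IResumable* aResumable) {
  while (true) {
    switch (aIterator.AdvanceOrScheduleResume(SIZE_MAX, aResumable)) {
      case SourceBufferIterator::READY:
        // The iterator now points at a readable span of source data.
        DecodeSome(aIterator.Data(), aIterator.Length());
        break;
      case SourceBufferIterator::WAITING:
        // No data right now; aResumable->Resume() will be invoked when more
        // arrives, and the caller should re-enter this loop then.
        return;
      case SourceBufferIterator::COMPLETE:
        FinishDecode();
        return;
      default:
        break;
    }
  }
}
#endif
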
bool SourceBufferIterator::RemainingBytesIsNoMoreThan(size_t aBytes) const {
  MOZ_ASSERT(mOwner);
  return mOwner->RemainingBytesIsNoMoreThan(*this, aBytes);
}

//////////////////////////////////////////////////////////////////////////////
// SourceBuffer implementation.
//////////////////////////////////////////////////////////////////////////////

const size_t SourceBuffer::MIN_CHUNK_CAPACITY;
const size_t SourceBuffer::MAX_CHUNK_CAPACITY;

SourceBuffer::SourceBuffer()
    : mMutex("image::SourceBuffer"), mConsumerCount(0), mCompacted(false) {}

SourceBuffer::~SourceBuffer() {
  MOZ_ASSERT(mConsumerCount == 0,
             "SourceBuffer destroyed with active consumers");
}

nsresult SourceBuffer::AppendChunk(Maybe<Chunk>&& aChunk) {
  mMutex.AssertCurrentThreadOwns();

  if (MOZ_UNLIKELY(!aChunk)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (MOZ_UNLIKELY(aChunk->AllocationFailed())) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (MOZ_UNLIKELY(!mChunks.AppendElement(std::move(*aChunk), fallible))) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  return NS_OK;
}

Maybe<SourceBuffer::Chunk> SourceBuffer::CreateChunk(
    size_t aCapacity, size_t aExistingCapacity /* = 0 */,
    bool aRoundUp /* = true */) {
  if (MOZ_UNLIKELY(aCapacity == 0)) {
    MOZ_ASSERT_UNREACHABLE("Appending a chunk of zero size?");
    return Nothing();
  }

  // Round up if requested.
  size_t finalCapacity = aRoundUp ? RoundedUpCapacity(aCapacity) : aCapacity;

  // Use the size of the SurfaceCache as an additional heuristic to avoid
  // allocating huge buffers. Generally images do not get smaller when decoded,
  // so if we could not store the source data in the SurfaceCache, we assume
  // that there's no way we'll be able to store the decoded version.
  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(finalCapacity + aExistingCapacity))) {
    NS_WARNING(
        "SourceBuffer refused to create chunk too large for SurfaceCache");
    return Nothing();
  }

  return Some(Chunk(finalCapacity));
}

nsresult SourceBuffer::Compact() {
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(mConsumerCount == 0, "Should have no consumers here");
  MOZ_ASSERT(mWaitingConsumers.Length() == 0, "Shouldn't have waiters");
  MOZ_ASSERT(mStatus, "Should be complete here");

  // If we've tried to compact once, don't attempt again.
  if (mCompacted) {
    return NS_OK;
  }
  mCompacted = true;

  // Compact our waiting consumers list, since we're complete and no future
  // consumer will ever have to wait.
  mWaitingConsumers.Compact();

  // If we have no chunks, then there's nothing to compact.
  if (mChunks.Length() < 1) {
    return NS_OK;
  }

  // If we have one chunk, then we can compact if it has excess capacity.
  if (mChunks.Length() == 1 && mChunks[0].Length() == mChunks[0].Capacity()) {
    return NS_OK;
  }

  // If the last chunk has the maximum capacity, then we know the total size
  // will be quite large and not worth consolidating. We can cheaply trim the
  // last chunk if it is too big, however.
  size_t capacity = mChunks.LastElement().Capacity();
  if (capacity == MAX_CHUNK_CAPACITY) {
    size_t lastLength = mChunks.LastElement().Length();
    if (lastLength != capacity) {
      mChunks.LastElement().SetCapacity(lastLength);
    }
    return NS_OK;
  }

  // We can compact our buffer. Determine the total length.
  size_t length = 0;
  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    length += mChunks[i].Length();
  }

  // If our total length is zero (which means ExpectLength() got called, but no
  // data ever actually got written) then just empty our chunk list.
  if (MOZ_UNLIKELY(length == 0)) {
    mChunks.Clear();
    return NS_OK;
  }

  Chunk& mergeChunk = mChunks[0];
  if (MOZ_UNLIKELY(!mergeChunk.SetCapacity(length))) {
    NS_WARNING("Failed to reallocate chunk for SourceBuffer compacting - OOM?");
    return NS_OK;
  }

  // Copy our old chunks into the newly reallocated first chunk.
  for (uint32_t i = 1; i < mChunks.Length(); ++i) {
    size_t offset = mergeChunk.Length();
    MOZ_ASSERT(offset < mergeChunk.Capacity());
    MOZ_ASSERT(offset + mChunks[i].Length() <= mergeChunk.Capacity());

    memcpy(mergeChunk.Data() + offset, mChunks[i].Data(), mChunks[i].Length());
    mergeChunk.AddLength(mChunks[i].Length());
  }

  MOZ_ASSERT(mergeChunk.Length() == mergeChunk.Capacity(),
             "Compacted chunk has slack space");

  // Remove the redundant chunks.
  mChunks.RemoveLastElements(mChunks.Length() - 1);
  mChunks.Compact();

  return NS_OK;
}

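// A worked example of the consolidation above (illustrative, not part of the
// original file): if mChunks holds three chunks of lengths 16 KiB, 16 KiB,
// and 8 KiB (with the last below MAX_CHUNK_CAPACITY), Compact() grows chunk 0
// to a 40 KiB capacity via SetCapacity(), memcpy()s chunks 1 and 2 into it at
// offsets 16 KiB and 32 KiB, then drops the now-redundant chunks, leaving one
// exactly-sized chunk.
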
size_t SourceBuffer::RoundedUpCapacity(size_t aCapacity) {
  // Protect against overflow.
  if (MOZ_UNLIKELY(SIZE_MAX - aCapacity < MIN_CHUNK_CAPACITY)) {
    return aCapacity;
  }

  // Round up to the next multiple of MIN_CHUNK_CAPACITY (which should be the
  // size of a page).
  size_t roundedCapacity =
      (aCapacity + MIN_CHUNK_CAPACITY - 1) & ~(MIN_CHUNK_CAPACITY - 1);
  MOZ_ASSERT(roundedCapacity >= aCapacity, "Bad math?");
  MOZ_ASSERT(roundedCapacity - aCapacity < MIN_CHUNK_CAPACITY, "Bad math?");

  return roundedCapacity;
}

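// A worked example of the mask-based rounding above (illustrative; it assumes
// MIN_CHUNK_CAPACITY is a power of two, which the bitmask trick requires,
// e.g. a 4096-byte page):
#if 0
static_assert(((5000 + 4096 - 1) & ~size_t(4096 - 1)) == 8192,
              "5000 rounds up to the next page multiple");
static_assert(((4096 + 4096 - 1) & ~size_t(4096 - 1)) == 4096,
              "an exact multiple is unchanged");
#endif
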
size_t SourceBuffer::FibonacciCapacityWithMinimum(size_t aMinCapacity) {
  mMutex.AssertCurrentThreadOwns();

  // We grow the source buffer using a Fibonacci growth rate. It will be capped
  // at MAX_CHUNK_CAPACITY, unless the available data exceeds that.

  size_t length = mChunks.Length();

  if (length == 0 || aMinCapacity > MAX_CHUNK_CAPACITY) {
    return aMinCapacity;
  }

  if (length == 1) {
    return min(max(2 * mChunks[0].Capacity(), aMinCapacity),
               MAX_CHUNK_CAPACITY);
  }

  return min(
      max(mChunks[length - 1].Capacity() + mChunks[length - 2].Capacity(),
          aMinCapacity),
      MAX_CHUNK_CAPACITY);
}

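// The growth pattern above, worked through (illustrative): with a first chunk
// of capacity C, successive chunk capacities run 2C, 3C, 5C, 8C, ... (each
// new chunk is the sum of the previous two), so the buffer reaches any total
// size in O(log n) allocations while no single chunk exceeds
// MAX_CHUNK_CAPACITY unless one Append() demands more than that at once.
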
void SourceBuffer::AddWaitingConsumer(IResumable* aConsumer) {
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(!mStatus, "Waiting when we're complete?");

  if (aConsumer) {
    mWaitingConsumers.AppendElement(aConsumer);
  }
}

void SourceBuffer::ResumeWaitingConsumers() {
  mMutex.AssertCurrentThreadOwns();

  if (mWaitingConsumers.Length() == 0) {
    return;
  }

  for (uint32_t i = 0; i < mWaitingConsumers.Length(); ++i) {
    mWaitingConsumers[i]->Resume();
  }

  mWaitingConsumers.Clear();
}

nsresult SourceBuffer::ExpectLength(size_t aExpectedLength) {
  MOZ_ASSERT(aExpectedLength > 0, "Zero expected size?");

  MutexAutoLock lock(mMutex);

  if (MOZ_UNLIKELY(mStatus)) {
    MOZ_ASSERT_UNREACHABLE("ExpectLength after SourceBuffer is complete");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(mChunks.Length() > 0)) {
    MOZ_ASSERT_UNREACHABLE("Duplicate or post-Append call to ExpectLength");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(aExpectedLength))) {
    NS_WARNING("SourceBuffer refused to store too large buffer");
    return HandleError(NS_ERROR_INVALID_ARG);
  }

  size_t length = min(aExpectedLength, MAX_CHUNK_CAPACITY);
  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(length,
                                                     /* aExistingCapacity */ 0,
                                                     /* aRoundUp */ false))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }

  return NS_OK;
}

nsresult SourceBuffer::Append(const char* aData, size_t aLength) {
  MOZ_ASSERT(aData, "Should have a buffer");
  MOZ_ASSERT(aLength > 0, "Writing a zero-sized chunk");

  size_t currentChunkCapacity = 0;
  size_t currentChunkLength = 0;
  char* currentChunkData = nullptr;
  size_t currentChunkRemaining = 0;
  size_t forCurrentChunk = 0;
  size_t forNextChunk = 0;
  size_t nextChunkCapacity = 0;
  size_t totalCapacity = 0;

  {
    MutexAutoLock lock(mMutex);

    if (MOZ_UNLIKELY(mStatus)) {
      // This SourceBuffer is already complete; ignore further data.
      return NS_ERROR_FAILURE;
    }

    if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aLength))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Copy out the current chunk's information so we can release the lock.
    // Note that this wouldn't be safe if multiple producers were allowed!
    Chunk& currentChunk = mChunks.LastElement();
    currentChunkCapacity = currentChunk.Capacity();
    currentChunkLength = currentChunk.Length();
    currentChunkData = currentChunk.Data();

    // Partition this data between the current chunk and the next chunk.
    // (Because we always allocate a chunk big enough to fit everything passed
    // to Append, we'll never need more than those two chunks to store
    // everything.)
    currentChunkRemaining = currentChunkCapacity - currentChunkLength;
    forCurrentChunk = min(aLength, currentChunkRemaining);
    forNextChunk = aLength - forCurrentChunk;

    // If we'll need another chunk, determine what its capacity should be while
    // we still hold the lock.
    nextChunkCapacity =
        forNextChunk > 0 ? FibonacciCapacityWithMinimum(forNextChunk) : 0;

    for (uint32_t i = 0; i < mChunks.Length(); ++i) {
      totalCapacity += mChunks[i].Capacity();
    }
  }

  // Write everything we can fit into the current chunk.
  MOZ_ASSERT(currentChunkLength + forCurrentChunk <= currentChunkCapacity);
  memcpy(currentChunkData + currentChunkLength, aData, forCurrentChunk);

  // If there's something left, create a new chunk and write it there.
  Maybe<Chunk> nextChunk;
  if (forNextChunk > 0) {
    MOZ_ASSERT(nextChunkCapacity >= forNextChunk, "Next chunk too small?");
    nextChunk = CreateChunk(nextChunkCapacity, totalCapacity);
    if (MOZ_LIKELY(nextChunk && !nextChunk->AllocationFailed())) {
      memcpy(nextChunk->Data(), aData + forCurrentChunk, forNextChunk);
      nextChunk->AddLength(forNextChunk);
    }
  }

  // Update shared data structures.
  {
    MutexAutoLock lock(mMutex);

    // Update the length of the current chunk.
    Chunk& currentChunk = mChunks.LastElement();
    MOZ_ASSERT(currentChunk.Data() == currentChunkData, "Multiple producers?");
    MOZ_ASSERT(currentChunk.Length() == currentChunkLength,
               "Multiple producers?");

    currentChunk.AddLength(forCurrentChunk);

    // If we created a new chunk, add it to the series.
    if (forNextChunk > 0) {
      if (MOZ_UNLIKELY(!nextChunk)) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }

      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(std::move(nextChunk))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Resume any waiting readers now that there's new data.
    ResumeWaitingConsumers();
  }

  return NS_OK;
}

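// A worked example of the partitioning above (illustrative): if the current
// chunk has 16 KiB of capacity with 12 KiB used, an Append() of 8 KiB copies
// 4 KiB into the current chunk and the remaining 4 KiB into a fresh chunk
// whose capacity comes from FibonacciCapacityWithMinimum(4 KiB). Both
// memcpy() calls happen outside the lock, which is safe only because there is
// a single producer.
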
static nsresult AppendToSourceBuffer(nsIInputStream*, void* aClosure,
                                     const char* aFromRawSegment, uint32_t,
                                     uint32_t aCount, uint32_t* aWriteCount) {
  SourceBuffer* sourceBuffer = static_cast<SourceBuffer*>(aClosure);

  // Copy the source data. Unless we hit OOM, we squelch the return value here,
  // because returning an error means that ReadSegments stops reading data, and
  // we want to ensure that we read everything we get. If we hit OOM then we
  // return a failed status to the caller.
  nsresult rv = sourceBuffer->Append(aFromRawSegment, aCount);
  if (rv == NS_ERROR_OUT_OF_MEMORY) {
    return rv;
  }

  // Report that we wrote everything we got.
  *aWriteCount = aCount;

  return NS_OK;
}

nsresult SourceBuffer::AppendFromInputStream(nsIInputStream* aInputStream,
                                             uint32_t aCount) {
  uint32_t bytesRead;
  nsresult rv = aInputStream->ReadSegments(AppendToSourceBuffer, this, aCount,
                                           &bytesRead);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  if (bytesRead == 0) {
    // The loading of the image has been canceled.
    return NS_ERROR_FAILURE;
  }

  if (bytesRead != aCount) {
    // Only some of the given data was read. We may have failed in
    // SourceBuffer::Append but ReadSegments swallowed the error. Otherwise the
    // stream itself failed to yield the data.
    MutexAutoLock lock(mMutex);
    if (mStatus) {
      MOZ_ASSERT(NS_FAILED(*mStatus));
      return *mStatus;
    }

    MOZ_ASSERT_UNREACHABLE("AppendToSourceBuffer should consume everything");
    return NS_ERROR_UNEXPECTED;
  }

  return NS_OK;
}

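// A producer-side sketch of how this API fits together (illustrative only;
// the `ImageListener` class and its members are hypothetical stand-ins for
// whatever network listener feeds the buffer):
#if 0
class ImageListener {
  RefPtr<SourceBuffer> mSourceBuffer;

  void OnStart(size_t aContentLength) {
    // Pre-size the buffer when the expected length is known up front.
    mSourceBuffer->ExpectLength(aContentLength);
  }

  nsresult OnDataAvailable(nsIInputStream* aStream, uint32_t aCount) {
    // Each incoming segment is appended; waiting consumers are resumed.
    return mSourceBuffer->AppendFromInputStream(aStream, aCount);
  }

  void OnStop(nsresult aStatus) {
    // Mark the buffer complete so readers see COMPLETE instead of WAITING.
    mSourceBuffer->Complete(aStatus);
  }
};
#endif
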
void SourceBuffer::Complete(nsresult aStatus) {
  MutexAutoLock lock(mMutex);

  // When an error occurs internally (e.g. due to an OOM), we save the status.
  // This will indirectly trigger a failure higher up and that will call
  // SourceBuffer::Complete. Since it doesn't necessarily know we are already
  // complete, it is safe to ignore.
  if (mStatus && (MOZ_UNLIKELY(NS_SUCCEEDED(*mStatus) ||
                               aStatus != NS_IMAGELIB_ERROR_FAILURE))) {
    MOZ_ASSERT_UNREACHABLE("Called Complete more than once");
    return;
  }

  if (MOZ_UNLIKELY(NS_SUCCEEDED(aStatus) && IsEmpty())) {
    // It's illegal to succeed without writing anything.
    aStatus = NS_ERROR_FAILURE;
  }

  mStatus = Some(aStatus);

  // Resume any waiting consumers now that we're complete.
  ResumeWaitingConsumers();

  // If we still have active consumers, just return.
  if (mConsumerCount > 0) {
    return;
  }

  // Attempt to compact our buffer down to a single chunk.
  Compact();
}

bool SourceBuffer::IsComplete() {
  MutexAutoLock lock(mMutex);
  return bool(mStatus);
}

size_t SourceBuffer::SizeOfIncludingThisWithComputedFallback(
    MallocSizeOf aMallocSizeOf) const {
  MutexAutoLock lock(mMutex);

  size_t n = aMallocSizeOf(this);
  n += mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);

  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    size_t chunkSize = aMallocSizeOf(mChunks[i].Data());

    if (chunkSize == 0) {
      // We're on a platform where moz_malloc_size_of always returns 0.
      chunkSize = mChunks[i].Capacity();
    }

    n += chunkSize;
  }

  return n;
}

SourceBufferIterator SourceBuffer::Iterator(size_t aReadLength) {
  {
    MutexAutoLock lock(mMutex);
    mConsumerCount++;
  }

  return SourceBufferIterator(this, aReadLength);
}

void SourceBuffer::OnIteratorRelease() {
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(mConsumerCount > 0, "Consumer count doesn't add up");
  mConsumerCount--;

  // If we still have active consumers, or we're not complete yet, then return.
  if (mConsumerCount > 0 || !mStatus) {
    return;
  }

  // Attempt to compact our buffer down to a single chunk.
  Compact();
}

bool SourceBuffer::RemainingBytesIsNoMoreThan(
    const SourceBufferIterator& aIterator, size_t aBytes) const {
  MutexAutoLock lock(mMutex);

  // If we're not complete, we always say no.
  if (!mStatus) {
    return false;
  }

  // If the iterator's at the end, the answer is trivial.
  if (!aIterator.HasMore()) {
    return true;
  }

  uint32_t iteratorChunk = aIterator.mData.mIterating.mChunk;
  size_t iteratorOffset = aIterator.mData.mIterating.mOffset;
  size_t iteratorLength = aIterator.mData.mIterating.mAvailableLength;

  // Include the bytes the iterator is currently pointing to in the limit, so
  // that the current chunk doesn't have to be a special case.
  size_t bytes = aBytes + iteratorOffset + iteratorLength;

  // Count the length over all of our chunks, starting with the one that the
  // iterator is currently pointing to. (This is O(N), but N is expected to be
  // ~1, so it doesn't seem worth caching the length separately.)
  size_t lengthSoFar = 0;
  for (uint32_t i = iteratorChunk; i < mChunks.Length(); ++i) {
    lengthSoFar += mChunks[i].Length();
    if (lengthSoFar > bytes) {
      return false;
    }
  }

  return true;
}

SourceBufferIterator::State SourceBuffer::AdvanceIteratorOrScheduleResume(
    SourceBufferIterator& aIterator, size_t aRequestedBytes,
    IResumable* aConsumer) {
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(aIterator.HasMore(),
             "Advancing a completed iterator and "
             "AdvanceOrScheduleResume didn't catch it");

  if (MOZ_UNLIKELY(mStatus && NS_FAILED(*mStatus))) {
    // This SourceBuffer is complete due to an error; all reads fail.
    return aIterator.SetComplete(*mStatus);
  }

  if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
    // We haven't gotten an initial chunk yet.
    AddWaitingConsumer(aConsumer);
    return aIterator.SetWaiting(!!aConsumer);
  }

  uint32_t iteratorChunkIdx = aIterator.mData.mIterating.mChunk;
  MOZ_ASSERT(iteratorChunkIdx < mChunks.Length());

  const Chunk& currentChunk = mChunks[iteratorChunkIdx];
  size_t iteratorEnd = aIterator.mData.mIterating.mOffset +
                       aIterator.mData.mIterating.mAvailableLength;
  MOZ_ASSERT(iteratorEnd <= currentChunk.Length());
  MOZ_ASSERT(iteratorEnd <= currentChunk.Capacity());

  if (iteratorEnd < currentChunk.Length()) {
    // There's more data in the current chunk.
    return aIterator.SetReady(iteratorChunkIdx, currentChunk.Data(),
                              iteratorEnd, currentChunk.Length() - iteratorEnd,
                              aRequestedBytes);
  }

  if (iteratorEnd == currentChunk.Capacity() &&
      !IsLastChunk(iteratorChunkIdx)) {
    // Advance to the next chunk.
    const Chunk& nextChunk = mChunks[iteratorChunkIdx + 1];
    return aIterator.SetReady(iteratorChunkIdx + 1, nextChunk.Data(), 0,
                              nextChunk.Length(), aRequestedBytes);
  }

  MOZ_ASSERT(IsLastChunk(iteratorChunkIdx), "Should've advanced");

  if (mStatus) {
    // There's no more data and this SourceBuffer completed successfully.
    MOZ_ASSERT(NS_SUCCEEDED(*mStatus), "Handled failures earlier");
    return aIterator.SetComplete(*mStatus);
  }

  // We're not complete, but there's no more data right now. Arrange to wake up
  // the consumer when we get more data.
  AddWaitingConsumer(aConsumer);
  return aIterator.SetWaiting(!!aConsumer);
}

nsresult SourceBuffer::HandleError(nsresult aError) {
  MOZ_ASSERT(NS_FAILED(aError), "Should have an error here");
  MOZ_ASSERT(aError == NS_ERROR_OUT_OF_MEMORY || aError == NS_ERROR_INVALID_ARG,
             "Unexpected error; may want to notify waiting readers, which "
             "HandleError currently doesn't do");

  mMutex.AssertCurrentThreadOwns();

  NS_WARNING("SourceBuffer encountered an unrecoverable error");

  // Record the error.
  mStatus = Some(aError);

  // Drop our references to waiting readers.
  mWaitingConsumers.Clear();

  return *mStatus;
}

bool SourceBuffer::IsEmpty() {
  mMutex.AssertCurrentThreadOwns();
  return mChunks.Length() == 0 || mChunks[0].Length() == 0;
}

bool SourceBuffer::IsLastChunk(uint32_t aChunk) {
  mMutex.AssertCurrentThreadOwns();
  return aChunk + 1 == mChunks.Length();
}

}  // namespace image
}  // namespace mozilla