/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "SourceBuffer.h"

#include <algorithm>
#include <cmath>
#include <cstring>
#include "mozilla/Likely.h"
#include "nsIInputStream.h"
#include "MainThreadUtils.h"
#include "SurfaceCache.h"

using std::max;
using std::min;

namespace mozilla {
namespace image {

//////////////////////////////////////////////////////////////////////////////
// SourceBufferIterator implementation.
//////////////////////////////////////////////////////////////////////////////

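// A SourceBufferIterator notifies its owning SourceBuffer (via
// OnIteratorRelease) when it is destroyed or moved from, so the SourceBuffer
// can keep an accurate consumer count and compact itself once the last
// consumer is gone.
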
SourceBufferIterator::~SourceBufferIterator() {
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }
}

SourceBufferIterator& SourceBufferIterator::operator=(
    SourceBufferIterator&& aOther) {
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }

  mOwner = std::move(aOther.mOwner);
  mState = aOther.mState;
  mData = aOther.mData;
  mChunkCount = aOther.mChunkCount;
  mByteCount = aOther.mByteCount;
  mRemainderToRead = aOther.mRemainderToRead;

  return *this;
}

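// Consume the bytes the caller just finished reading, then either satisfy the
// next read out of the iterator's local view of the current chunk (no lock
// needed) or hand off to the owning SourceBuffer, which may return READY,
// COMPLETE, or schedule aConsumer to be resumed when more data arrives.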
SourceBufferIterator::State SourceBufferIterator::AdvanceOrScheduleResume(
    size_t aRequestedBytes, IResumable* aConsumer) {
  MOZ_ASSERT(mOwner);

  if (MOZ_UNLIKELY(!HasMore())) {
    MOZ_ASSERT_UNREACHABLE("Should not advance a completed iterator");
    return COMPLETE;
  }

  // The range of data [mOffset, mOffset + mNextReadLength) has just been read
  // by the caller (or at least they don't have any interest in it), so consume
  // that data.
  MOZ_ASSERT(mData.mIterating.mNextReadLength <=
             mData.mIterating.mAvailableLength);
  mData.mIterating.mOffset += mData.mIterating.mNextReadLength;
  mData.mIterating.mAvailableLength -= mData.mIterating.mNextReadLength;

  // An iterator can have a limit imposed on it to read only a subset of a
  // source buffer. If it is present, we need to mimic the same behaviour as
  // the owning SourceBuffer.
  if (MOZ_UNLIKELY(mRemainderToRead != SIZE_MAX)) {
    MOZ_ASSERT(mData.mIterating.mNextReadLength <= mRemainderToRead);
    mRemainderToRead -= mData.mIterating.mNextReadLength;

    if (MOZ_UNLIKELY(mRemainderToRead == 0)) {
      mData.mIterating.mNextReadLength = 0;
      SetComplete(NS_OK);
      return COMPLETE;
    }

    if (MOZ_UNLIKELY(aRequestedBytes > mRemainderToRead)) {
      aRequestedBytes = mRemainderToRead;
    }
  }

  mData.mIterating.mNextReadLength = 0;

  if (MOZ_LIKELY(mState == READY)) {
    // If the caller wants zero bytes of data, that's easy enough; we just
    // configured ourselves for a zero-byte read above! In theory we could do
    // this even in the START state, but it's not important for performance and
    // breaking the ability of callers to assert that the pointer returned by
    // Data() is non-null doesn't seem worth it.
    if (aRequestedBytes == 0) {
      MOZ_ASSERT(mData.mIterating.mNextReadLength == 0);
      return READY;
    }

    // Try to satisfy the request out of our local buffer. This is potentially
    // much faster than requesting data from our owning SourceBuffer because we
    // don't have to take the lock. Note that if we have anything at all in our
    // local buffer, we use it to satisfy the request; @aRequestedBytes is just
    // the *maximum* number of bytes we can return.
    if (mData.mIterating.mAvailableLength > 0) {
      return AdvanceFromLocalBuffer(aRequestedBytes);
    }
  }

  // Our local buffer is empty, so we'll have to request data from our owning
  // SourceBuffer.
  return mOwner->AdvanceIteratorOrScheduleResume(*this, aRequestedBytes,
                                                 aConsumer);
}

bool SourceBufferIterator::RemainingBytesIsNoMoreThan(size_t aBytes) const {
  MOZ_ASSERT(mOwner);
  return mOwner->RemainingBytesIsNoMoreThan(*this, aBytes);
}

//////////////////////////////////////////////////////////////////////////////
// SourceBuffer implementation.
//////////////////////////////////////////////////////////////////////////////

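// Out-of-line definitions for the chunk capacity constants. These are
// presumably declared as static const data members in SourceBuffer.h; without
// C++17 inline variables, ODR-used static const members (e.g. when passed by
// reference to min()/max() below) still need a definition in one translation
// unit.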
const size_t SourceBuffer::MIN_CHUNK_CAPACITY;
const size_t SourceBuffer::MAX_CHUNK_CAPACITY;

SourceBuffer::SourceBuffer()
    : mMutex("image::SourceBuffer"), mConsumerCount(0), mCompacted(false) {}

SourceBuffer::~SourceBuffer() {
  MOZ_ASSERT(mConsumerCount == 0,
             "SourceBuffer destroyed with active consumers");
}

nsresult SourceBuffer::AppendChunk(Maybe<Chunk>&& aChunk) {
  mMutex.AssertCurrentThreadOwns();

#ifdef DEBUG
  if (mChunks.Length() > 0) {
    NS_WARNING("Appending an extra chunk for SourceBuffer");
  }
#endif

  if (MOZ_UNLIKELY(!aChunk)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (MOZ_UNLIKELY(aChunk->AllocationFailed())) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (MOZ_UNLIKELY(!mChunks.AppendElement(std::move(*aChunk), fallible))) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  return NS_OK;
}

Maybe<SourceBuffer::Chunk> SourceBuffer::CreateChunk(
    size_t aCapacity, size_t aExistingCapacity /* = 0 */,
    bool aRoundUp /* = true */) {
  if (MOZ_UNLIKELY(aCapacity == 0)) {
    MOZ_ASSERT_UNREACHABLE("Appending a chunk of zero size?");
    return Nothing();
  }

  // Round up if requested.
  size_t finalCapacity = aRoundUp ? RoundedUpCapacity(aCapacity) : aCapacity;

  // Use the size of the SurfaceCache as an additional heuristic to avoid
  // allocating huge buffers. Generally images do not get smaller when decoded,
  // so if we couldn't store the source data in the SurfaceCache, we assume
  // that there's no way we'll be able to store the decoded version.
  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(finalCapacity + aExistingCapacity))) {
    NS_WARNING(
        "SourceBuffer refused to create chunk too large for SurfaceCache");
    return Nothing();
  }

  return Some(Chunk(finalCapacity));
}

nsresult SourceBuffer::Compact() {
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(mConsumerCount == 0, "Should have no consumers here");
  MOZ_ASSERT(mWaitingConsumers.Length() == 0, "Shouldn't have waiters");
  MOZ_ASSERT(mStatus, "Should be complete here");

  // If we've tried to compact once, don't attempt again.
  if (mCompacted) {
    return NS_OK;
  }

  mCompacted = true;

  // Compact our waiting consumers list, since we're complete and no future
  // consumer will ever have to wait.
  mWaitingConsumers.Compact();

  // If we have no chunks, then there's nothing to compact.
  if (mChunks.Length() < 1) {
    return NS_OK;
  }

  // If we have one chunk, then we can compact if it has excess capacity.
  if (mChunks.Length() == 1 && mChunks[0].Length() == mChunks[0].Capacity()) {
    return NS_OK;
  }

  // If the last chunk has the maximum capacity, then we know the total size
  // will be quite large and not worth consolidating. We can likely/cheaply
  // trim the last chunk if it is too big however.
  size_t capacity = mChunks.LastElement().Capacity();
  if (capacity == MAX_CHUNK_CAPACITY) {
    size_t lastLength = mChunks.LastElement().Length();
    if (lastLength != capacity) {
      mChunks.LastElement().SetCapacity(lastLength);
    }
    return NS_OK;
  }

  // We can compact our buffer. Determine the total length.
  size_t length = 0;
  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    length += mChunks[i].Length();
  }

  // If our total length is zero (which means ExpectLength() got called, but no
  // data ever actually got written) then just empty our chunk list.
  if (MOZ_UNLIKELY(length == 0)) {
    mChunks.Clear();
    return NS_OK;
  }

  Chunk& mergeChunk = mChunks[0];
  if (MOZ_UNLIKELY(!mergeChunk.SetCapacity(length))) {
    NS_WARNING("Failed to reallocate chunk for SourceBuffer compacting - OOM?");
    return NS_OK;
  }

  // Copy our old chunks into the newly reallocated first chunk.
  for (uint32_t i = 1; i < mChunks.Length(); ++i) {
    size_t offset = mergeChunk.Length();
    MOZ_ASSERT(offset < mergeChunk.Capacity());
    MOZ_ASSERT(offset + mChunks[i].Length() <= mergeChunk.Capacity());

    memcpy(mergeChunk.Data() + offset, mChunks[i].Data(), mChunks[i].Length());
    mergeChunk.AddLength(mChunks[i].Length());
  }

  MOZ_ASSERT(mergeChunk.Length() == mergeChunk.Capacity(),
             "Compacted chunk has slack space");

  // Remove the redundant chunks.
  mChunks.RemoveElementsAt(1, mChunks.Length() - 1);
  mChunks.Compact();

  return NS_OK;
}

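// For example, assuming MIN_CHUNK_CAPACITY is one 4096-byte page, a request
// for 5000 bytes rounds up to 8192, while a request for exactly 4096 is
// returned unchanged.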
/* static */ size_t SourceBuffer::RoundedUpCapacity(size_t aCapacity) {
  // Protect against overflow.
  if (MOZ_UNLIKELY(SIZE_MAX - aCapacity < MIN_CHUNK_CAPACITY)) {
    return aCapacity;
  }

  // Round up to the next multiple of MIN_CHUNK_CAPACITY (which should be the
  // size of a page).
  size_t roundedCapacity =
      (aCapacity + MIN_CHUNK_CAPACITY - 1) & ~(MIN_CHUNK_CAPACITY - 1);
  MOZ_ASSERT(roundedCapacity >= aCapacity, "Bad math?");
  MOZ_ASSERT(roundedCapacity - aCapacity < MIN_CHUNK_CAPACITY, "Bad math?");

  return roundedCapacity;
}

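// With this policy, successive chunk capacities grow roughly like c, 2c, 3c,
// 5c, 8c, ... for an initial capacity c (e.g. 4, 8, 12, 20, 32, 52 KiB if the
// first chunk is one 4 KiB page), capped at MAX_CHUNK_CAPACITY unless a
// single Append needs more than that.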
size_t SourceBuffer::FibonacciCapacityWithMinimum(size_t aMinCapacity) {
  mMutex.AssertCurrentThreadOwns();

  // We grow the source buffer using a Fibonacci growth rate. It will be capped
  // at MAX_CHUNK_CAPACITY, unless the available data exceeds that.

  size_t length = mChunks.Length();

  if (length == 0 || aMinCapacity > MAX_CHUNK_CAPACITY) {
    return aMinCapacity;
  }

  if (length == 1) {
    return min(max(2 * mChunks[0].Capacity(), aMinCapacity),
               MAX_CHUNK_CAPACITY);
  }

  return min(
      max(mChunks[length - 1].Capacity() + mChunks[length - 2].Capacity(),
          aMinCapacity),
      MAX_CHUNK_CAPACITY);
}

void SourceBuffer::AddWaitingConsumer(IResumable* aConsumer) {
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(!mStatus, "Waiting when we're complete?");

  if (aConsumer) {
    mWaitingConsumers.AppendElement(aConsumer);
  }
}

void SourceBuffer::ResumeWaitingConsumers() {
  mMutex.AssertCurrentThreadOwns();

  if (mWaitingConsumers.Length() == 0) {
    return;
  }

  for (uint32_t i = 0; i < mWaitingConsumers.Length(); ++i) {
    mWaitingConsumers[i]->Resume();
  }

  mWaitingConsumers.Clear();
}

nsresult SourceBuffer::ExpectLength(size_t aExpectedLength) {
  MOZ_ASSERT(aExpectedLength > 0, "Zero expected size?");

  MutexAutoLock lock(mMutex);

  if (MOZ_UNLIKELY(mStatus)) {
    MOZ_ASSERT_UNREACHABLE("ExpectLength after SourceBuffer is complete");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(mChunks.Length() > 0)) {
    MOZ_ASSERT_UNREACHABLE("Duplicate or post-Append call to ExpectLength");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(aExpectedLength))) {
    NS_WARNING("SourceBuffer refused to store too large buffer");
    return HandleError(NS_ERROR_INVALID_ARG);
  }

  size_t length = min(aExpectedLength, MAX_CHUNK_CAPACITY);
  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(length,
                                                     /* aExistingCapacity */ 0,
                                                     /* aRoundUp */ false))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }

  return NS_OK;
}

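// Append proceeds in three phases: (1) under the lock, pick how to split the
// incoming data between the current chunk and (if needed) a new chunk, and
// snapshot the current chunk's data pointer and length; (2) outside the lock,
// memcpy the data, so readers aren't blocked during the copy; (3) under the
// lock again, commit the new lengths, append the new chunk if one was
// created, and resume any waiting consumers.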
nsresult SourceBuffer::Append(const char* aData, size_t aLength) {
  MOZ_ASSERT(aData, "Should have a buffer");
  MOZ_ASSERT(aLength > 0, "Writing a zero-sized chunk");

  size_t currentChunkCapacity = 0;
  size_t currentChunkLength = 0;
  char* currentChunkData = nullptr;
  size_t currentChunkRemaining = 0;
  size_t forCurrentChunk = 0;
  size_t forNextChunk = 0;
  size_t nextChunkCapacity = 0;
  size_t totalCapacity = 0;

  {
    MutexAutoLock lock(mMutex);

    if (MOZ_UNLIKELY(mStatus)) {
      // This SourceBuffer is already complete; ignore further data.
      return NS_ERROR_FAILURE;
    }

    if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aLength))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Copy out the current chunk's information so we can release the lock.
    // Note that this wouldn't be safe if multiple producers were allowed!
    Chunk& currentChunk = mChunks.LastElement();
    currentChunkCapacity = currentChunk.Capacity();
    currentChunkLength = currentChunk.Length();
    currentChunkData = currentChunk.Data();

    // Partition this data between the current chunk and the next chunk.
    // (Because we always allocate a chunk big enough to fit everything passed
    // to Append, we'll never need more than those two chunks to store
    // everything.)
    currentChunkRemaining = currentChunkCapacity - currentChunkLength;
    forCurrentChunk = min(aLength, currentChunkRemaining);
    forNextChunk = aLength - forCurrentChunk;

    // If we'll need another chunk, determine what its capacity should be while
    // we still hold the lock.
    nextChunkCapacity =
        forNextChunk > 0 ? FibonacciCapacityWithMinimum(forNextChunk) : 0;

    for (uint32_t i = 0; i < mChunks.Length(); ++i) {
      totalCapacity += mChunks[i].Capacity();
    }
  }

  // Write everything we can fit into the current chunk.
  MOZ_ASSERT(currentChunkLength + forCurrentChunk <= currentChunkCapacity);
  memcpy(currentChunkData + currentChunkLength, aData, forCurrentChunk);

  // If there's something left, create a new chunk and write it there.
  Maybe<Chunk> nextChunk;
  if (forNextChunk > 0) {
    MOZ_ASSERT(nextChunkCapacity >= forNextChunk, "Next chunk too small?");
    nextChunk = CreateChunk(nextChunkCapacity, totalCapacity);
    if (MOZ_LIKELY(nextChunk && !nextChunk->AllocationFailed())) {
      memcpy(nextChunk->Data(), aData + forCurrentChunk, forNextChunk);
      nextChunk->AddLength(forNextChunk);
    }
  }

  // Update shared data structures.
  {
    MutexAutoLock lock(mMutex);

    // Update the length of the current chunk.
    Chunk& currentChunk = mChunks.LastElement();
    MOZ_ASSERT(currentChunk.Data() == currentChunkData, "Multiple producers?");
    MOZ_ASSERT(currentChunk.Length() == currentChunkLength,
               "Multiple producers?");

    currentChunk.AddLength(forCurrentChunk);

    // If we created a new chunk, add it to the series.
    if (forNextChunk > 0) {
      if (MOZ_UNLIKELY(!nextChunk)) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }

      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(std::move(nextChunk))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Resume any waiting readers now that there's new data.
    ResumeWaitingConsumers();
  }

  return NS_OK;
}

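// Segment-writer callback handed to nsIInputStream::ReadSegments by
// AppendFromInputStream below; its signature matches nsWriteSegmentFun.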
static nsresult AppendToSourceBuffer(nsIInputStream*, void* aClosure,
                                     const char* aFromRawSegment, uint32_t,
                                     uint32_t aCount, uint32_t* aWriteCount) {
  SourceBuffer* sourceBuffer = static_cast<SourceBuffer*>(aClosure);

  // Copy the source data. Unless we hit OOM, we squelch the return value here,
  // because returning an error means that ReadSegments stops reading data, and
  // we want to ensure that we read everything we get. If we hit OOM then we
  // return a failed status to the caller.
  nsresult rv = sourceBuffer->Append(aFromRawSegment, aCount);
  if (rv == NS_ERROR_OUT_OF_MEMORY) {
    return rv;
  }

  // Report that we wrote everything we got.
  *aWriteCount = aCount;

  return NS_OK;
}

nsresult SourceBuffer::AppendFromInputStream(nsIInputStream* aInputStream,
                                             uint32_t aCount) {
  uint32_t bytesRead;
  nsresult rv = aInputStream->ReadSegments(AppendToSourceBuffer, this, aCount,
                                           &bytesRead);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  if (bytesRead == 0) {
    // The loading of the image has been canceled.
    return NS_ERROR_FAILURE;
  }

  if (bytesRead != aCount) {
    // Only some of the given data was read. We may have failed in
    // SourceBuffer::Append but ReadSegments swallowed the error. Otherwise the
    // stream itself failed to yield the data.
    MutexAutoLock lock(mMutex);
    if (mStatus) {
      MOZ_ASSERT(NS_FAILED(*mStatus));
      return *mStatus;
    }

    MOZ_ASSERT_UNREACHABLE("AppendToSourceBuffer should consume everything");
  }

  return rv;
}

void SourceBuffer::Complete(nsresult aStatus) {
  MutexAutoLock lock(mMutex);

  // When an error occurs internally (e.g. due to an OOM), we save the status.
  // This will indirectly trigger a failure higher up and that will call
  // SourceBuffer::Complete. Since it doesn't necessarily know we are already
  // complete, it is safe to ignore.
  if (mStatus && (MOZ_UNLIKELY(NS_SUCCEEDED(*mStatus) ||
                               aStatus != NS_IMAGELIB_ERROR_FAILURE))) {
    MOZ_ASSERT_UNREACHABLE("Called Complete more than once");
    return;
  }

  if (MOZ_UNLIKELY(NS_SUCCEEDED(aStatus) && IsEmpty())) {
    // It's illegal to succeed without writing anything.
    aStatus = NS_ERROR_FAILURE;
  }

  mStatus = Some(aStatus);

  // Resume any waiting consumers now that we're complete.
  ResumeWaitingConsumers();

  // If we still have active consumers, just return.
  if (mConsumerCount > 0) {
    return;
  }

  // Attempt to compact our buffer down to a single chunk.
  Compact();
}

bool SourceBuffer::IsComplete() {
  MutexAutoLock lock(mMutex);
  return bool(mStatus);
}

size_t SourceBuffer::SizeOfIncludingThisWithComputedFallback(
    MallocSizeOf aMallocSizeOf) const {
  MutexAutoLock lock(mMutex);

  size_t n = aMallocSizeOf(this);
  n += mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);

  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    size_t chunkSize = aMallocSizeOf(mChunks[i].Data());

    if (chunkSize == 0) {
      // We're on a platform where moz_malloc_size_of always returns 0.
      chunkSize = mChunks[i].Capacity();
    }

    n += chunkSize;
  }

  return n;
}

SourceBufferIterator SourceBuffer::Iterator(size_t aReadLength) {
  {
    MutexAutoLock lock(mMutex);
    mConsumerCount++;
  }

  return SourceBufferIterator(this, aReadLength);
}

void SourceBuffer::OnIteratorRelease() {
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(mConsumerCount > 0, "Consumer count doesn't add up");
  mConsumerCount--;

  // If we still have active consumers, or we're not complete yet, then return.
  if (mConsumerCount > 0 || !mStatus) {
    return;
  }

  // Attempt to compact our buffer down to a single chunk.
  Compact();
}

bool SourceBuffer::RemainingBytesIsNoMoreThan(
    const SourceBufferIterator& aIterator, size_t aBytes) const {
  MutexAutoLock lock(mMutex);

  // If we're not complete, we always say no.
  if (!mStatus) {
    return false;
  }

  // If the iterator's at the end, the answer is trivial.
  if (!aIterator.HasMore()) {
    return true;
  }

  uint32_t iteratorChunk = aIterator.mData.mIterating.mChunk;
  size_t iteratorOffset = aIterator.mData.mIterating.mOffset;
  size_t iteratorLength = aIterator.mData.mIterating.mAvailableLength;

  // Include the bytes the iterator is currently pointing to in the limit, so
  // that the current chunk doesn't have to be a special case.
  size_t bytes = aBytes + iteratorOffset + iteratorLength;

  // Count the length over all of our chunks, starting with the one that the
  // iterator is currently pointing to. (This is O(N), but N is expected to be
  // ~1, so it doesn't seem worth caching the length separately.)
  size_t lengthSoFar = 0;
  for (uint32_t i = iteratorChunk; i < mChunks.Length(); ++i) {
    lengthSoFar += mChunks[i].Length();
    if (lengthSoFar > bytes) {
      return false;
    }
  }

  return true;
}

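// Resolve an iterator's request for more data against the shared chunk list.
// There are three possible outcomes: SetComplete (the buffer failed, or it
// completed successfully and the iterator has consumed everything), SetReady
// (more data is available in the current or the next chunk), or SetWaiting
// (the buffer is still being filled; aConsumer, if any, will be resumed when
// more data arrives).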
SourceBufferIterator::State SourceBuffer::AdvanceIteratorOrScheduleResume(
    SourceBufferIterator& aIterator, size_t aRequestedBytes,
    IResumable* aConsumer) {
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(aIterator.HasMore(),
             "Advancing a completed iterator and "
             "AdvanceOrScheduleResume didn't catch it");

  if (MOZ_UNLIKELY(mStatus && NS_FAILED(*mStatus))) {
    // This SourceBuffer is complete due to an error; all reads fail.
    return aIterator.SetComplete(*mStatus);
  }

  if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
    // We haven't gotten an initial chunk yet.
    AddWaitingConsumer(aConsumer);
    return aIterator.SetWaiting(!!aConsumer);
  }

  uint32_t iteratorChunkIdx = aIterator.mData.mIterating.mChunk;
  MOZ_ASSERT(iteratorChunkIdx < mChunks.Length());

  const Chunk& currentChunk = mChunks[iteratorChunkIdx];
  size_t iteratorEnd = aIterator.mData.mIterating.mOffset +
                       aIterator.mData.mIterating.mAvailableLength;
  MOZ_ASSERT(iteratorEnd <= currentChunk.Length());
  MOZ_ASSERT(iteratorEnd <= currentChunk.Capacity());

  if (iteratorEnd < currentChunk.Length()) {
    // There's more data in the current chunk.
    return aIterator.SetReady(iteratorChunkIdx, currentChunk.Data(),
                              iteratorEnd, currentChunk.Length() - iteratorEnd,
                              aRequestedBytes);
  }

  if (iteratorEnd == currentChunk.Capacity() &&
      !IsLastChunk(iteratorChunkIdx)) {
    // Advance to the next chunk.
    const Chunk& nextChunk = mChunks[iteratorChunkIdx + 1];
    return aIterator.SetReady(iteratorChunkIdx + 1, nextChunk.Data(), 0,
                              nextChunk.Length(), aRequestedBytes);
  }

  MOZ_ASSERT(IsLastChunk(iteratorChunkIdx), "Should've advanced");

  if (mStatus) {
    // There's no more data and this SourceBuffer completed successfully.
    MOZ_ASSERT(NS_SUCCEEDED(*mStatus), "Handled failures earlier");
    return aIterator.SetComplete(*mStatus);
  }

  // We're not complete, but there's no more data right now. Arrange to wake up
  // the consumer when we get more data.
  AddWaitingConsumer(aConsumer);
  return aIterator.SetWaiting(!!aConsumer);
}

nsresult SourceBuffer::HandleError(nsresult aError) {
  MOZ_ASSERT(NS_FAILED(aError), "Should have an error here");
  MOZ_ASSERT(aError == NS_ERROR_OUT_OF_MEMORY || aError == NS_ERROR_INVALID_ARG,
             "Unexpected error; may want to notify waiting readers, which "
             "HandleError currently doesn't do");

  mMutex.AssertCurrentThreadOwns();

  NS_WARNING("SourceBuffer encountered an unrecoverable error");

  // Record the error.
  mStatus = Some(aError);

  // Drop our references to waiting readers.
  mWaitingConsumers.Clear();

  return *mStatus;
}

bool SourceBuffer::IsEmpty() {
  mMutex.AssertCurrentThreadOwns();
  return mChunks.Length() == 0 || mChunks[0].Length() == 0;
}

bool SourceBuffer::IsLastChunk(uint32_t aChunk) {
  mMutex.AssertCurrentThreadOwns();
  return aChunk + 1 == mChunks.Length();
}

}  // namespace image
}  // namespace mozilla