/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/**
 * SourceBuffer is a single producer, multiple consumer data structure used for
 * storing image source (compressed) data.
 */

#ifndef mozilla_image_sourcebuffer_h
#define mozilla_image_sourcebuffer_h

#include <algorithm>
#include "mozilla/Maybe.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Mutex.h"
#include "mozilla/Move.h"
#include "mozilla/RefPtr.h"
#include "mozilla/RefCounted.h"
#include "mozilla/UniquePtr.h"
#include "nsTArray.h"

class nsIInputStream;

namespace mozilla {
namespace image {

class SourceBuffer;

/**
 * IResumable is an interface for classes that can schedule themselves to
 * resume their work later. An implementation of IResumable generally should
 * post a runnable to some event target which continues the work of the task.
 */
struct IResumable {
  MOZ_DECLARE_REFCOUNTED_TYPENAME(IResumable)

  // Subclasses may or may not be XPCOM classes, so we just require that they
  // implement AddRef and Release.
  NS_INLINE_DECL_PURE_VIRTUAL_REFCOUNTING

  virtual void Resume() = 0;

 protected:
  virtual ~IResumable() {}
};
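
/*
 * Illustrative sketch: a minimal IResumable might continue its work by posting
 * a runnable to an event target, as the comment above suggests. MyDecoderTask,
 * Run(), and mEventTarget are hypothetical names, not part of this header.
 *
 *   class MyDecoderTask final : public IResumable {
 *    public:
 *     NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MyDecoderTask, override)
 *
 *     void Resume() override {
 *       // Continue the task asynchronously rather than on the producer's
 *       // stack.
 *       mEventTarget->Dispatch(NS_NewRunnableFunction(
 *           "MyDecoderTask::Resume",
 *           [self = RefPtr<MyDecoderTask>(this)] { self->Run(); }));
 *     }
 *
 *    private:
 *     ~MyDecoderTask() override = default;
 *     void Run();  // Drives a SourceBufferIterator; see the sketch below.
 *     nsCOMPtr<nsIEventTarget> mEventTarget;
 *   };
 */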

/**
 * SourceBufferIterator is a class that allows consumers of image source data
 * to read the contents of a SourceBuffer sequentially.
 *
 * Consumers can advance through the SourceBuffer by calling
 * AdvanceOrScheduleResume() repeatedly. After every advance, they should check
 * the return value, which will tell them the iterator's new state.
 *
 * If WAITING is returned, AdvanceOrScheduleResume() has arranged to call the
 * consumer's Resume() method later, so the consumer should save its state if
 * needed and stop running.
 *
 * If the iterator's new state is READY, then the consumer can call Data() and
 * Length() to read new data from the SourceBuffer.
 *
 * Finally, in the COMPLETE state the consumer can call CompletionStatus() to
 * get the status passed to SourceBuffer::Complete().
 */
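
/*
 * Illustrative sketch of the consumer protocol described above: a consumer's
 * work loop might look roughly like this. MyDecoderTask (the hypothetical
 * IResumable sketched earlier), mIterator, ProcessData(), and FinishDecode()
 * are hypothetical names, not part of this header.
 *
 *   void MyDecoderTask::Run() {
 *     while (true) {
 *       switch (mIterator.AdvanceOrScheduleResume(SIZE_MAX, this)) {
 *         case SourceBufferIterator::READY:
 *           ProcessData(mIterator.Data(), mIterator.Length());
 *           break;  // Keep advancing.
 *         case SourceBufferIterator::WAITING:
 *           return;  // Resume() will be called when more data is appended.
 *         case SourceBufferIterator::COMPLETE:
 *           FinishDecode(mIterator.CompletionStatus());
 *           return;
 *         default:
 *           MOZ_ASSERT_UNREACHABLE("Unexpected iterator state");
 *           return;
 *       }
 *     }
 *   }
 */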

class SourceBufferIterator final {
 public:
  enum State {
    START,    // The iterator is at the beginning of the buffer.
    READY,    // The iterator is pointing to new data.
    WAITING,  // The iterator is blocked and the caller must yield.
    COMPLETE  // The iterator is pointing to the end of the buffer.
  };

  explicit SourceBufferIterator(SourceBuffer* aOwner, size_t aReadLimit)
      : mOwner(aOwner),
        mState(START),
        mChunkCount(0),
        mByteCount(0),
        mRemainderToRead(aReadLimit) {
    MOZ_ASSERT(aOwner);
    mData.mIterating.mChunk = 0;
    mData.mIterating.mData = nullptr;
    mData.mIterating.mOffset = 0;
    mData.mIterating.mAvailableLength = 0;
    mData.mIterating.mNextReadLength = 0;
  }

  SourceBufferIterator(SourceBufferIterator&& aOther)
      : mOwner(std::move(aOther.mOwner)),
        mState(aOther.mState),
        mData(aOther.mData),
        mChunkCount(aOther.mChunkCount),
        mByteCount(aOther.mByteCount),
        mRemainderToRead(aOther.mRemainderToRead) {}

  ~SourceBufferIterator();

  SourceBufferIterator& operator=(SourceBufferIterator&& aOther);

  /**
   * Returns true if there are no more than @aBytes remaining in the
   * SourceBuffer. If the SourceBuffer is not yet complete, returns false.
   */
  bool RemainingBytesIsNoMoreThan(size_t aBytes) const;

  /**
   * Advances the iterator through the SourceBuffer if possible. Advances no
   * more than @aRequestedBytes bytes. (Use SIZE_MAX to advance as much as
   * possible.)
   *
   * This is a wrapper around AdvanceOrScheduleResume() that makes it clearer
   * at the callsite when no resuming is intended.
   *
   * @return State::READY if the iterator was successfully advanced.
   *         State::WAITING if the iterator could not be advanced because it's
   *         at the end of the underlying SourceBuffer, but the SourceBuffer
   *         may still receive additional data.
   *         State::COMPLETE if the iterator could not be advanced because it's
   *         at the end of the underlying SourceBuffer and the SourceBuffer is
   *         marked complete (i.e., it will never receive any additional
   *         data).
   */
  State Advance(size_t aRequestedBytes) {
    return AdvanceOrScheduleResume(aRequestedBytes, nullptr);
  }

  /**
   * Advances the iterator through the SourceBuffer if possible. Advances no
   * more than @aRequestedBytes bytes. (Use SIZE_MAX to advance as much as
   * possible.) If advancing is not possible and @aConsumer is not null,
   * arranges to call the @aConsumer's Resume() method when more data is
   * available.
   *
   * @return State::READY if the iterator was successfully advanced.
   *         State::WAITING if the iterator could not be advanced because it's
   *         at the end of the underlying SourceBuffer, but the SourceBuffer
   *         may still receive additional data. @aConsumer's Resume() method
   *         will be called when additional data is available.
   *         State::COMPLETE if the iterator could not be advanced because it's
   *         at the end of the underlying SourceBuffer and the SourceBuffer is
   *         marked complete (i.e., it will never receive any additional
   *         data).
   */
  State AdvanceOrScheduleResume(size_t aRequestedBytes, IResumable* aConsumer);
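
  /*
   * Illustrative sketch: a consumer that only needs a fixed amount of data
   * (say, a header of sizeof(MyHeader) bytes, where MyHeader is hypothetical)
   * can bound the request instead of passing SIZE_MAX:
   *
   *   switch (mIterator.AdvanceOrScheduleResume(sizeof(MyHeader), this)) {
   *     case SourceBufferIterator::READY:
   *       // Length() is at most sizeof(MyHeader); parse what arrived.
   *       break;
   *     case SourceBufferIterator::WAITING:
   *       return;  // Not enough data yet; Resume() will be called later.
   *     case SourceBufferIterator::COMPLETE:
   *       // The stream ended; check CompletionStatus() for errors.
   *       break;
   *     default:
   *       break;
   *   }
   */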

  /// If at the end, returns the status passed to SourceBuffer::Complete().
  nsresult CompletionStatus() const {
    MOZ_ASSERT(mState == COMPLETE,
               "Calling CompletionStatus() in the wrong state");
    return mState == COMPLETE ? mData.mAtEnd.mStatus : NS_OK;
  }

  /// If we're ready to read, returns a pointer to the new data.
  const char* Data() const {
    MOZ_ASSERT(mState == READY, "Calling Data() in the wrong state");
    return mState == READY ? mData.mIterating.mData + mData.mIterating.mOffset
                           : nullptr;
  }

  /// If we're ready to read, returns the length of the new data.
  size_t Length() const {
    MOZ_ASSERT(mState == READY, "Calling Length() in the wrong state");
    return mState == READY ? mData.mIterating.mNextReadLength : 0;
  }

  /// If we're ready to read, returns whether or not everything available thus
  /// far has been in the same contiguous buffer.
  bool IsContiguous() const {
    MOZ_ASSERT(mState == READY, "Calling IsContiguous() in the wrong state");
    return mState == READY ? mData.mIterating.mChunk == 0 : false;
  }

  /// @return a count of the chunks we've advanced through.
  uint32_t ChunkCount() const { return mChunkCount; }

  /// @return a count of the bytes in all chunks we've advanced through.
  size_t ByteCount() const { return mByteCount; }

  /// @return the source buffer which owns the iterator.
  SourceBuffer* Owner() const {
    MOZ_ASSERT(mOwner);
    return mOwner;
  }

  /// @return the current offset from the beginning of the buffer.
  size_t Position() const {
    return mByteCount - mData.mIterating.mAvailableLength;
  }

 private:
  friend class SourceBuffer;

  SourceBufferIterator(const SourceBufferIterator&) = delete;
  SourceBufferIterator& operator=(const SourceBufferIterator&) = delete;

  bool HasMore() const { return mState != COMPLETE; }

  State AdvanceFromLocalBuffer(size_t aRequestedBytes) {
    MOZ_ASSERT(mState == READY, "Advancing in the wrong state");
    MOZ_ASSERT(mData.mIterating.mAvailableLength > 0,
               "The local buffer shouldn't be empty");
    MOZ_ASSERT(mData.mIterating.mNextReadLength == 0,
               "Advancing without consuming previous data");

    mData.mIterating.mNextReadLength =
        std::min(mData.mIterating.mAvailableLength, aRequestedBytes);

    return READY;
  }

  State SetReady(uint32_t aChunk, const char* aData, size_t aOffset,
                 size_t aAvailableLength, size_t aRequestedBytes) {
    MOZ_ASSERT(mState != COMPLETE);
    mState = READY;

    // Prevent the iterator from reporting more data than it is allowed to
    // read.
    if (aAvailableLength > mRemainderToRead) {
      aAvailableLength = mRemainderToRead;
    }

    // Update state.
    mData.mIterating.mChunk = aChunk;
    mData.mIterating.mData = aData;
    mData.mIterating.mOffset = aOffset;
    mData.mIterating.mAvailableLength = aAvailableLength;

    // Update metrics.
    mChunkCount++;
    mByteCount += aAvailableLength;

    // Attempt to advance by the requested number of bytes.
    return AdvanceFromLocalBuffer(aRequestedBytes);
  }

  State SetWaiting(bool aHasConsumer) {
    MOZ_ASSERT(mState != COMPLETE);
    // Without a consumer, we won't know when to wake up precisely. Caller
    // convention should mean that we don't try to advance unless we have
    // written new data, but that doesn't mean we got enough.
    MOZ_ASSERT(mState != WAITING || !aHasConsumer,
               "Did we get a spurious wakeup somehow?");
    return mState = WAITING;
  }

  State SetComplete(nsresult aStatus) {
    mData.mAtEnd.mStatus = aStatus;
    return mState = COMPLETE;
  }

  RefPtr<SourceBuffer> mOwner;

  State mState;

  /**
   * This union contains our iteration state if we're still iterating (for
   * states START, READY, and WAITING) and the status the SourceBuffer was
   * completed with if we're in state COMPLETE.
   */
  union {
    struct {
      uint32_t mChunk;    // Index of the chunk in SourceBuffer.
      const char* mData;  // Pointer to the start of the chunk.
      size_t mOffset;     // Current read position of the iterator relative to
                          // mData.
      size_t mAvailableLength;  // How many bytes remain unread in the chunk,
                                // relative to mOffset.
      size_t mNextReadLength;   // How many bytes the last iterator advance
                                // requested to be read, so that we know how
                                // much to increase mOffset and reduce
                                // mAvailableLength by when the next advance
                                // is requested.
    } mIterating;  // Cached info of the chunk currently iterating over.
    struct {
      nsresult mStatus;  // Status code indicating if we read all the data.
    } mAtEnd;  // State info after iterator is complete.
  } mData;

  uint32_t mChunkCount;  // Count of chunks observed, including current chunk.
  size_t mByteCount;     // Count of readable bytes observed, including unread
                         // bytes from the current chunk.
  size_t mRemainderToRead;  // Count of bytes left to read if there is a
                            // maximum imposed by the caller. SIZE_MAX if
                            // unlimited.
};

/**
 * SourceBuffer is a parallel data structure used for storing image source
 * (compressed) data.
 *
 * SourceBuffer is a single producer, multiple consumer data structure. The
 * single producer calls Append() to append data to the buffer. In parallel,
 * multiple consumers can call Iterator(), which returns a
 * SourceBufferIterator that they can use to iterate through the buffer. The
 * SourceBufferIterator returns a series of pointers which remain stable for
 * the lifetime of the SourceBuffer, and the data they point to is immutable,
 * ensuring that the producer never interferes with the consumers.
 *
 * In order to avoid blocking, SourceBuffer works with SourceBufferIterator to
 * keep a list of consumers which are waiting for new data, and to resume them
 * when the producer appends more. All consumers must implement the IResumable
 * interface to make this possible.
 */
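
/*
 * Illustrative producer-side sketch, following the description above: the
 * single producer typically sizes the buffer, appends data as it arrives, and
 * then marks the buffer complete. aData, aLength, and aExpectedLength are
 * hypothetical inputs.
 *
 *   RefPtr<SourceBuffer> buffer = new SourceBuffer();
 *   buffer->ExpectLength(aExpectedLength);  // Optional preallocation hint.
 *   nsresult rv = buffer->Append(aData, aLength);  // Repeated as data arrives.
 *   buffer->Complete(NS_SUCCEEDED(rv) ? NS_OK : rv);  // No Append() after this.
 *
 * Consumers, possibly on other threads, obtain iterators in parallel via
 * buffer->Iterator().
 */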

class SourceBuffer final {
 public:
  MOZ_DECLARE_REFCOUNTED_TYPENAME(image::SourceBuffer)
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(image::SourceBuffer)

  SourceBuffer();

  //////////////////////////////////////////////////////////////////////////////
  // Producer methods.
  //////////////////////////////////////////////////////////////////////////////

  /**
   * If the producer knows how long the source data will be, it should call
   * ExpectLength, which enables SourceBuffer to preallocate its buffer.
   */
  nsresult ExpectLength(size_t aExpectedLength);

  /// Append the provided data to the buffer.
  nsresult Append(const char* aData, size_t aLength);

  /// Append the data available on the provided nsIInputStream to the buffer.
  nsresult AppendFromInputStream(nsIInputStream* aInputStream,
                                 uint32_t aCount);

  /**
   * Mark the buffer complete, with a status that will be available to
   * consumers. Further calls to Append() are forbidden after Complete().
   */
  void Complete(nsresult aStatus);

  /// Returns true if the buffer is complete.
  bool IsComplete();

  /// Memory reporting.
  size_t SizeOfIncludingThisWithComputedFallback(MallocSizeOf) const;

  //////////////////////////////////////////////////////////////////////////////
  // Consumer methods.
  //////////////////////////////////////////////////////////////////////////////

  /**
   * Returns an iterator to this SourceBuffer, which cannot read more than the
   * given length.
   */
  SourceBufferIterator Iterator(size_t aReadLength = SIZE_MAX);

  //////////////////////////////////////////////////////////////////////////////
  // Constants.
  //////////////////////////////////////////////////////////////////////////////

  /**
   * The minimum chunk capacity we'll allocate, if we don't know the correct
   * capacity (which would happen because ExpectLength() wasn't called or gave
   * us the wrong value). This is only exposed for use by tests; if normal code
   * is using this, it's doing something wrong.
   */
  static const size_t MIN_CHUNK_CAPACITY = 4096;

  /**
   * The maximum chunk capacity we'll allocate. This was historically the
   * maximum we would preallocate based on the network size. We may adjust it
   * in the future based on the IMAGE_DECODE_CHUNKS telemetry to ensure most
   * images remain in a single chunk.
   */
  static const size_t MAX_CHUNK_CAPACITY = 20 * 1024 * 1024;

 private:
  friend class SourceBufferIterator;

  ~SourceBuffer();

  //////////////////////////////////////////////////////////////////////////////
  // Chunk type and chunk-related methods.
  //////////////////////////////////////////////////////////////////////////////

  class Chunk final {
   public:
    explicit Chunk(size_t aCapacity) : mCapacity(aCapacity), mLength(0) {
      MOZ_ASSERT(aCapacity > 0, "Creating zero-capacity chunk");
      mData = static_cast<char*>(malloc(mCapacity));
    }

    ~Chunk() { free(mData); }

    Chunk(Chunk&& aOther)
        : mCapacity(aOther.mCapacity),
          mLength(aOther.mLength),
          mData(aOther.mData) {
      aOther.mCapacity = aOther.mLength = 0;
      aOther.mData = nullptr;
    }

    Chunk& operator=(Chunk&& aOther) {
      free(mData);
      mCapacity = aOther.mCapacity;
      mLength = aOther.mLength;
      mData = aOther.mData;
      aOther.mCapacity = aOther.mLength = 0;
      aOther.mData = nullptr;
      return *this;
    }

    bool AllocationFailed() const { return !mData; }
    size_t Capacity() const { return mCapacity; }
    size_t Length() const { return mLength; }

    char* Data() const {
      MOZ_ASSERT(mData, "Allocation failed but nobody checked for it");
      return mData;
    }

    void AddLength(size_t aAdditionalLength) {
      MOZ_ASSERT(mLength + aAdditionalLength <= mCapacity);
      mLength += aAdditionalLength;
    }

    bool SetCapacity(size_t aCapacity) {
      MOZ_ASSERT(mData, "Allocation failed but nobody checked for it");
      char* data = static_cast<char*>(realloc(mData, aCapacity));
      if (!data) {
        return false;
      }

      mData = data;
      mCapacity = aCapacity;
      return true;
    }

   private:
    Chunk(const Chunk&) = delete;
    Chunk& operator=(const Chunk&) = delete;

    size_t mCapacity;
    size_t mLength;
    char* mData;
  };

  nsresult AppendChunk(Maybe<Chunk>&& aChunk);
  Maybe<Chunk> CreateChunk(size_t aCapacity, size_t aExistingCapacity = 0,
                           bool aRoundUp = true);
  nsresult Compact();
  static size_t RoundedUpCapacity(size_t aCapacity);
  size_t FibonacciCapacityWithMinimum(size_t aMinCapacity);

  //////////////////////////////////////////////////////////////////////////////
  // Iterator / consumer methods.
  //////////////////////////////////////////////////////////////////////////////

  void AddWaitingConsumer(IResumable* aConsumer);
  void ResumeWaitingConsumers();

  typedef SourceBufferIterator::State State;

  State AdvanceIteratorOrScheduleResume(SourceBufferIterator& aIterator,
                                        size_t aRequestedBytes,
                                        IResumable* aConsumer);
  bool RemainingBytesIsNoMoreThan(const SourceBufferIterator& aIterator,
                                  size_t aBytes) const;

  void OnIteratorRelease();

  //////////////////////////////////////////////////////////////////////////////
  // Helper methods.
  //////////////////////////////////////////////////////////////////////////////

  nsresult HandleError(nsresult aError);
  bool IsEmpty();
  bool IsLastChunk(uint32_t aChunk);

  //////////////////////////////////////////////////////////////////////////////
  // Member variables.
  //////////////////////////////////////////////////////////////////////////////

  /// All private members are protected by mMutex.
  mutable Mutex mMutex;

  /// The data in this SourceBuffer, stored as a series of Chunks.
  AutoTArray<Chunk, 1> mChunks;

  /// Consumers which are waiting to be notified when new data is available.
  nsTArray<RefPtr<IResumable>> mWaitingConsumers;

  /// If present, marks this SourceBuffer complete with the given final status.
  Maybe<nsresult> mStatus;

  /// Count of active consumers.
  uint32_t mConsumerCount;

  /// True if compacting has been performed.
  bool mCompacted;
};

}  // namespace image
}  // namespace mozilla

#endif  // mozilla_image_sourcebuffer_h