/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* A type/length-parametrized vector class. */

#ifndef mozilla_Vector_h
#define mozilla_Vector_h

#include <algorithm>  // for std::remove_if (used by eraseIf)
#include <new>        // for placement new
#include <utility>
15 #include "mozilla/Alignment.h"
16 #include "mozilla/AllocPolicy.h"
17 #include "mozilla/ArrayUtils.h" // for PointerRangeSize
18 #include "mozilla/Assertions.h"
19 #include "mozilla/Attributes.h"
20 #include "mozilla/MathAlgorithms.h"
21 #include "mozilla/MemoryReporting.h"
22 #include "mozilla/OperatorNewExtensions.h"
23 #include "mozilla/ReentrancyGuard.h"
24 #include "mozilla/Span.h"
25 #include "mozilla/TemplateLib.h"
26 #include "mozilla/TypeTraits.h"
28 namespace mozilla {
30 template <typename T, size_t N, class AllocPolicy>
31 class Vector;
33 namespace detail {

/*
 * Check that the given capacity wastes the minimal amount of space if
 * allocated on the heap. This means that aCapacity*EltSize is as close to a
 * power-of-two as possible. growStorageBy() is responsible for ensuring this.
 */
template <size_t EltSize>
static bool CapacityHasExcessSpace(size_t aCapacity) {
  size_t size = aCapacity * EltSize;
  return RoundUpPow2(size) - size >= EltSize;
}
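
// Illustrative arithmetic (a worked example, not code from this header): with
// EltSize == 12, a capacity of 5 elements occupies 60 bytes, which RoundUpPow2
// rounds to 64; only 4 bytes (< EltSize) are wasted, so there is no excess
// space. A capacity of 4 gives 48 -> 64, wasting 16 bytes (>= EltSize), so
// that capacity does have excess space.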

/*
 * AllocPolicy can optionally provide a `computeGrowth<T>(size_t aOldElts,
 * size_t aIncr)` method that returns the new number of elements to allocate
 * when the current capacity is `aOldElts` and `aIncr` more are being
 * requested. If the AllocPolicy does not have such a method, a fallback
 * will be used that mostly will just round the new requested capacity up to
 * the next power of two, which results in doubling capacity for the most part.
 *
 * If the new size would overflow some limit, `computeGrowth` returns 0.
 *
 * A simpler way would be to make computeGrowth() part of the API for all
 * AllocPolicy classes, but this turns out to be rather complex because
 * mozalloc.h defines a very widely-used InfallibleAllocPolicy, and yet it
 * can only be compiled in limited contexts, eg within `extern "C"` and with
 * -std=c++11 rather than a later version. That makes the headers that are
 * necessary for the computation unavailable (eg mfbt/MathAlgorithms.h).
 */

// Fallback version.
template <size_t EltSize>
inline size_t GrowEltsByDoubling(size_t aOldElts, size_t aIncr) {
  /*
   * When choosing a new capacity, its size in bytes should be as close to 2**N
   * bytes as possible. 2**N-sized requests are best because they are unlikely
   * to be rounded up by the allocator. Asking for a 2**N number of elements
   * isn't as good, because if EltSize is not a power-of-two that would
   * result in a non-2**N request size.
   */

  if (aIncr == 1) {
    if (aOldElts == 0) {
      return 1;
    }

    /* This case occurs in ~15--20% of the calls to Vector::growStorageBy. */

    /*
     * Will aOldElts * 4 * EltSize overflow? This condition limits a
     * collection to 1GB of memory on a 32-bit system, which is a reasonable
     * limit. It also ensures that
     *
     *   static_cast<char*>(end()) - static_cast<char*>(begin())
     *
     * for a Vector doesn't overflow ptrdiff_t (see bug 510319).
     */
    if (MOZ_UNLIKELY(aOldElts &
                     mozilla::tl::MulOverflowMask<4 * EltSize>::value)) {
      return 0;
    }

    /*
     * If we reach here, the existing capacity will have a size that is already
     * as close to 2^N as sizeof(T) will allow. Just double the capacity, and
     * then there might be space for one more element.
     */
    size_t newElts = aOldElts * 2;
    if (CapacityHasExcessSpace<EltSize>(newElts)) {
      newElts += 1;
    }

    return newElts;
  }

  /* This case occurs in ~2% of the calls to Vector::growStorageBy. */
  size_t newMinCap = aOldElts + aIncr;

  /* Did aOldElts + aIncr overflow? Will newMinCap * EltSize rounded up to the
   * next power of two overflow PTRDIFF_MAX? */
  if (MOZ_UNLIKELY(newMinCap < aOldElts ||
                   newMinCap & tl::MulOverflowMask<4 * EltSize>::value)) {
    return 0;
  }

  size_t newMinSize = newMinCap * EltSize;
  size_t newSize = RoundUpPow2(newMinSize);
  return newSize / EltSize;
}
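
// Worked example (illustrative only): with EltSize == 4, growing a vector of
// 8 elements by 3 gives newMinCap == 11 and newMinSize == 44 bytes;
// RoundUpPow2 rounds that to 64, so the returned capacity is 64 / 4 == 16
// elements.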

// Fallback version.
template <typename AP, size_t EltSize>
static size_t ComputeGrowth(size_t aOldElts, size_t aIncr, int) {
  return GrowEltsByDoubling<EltSize>(aOldElts, aIncr);
}

// If the AllocPolicy provides its own computeGrowth<EltSize> implementation,
// use that.
template <typename AP, size_t EltSize>
static size_t ComputeGrowth(
    size_t aOldElts, size_t aIncr,
    decltype(std::declval<AP>().template computeGrowth<EltSize>(0, 0),
             bool()) aOverloadSelector) {
  size_t newElts = AP::template computeGrowth<EltSize>(aOldElts, aIncr);
  MOZ_ASSERT(newElts <= PTRDIFF_MAX && newElts * EltSize <= PTRDIFF_MAX,
             "invalid Vector size (see bug 510319)");
  return newElts;
}
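
// A sketch of an AllocPolicy opting into this hook (hypothetical policy, not
// defined in this codebase; a real policy must also keep the result within
// the PTRDIFF_MAX bounds asserted above):
//
//   struct GreedyAllocPolicy : MallocAllocPolicy {
//     template <size_t EltSize>
//     static size_t computeGrowth(size_t aOldElts, size_t aIncr) {
//       size_t needed = aOldElts + aIncr;
//       if (needed < aOldElts) {
//         return 0;  // overflow: growStorageBy treats 0 as failure
//       }
//       return needed + needed / 4;  // grow 25% beyond the request
//     }
//   };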

/*
 * This template class provides a default implementation for vector operations
 * when the element type is not known to be a POD, as judged by IsPod.
 */
template <typename T, size_t N, class AP, bool IsPod>
struct VectorImpl {
  /*
   * Constructs an object in the uninitialized memory at *aDst with aArgs.
   */
  template <typename... Args>
  MOZ_NONNULL(1)
  static inline void new_(T* aDst, Args&&... aArgs) {
    new (KnownNotNull, aDst) T(std::forward<Args>(aArgs)...);
  }

  /* Destroys constructed objects in the range [aBegin, aEnd). */
  static inline void destroy(T* aBegin, T* aEnd) {
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      p->~T();
    }
  }

  /* Constructs objects in the uninitialized range [aBegin, aEnd). */
  static inline void initialize(T* aBegin, T* aEnd) {
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      new_(p);
    }
  }

  /*
   * Copy-constructs objects in the uninitialized range
   * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
   */
  template <typename U>
  static inline void copyConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      new_(aDst, *p);
    }
  }

  /*
   * Move-constructs objects in the uninitialized range
   * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
   */
  template <typename U>
  static inline void moveConstruct(T* aDst, U* aSrcStart, U* aSrcEnd) {
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      new_(aDst, std::move(*p));
    }
  }

  /*
   * Copy-constructs objects in the uninitialized range [aDst, aDst+aN) from
   * the same object aU.
   */
  template <typename U>
  static inline void copyConstructN(T* aDst, size_t aN, const U& aU) {
    for (T* end = aDst + aN; aDst < end; ++aDst) {
      new_(aDst, aU);
    }
  }

  /*
   * Grows the given buffer to have capacity aNewCap, preserving the objects
   * constructed in the range [begin, end) and updating aV. Assumes that (1)
   * aNewCap has not overflowed, and (2) multiplying aNewCap by sizeof(T) will
   * not overflow.
   */
  [[nodiscard]] static inline bool growTo(Vector<T, N, AP>& aV,
                                          size_t aNewCap) {
    MOZ_ASSERT(!aV.usingInlineStorage());
    MOZ_ASSERT(!CapacityHasExcessSpace<sizeof(T)>(aNewCap));
    T* newbuf = aV.template pod_malloc<T>(aNewCap);
    if (MOZ_UNLIKELY(!newbuf)) {
      return false;
    }
    T* dst = newbuf;
    T* src = aV.beginNoCheck();
    for (; src < aV.endNoCheck(); ++dst, ++src) {
      new_(dst, std::move(*src));
    }
    VectorImpl::destroy(aV.beginNoCheck(), aV.endNoCheck());
    aV.free_(aV.mBegin, aV.mTail.mCapacity);
    aV.mBegin = newbuf;
    /* aV.mLength is unchanged. */
    aV.mTail.mCapacity = aNewCap;
    return true;
  }
};

/*
 * This partial template specialization provides a default implementation for
 * vector operations when the element type is known to be a POD, as judged by
 * IsPod.
 */
template <typename T, size_t N, class AP>
struct VectorImpl<T, N, AP, true> {
  template <typename... Args>
  MOZ_NONNULL(1)
  static inline void new_(T* aDst, Args&&... aArgs) {
    // Explicitly construct a local object instead of using a temporary since
    // T(args...) will be treated like a C-style cast in the unary case and
    // allow unsafe conversions. Both forms should be equivalent to an
    // optimizing compiler.
    T temp(std::forward<Args>(aArgs)...);
    *aDst = temp;
  }

  static inline void destroy(T*, T*) {}

  static inline void initialize(T* aBegin, T* aEnd) {
    /*
     * You would think that memset would be a big win (or even break even)
     * when we know T is a POD. But currently it's not. This is probably
     * because |append| tends to be given small ranges and memset requires
     * a function call that doesn't get inlined.
     *
     *   memset(aBegin, 0, sizeof(T) * (aEnd - aBegin));
     */
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      new_(p);
    }
  }

  template <typename U>
  static inline void copyConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    /*
     * See above memset comment. Also, notice that copyConstruct is
     * currently templated (T != U), so memcpy won't work without
     * requiring T == U.
     *
     *   memcpy(aDst, aSrcStart, sizeof(T) * (aSrcEnd - aSrcStart));
     */
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      new_(aDst, *p);
    }
  }

  template <typename U>
  static inline void moveConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    copyConstruct(aDst, aSrcStart, aSrcEnd);
  }

  static inline void copyConstructN(T* aDst, size_t aN, const T& aT) {
    for (T* end = aDst + aN; aDst < end; ++aDst) {
      new_(aDst, aT);
    }
  }

  [[nodiscard]] static inline bool growTo(Vector<T, N, AP>& aV,
                                          size_t aNewCap) {
    MOZ_ASSERT(!aV.usingInlineStorage());
    MOZ_ASSERT(!CapacityHasExcessSpace<sizeof(T)>(aNewCap));
    T* newbuf =
        aV.template pod_realloc<T>(aV.mBegin, aV.mTail.mCapacity, aNewCap);
    if (MOZ_UNLIKELY(!newbuf)) {
      return false;
    }
    aV.mBegin = newbuf;
    /* aV.mLength is unchanged. */
    aV.mTail.mCapacity = aNewCap;
    return true;
  }
};

// A struct for TestVector.cpp to access private internal fields.
// DO NOT DEFINE IN YOUR OWN CODE.
struct VectorTesting;

}  // namespace detail

/*
 * STL-like container providing a short-lived, dynamic buffer. Vector calls the
 * constructors/destructors of all elements stored in its internal buffer, so
 * non-PODs may be safely used. Additionally, Vector will store the first N
 * elements in-place before resorting to dynamic allocation.
 *
 * T requirements:
 *  - default and copy constructible, assignable, destructible
 *  - operations do not throw
 * MinInlineCapacity requirements:
 *  - any value, however, MinInlineCapacity is clamped to min/max values
 * AllocPolicy:
 *  - see "Allocation policies" in AllocPolicy.h (defaults to
 *    mozilla::MallocAllocPolicy)
 *
 * Vector is not reentrant: T member functions called during Vector member
 * functions must not call back into the same object!
 */
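
/*
 * A minimal usage sketch (illustrative only):
 *
 *   mozilla::Vector<int, 8> v;  // the first 8 elements are stored inline
 *   if (!v.append(42)) {
 *     return;  // append is fallible; a false return means allocation failed
 *   }
 *   for (int x : v) {
 *     // begin()/end() make range-for iteration work
 *   }
 */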
template <typename T, size_t MinInlineCapacity = 0,
          class AllocPolicy = MallocAllocPolicy>
class MOZ_NON_PARAM Vector final : private AllocPolicy {
  /* utilities */

  static constexpr bool kElemIsPod = IsPod<T>::value;
  typedef detail::VectorImpl<T, MinInlineCapacity, AllocPolicy, kElemIsPod>
      Impl;
  friend struct detail::VectorImpl<T, MinInlineCapacity, AllocPolicy,
                                   kElemIsPod>;

  friend struct detail::VectorTesting;

  [[nodiscard]] bool growStorageBy(size_t aIncr);
  [[nodiscard]] bool convertToHeapStorage(size_t aNewCap);
  [[nodiscard]] bool maybeCheckSimulatedOOM(size_t aRequestedSize);

  /* magic constants */

  /**
   * The maximum space allocated for inline element storage.
   *
   * We reduce space by what the AllocPolicy base class and prior Vector member
   * fields likely consume to attempt to play well with binary size classes.
   */
  static constexpr size_t kMaxInlineBytes =
      1024 -
      (sizeof(AllocPolicy) + sizeof(T*) + sizeof(size_t) + sizeof(size_t));

  /**
   * The number of T elements of inline capacity built into this Vector. This
   * is usually |MinInlineCapacity|, but it may be less (or zero!) for large T.
   *
   * We use a partially-specialized template (not explicit specialization, which
   * is only allowed at namespace scope) to compute this value. The benefit is
   * that |sizeof(T)| need not be computed, and |T| doesn't have to be fully
   * defined at the time |Vector<T>| appears, if no inline storage is requested.
   */
  template <size_t MinimumInlineCapacity, size_t Dummy>
  struct ComputeCapacity {
    static constexpr size_t value =
        tl::Min<MinimumInlineCapacity, kMaxInlineBytes / sizeof(T)>::value;
  };

  template <size_t Dummy>
  struct ComputeCapacity<0, Dummy> {
    static constexpr size_t value = 0;
  };

  /** The actual inline capacity in number of elements T. This may be zero! */
  static constexpr size_t kInlineCapacity =
      ComputeCapacity<MinInlineCapacity, 0>::value;

  /* member data */

  /**
   * Pointer to the buffer, be it inline or heap-allocated. Only [mBegin,
   * mBegin + mLength) hold valid constructed T objects. The range [mBegin +
   * mLength, mBegin + mCapacity) holds uninitialized memory. The range
   * [mBegin + mLength, mBegin + mReserved) also holds uninitialized memory
   * previously allocated by a call to reserve().
   */
  T* mBegin;

  /* Number of elements in the vector. */
  size_t mLength;

  /*
   * Memory used to store capacity, reserved element count (debug builds only),
   * and inline storage. The simple "answer" is:
   *
   *   size_t mCapacity;
   *   #ifdef DEBUG
   *   size_t mReserved;
   *   #endif
   *   alignas(T) unsigned char mBytes[kInlineCapacity * sizeof(T)];
   *
   * but there are complications. First, C++ forbids zero-sized arrays that
   * might result. Second, we don't want zero capacity to affect Vector's size
   * (even empty classes take up a byte, unless they're base classes).
   *
   * Yet again, we eliminate the zero-sized array using partial specialization.
   * And we eliminate potential size hit by putting capacity/reserved in one
   * struct, then putting the array (if any) in a derived struct. If no array
   * is needed, the derived struct won't consume extra space.
   */
  struct CapacityAndReserved {
    explicit CapacityAndReserved(size_t aCapacity, size_t aReserved)
        : mCapacity(aCapacity)
#ifdef DEBUG
          ,
          mReserved(aReserved)
#endif
    {
    }
    CapacityAndReserved() = default;

    /* Max number of elements storable in the vector without resizing. */
    size_t mCapacity;

#ifdef DEBUG
    /* Max elements of reserved or used space in this vector. */
    size_t mReserved;
#endif
  };

// Silence warnings about this struct possibly being padded due to the
// alignas() in it -- there's nothing we can do to avoid it.
#ifdef _MSC_VER
#  pragma warning(push)
#  pragma warning(disable : 4324)
#endif  // _MSC_VER

  template <size_t Capacity, size_t Dummy>
  struct CRAndStorage : CapacityAndReserved {
    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
        : CapacityAndReserved(aCapacity, aReserved) {}
    CRAndStorage() = default;

    alignas(T) unsigned char mBytes[Capacity * sizeof(T)];

    // GCC fails due to -Werror=strict-aliasing if |mBytes| is directly cast to
    // T*. Indirecting through this function addresses the problem.
    void* data() { return mBytes; }

    T* storage() { return static_cast<T*>(data()); }
  };

  template <size_t Dummy>
  struct CRAndStorage<0, Dummy> : CapacityAndReserved {
    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
        : CapacityAndReserved(aCapacity, aReserved) {}
    CRAndStorage() = default;

    T* storage() {
      // If this returns |nullptr|, functions like |Vector::begin()| would too,
      // breaking callers that pass a vector's elements as pointer/length to
      // code that bounds its operation by length but (even just as a sanity
      // check) always wants a non-null pointer. Fake up an aligned, non-null
      // pointer to support these callers.
      return reinterpret_cast<T*>(sizeof(T));
    }
  };

  CRAndStorage<kInlineCapacity, 0> mTail;

#ifdef _MSC_VER
#  pragma warning(pop)
#endif  // _MSC_VER

#ifdef DEBUG
  friend class ReentrancyGuard;
  bool mEntered;
#endif

  /* private accessors */

  bool usingInlineStorage() const {
    return mBegin == const_cast<Vector*>(this)->inlineStorage();
  }

  T* inlineStorage() { return mTail.storage(); }

  T* beginNoCheck() const { return mBegin; }

  T* endNoCheck() { return mBegin + mLength; }

  const T* endNoCheck() const { return mBegin + mLength; }

#ifdef DEBUG
  /**
   * The amount of explicitly allocated space in this vector that is immediately
   * available to be filled by appending additional elements. This value is
   * always greater than or equal to |length()| -- the vector's actual elements
   * are implicitly reserved. This value is always less than or equal to
   * |capacity()|. It may be explicitly increased using the |reserve()| method.
   */
  size_t reserved() const {
    MOZ_ASSERT(mLength <= mTail.mReserved);
    MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
    return mTail.mReserved;
  }
#endif

  bool internalEnsureCapacity(size_t aNeeded);

  /* Append operations guaranteed to succeed due to pre-reserved space. */
  template <typename U>
  void internalAppend(U&& aU);
  template <typename U, size_t O, class BP>
  void internalAppendAll(const Vector<U, O, BP>& aU);
  void internalAppendN(const T& aT, size_t aN);
  template <typename U>
  void internalAppend(const U* aBegin, size_t aLength);
  template <typename U>
  void internalMoveAppend(U* aBegin, size_t aLength);

 public:
  static const size_t sMaxInlineStorage = MinInlineCapacity;

  typedef T ElementType;

  explicit Vector(AllocPolicy = AllocPolicy());
  Vector(Vector&&);            /* Move constructor. */
  Vector& operator=(Vector&&); /* Move assignment. */
  ~Vector();

  /* accessors */

  const AllocPolicy& allocPolicy() const { return *this; }

  AllocPolicy& allocPolicy() { return *this; }

  enum { InlineLength = MinInlineCapacity };

  size_t length() const { return mLength; }

  bool empty() const { return mLength == 0; }

  size_t capacity() const { return mTail.mCapacity; }

  T* begin() {
    MOZ_ASSERT(!mEntered);
    return mBegin;
  }

  const T* begin() const {
    MOZ_ASSERT(!mEntered);
    return mBegin;
  }

  T* end() {
    MOZ_ASSERT(!mEntered);
    return mBegin + mLength;
  }

  const T* end() const {
    MOZ_ASSERT(!mEntered);
    return mBegin + mLength;
  }

  T& operator[](size_t aIndex) {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(aIndex < mLength);
    return begin()[aIndex];
  }

  const T& operator[](size_t aIndex) const {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(aIndex < mLength);
    return begin()[aIndex];
  }

  T& back() {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(!empty());
    return *(end() - 1);
  }

  const T& back() const {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(!empty());
    return *(end() - 1);
  }

  operator mozilla::Span<const T>() const {
    // Explicitly specify template argument here to avoid instantiating Span<T>
    // first and then implicitly converting to Span<const T>
    return mozilla::Span<const T>{mBegin, mLength};
  }

  operator mozilla::Span<T>() { return mozilla::Span{mBegin, mLength}; }
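
  /*
   * Illustrative sketch of the Span conversions (|TakesSpan| is a
   * hypothetical callee, not part of this header):
   *
   *   void TakesSpan(mozilla::Span<const int> aInts);
   *   ...
   *   mozilla::Vector<int> ints;
   *   TakesSpan(ints);  // uses the implicit Span<const T> conversion
   */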

  class Range {
    friend class Vector;
    T* mCur;
    T* mEnd;
    Range(T* aCur, T* aEnd) : mCur(aCur), mEnd(aEnd) {
      MOZ_ASSERT(aCur <= aEnd);
    }

   public:
    bool empty() const { return mCur == mEnd; }
    size_t remain() const { return PointerRangeSize(mCur, mEnd); }
    T& front() const {
      MOZ_ASSERT(!empty());
      return *mCur;
    }
    void popFront() {
      MOZ_ASSERT(!empty());
      ++mCur;
    }
    T popCopyFront() {
      MOZ_ASSERT(!empty());
      return *mCur++;
    }
  };

  class ConstRange {
    friend class Vector;
    const T* mCur;
    const T* mEnd;
    ConstRange(const T* aCur, const T* aEnd) : mCur(aCur), mEnd(aEnd) {
      MOZ_ASSERT(aCur <= aEnd);
    }

   public:
    bool empty() const { return mCur == mEnd; }
    size_t remain() const { return PointerRangeSize(mCur, mEnd); }
    const T& front() const {
      MOZ_ASSERT(!empty());
      return *mCur;
    }
    void popFront() {
      MOZ_ASSERT(!empty());
      ++mCur;
    }
    T popCopyFront() {
      MOZ_ASSERT(!empty());
      return *mCur++;
    }
  };

  Range all() { return Range(begin(), end()); }
  ConstRange all() const { return ConstRange(begin(), end()); }
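
  /*
   * Illustrative iteration sketch using all() (|use| is hypothetical):
   *
   *   for (auto r = vec.all(); !r.empty(); r.popFront()) {
   *     use(r.front());
   *   }
   */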

  /* mutators */

  /**
   * Reverse the order of the elements in the vector in place.
   */
  void reverse();

  /**
   * Given that the vector is empty, grow the internal capacity to |aRequest|,
   * keeping the length 0.
   */
  [[nodiscard]] bool initCapacity(size_t aRequest);

  /**
   * Given that the vector is empty, grow the internal capacity and length to
   * |aRequest| leaving the elements' memory completely uninitialized (with all
   * the associated hazards and caveats). This avoids the usual allocation-size
   * rounding that happens in resize and overhead of initialization for elements
   * that are about to be overwritten.
   */
  [[nodiscard]] bool initLengthUninitialized(size_t aRequest);

  /**
   * If reserve(aRequest) succeeds and |aRequest >= length()|, then appending
   * |aRequest - length()| elements, in any sequence of append/appendAll calls,
   * is guaranteed to succeed.
   *
   * A request to reserve an amount less than the current length does not affect
   * reserved space.
   */
  [[nodiscard]] bool reserve(size_t aRequest);
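
  /*
   * Sketch of the reserve + infallibleAppend pattern (illustrative only;
   * |vals| and |n| are hypothetical):
   *
   *   if (!vec.reserve(vec.length() + n)) {
   *     return false;  // OOM
   *   }
   *   for (size_t i = 0; i < n; i++) {
   *     vec.infallibleAppend(vals[i]);  // cannot fail: space was reserved
   *   }
   */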

  /**
   * Destroy elements in the range [end() - aIncr, end()). Does not deallocate
   * or unreserve storage for those elements.
   */
  void shrinkBy(size_t aIncr);

  /**
   * Destroy elements in the range [aNewLength, end()). Does not deallocate
   * or unreserve storage for those elements.
   */
  void shrinkTo(size_t aNewLength);

  /** Grow the vector by aIncr elements. */
  [[nodiscard]] bool growBy(size_t aIncr);

  /** Call shrinkBy or growBy based on whether newSize > length(). */
  [[nodiscard]] bool resize(size_t aNewLength);

  /**
   * Increase the length of the vector, but don't initialize the new elements
   * -- leave them as uninitialized memory.
   */
  [[nodiscard]] bool growByUninitialized(size_t aIncr);
  void infallibleGrowByUninitialized(size_t aIncr);
  [[nodiscard]] bool resizeUninitialized(size_t aNewLength);

  /** Shorthand for shrinkBy(length()). */
  void clear();

  /** Clears and releases any heap-allocated storage. */
  void clearAndFree();

  /**
   * Shrinks the storage to drop excess capacity if possible.
   *
   * The return value indicates whether the operation succeeded, otherwise, it
   * represents an OOM. The bool can be safely ignored unless you want to
   * provide the guarantee that `length() == capacity()`.
   *
   * For PODs, it calls the AllocPolicy's pod_realloc. For non-PODs, it moves
   * the elements into the new storage.
   */
  bool shrinkStorageToFit();

  /**
   * If true, appending |aNeeded| elements won't reallocate elements storage.
   * This *doesn't* mean that infallibleAppend may be used! You still must
   * reserve the extra space, even if this method indicates that appends won't
   * need to reallocate elements storage.
   */
  bool canAppendWithoutRealloc(size_t aNeeded) const;

  /** Potentially fallible append operations. */

  /**
   * This can take either a T& or a T&&. Given a T&&, it moves |aU| into the
   * vector, instead of copying it. If it fails, |aU| is left unmoved. ("We are
   * not amused.")
   */
  template <typename U>
  [[nodiscard]] bool append(U&& aU);

  /**
   * Construct a T in-place as a new entry at the end of this vector.
   */
  template <typename... Args>
  [[nodiscard]] bool emplaceBack(Args&&... aArgs) {
    if (!growByUninitialized(1)) return false;
    Impl::new_(&back(), std::forward<Args>(aArgs)...);
    return true;
  }

  template <typename U, size_t O, class BP>
  [[nodiscard]] bool appendAll(const Vector<U, O, BP>& aU);
  template <typename U, size_t O, class BP>
  [[nodiscard]] bool appendAll(Vector<U, O, BP>&& aU);
  [[nodiscard]] bool appendN(const T& aT, size_t aN);
  template <typename U>
  [[nodiscard]] bool append(const U* aBegin, const U* aEnd);
  template <typename U>
  [[nodiscard]] bool append(const U* aBegin, size_t aLength);
  template <typename U>
  [[nodiscard]] bool moveAppend(U* aBegin, U* aEnd);
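
  /*
   * Illustrative range-append sketch (assumes a Vector<int> named |vec|):
   *
   *   static const int kPrimes[] = {2, 3, 5};
   *   if (!vec.append(kPrimes, mozilla::ArrayLength(kPrimes))) {
   *     return false;  // OOM
   *   }
   */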

  /*
   * Guaranteed-infallible append operations for use upon vectors whose
   * memory has been pre-reserved. Don't use this if you haven't reserved the
   * memory!
   */
  template <typename U>
  void infallibleAppend(U&& aU) {
    internalAppend(std::forward<U>(aU));
  }
  void infallibleAppendN(const T& aT, size_t aN) { internalAppendN(aT, aN); }
  template <typename U>
  void infallibleAppend(const U* aBegin, const U* aEnd) {
    internalAppend(aBegin, PointerRangeSize(aBegin, aEnd));
  }
  template <typename U>
  void infallibleAppend(const U* aBegin, size_t aLength) {
    internalAppend(aBegin, aLength);
  }
  template <typename... Args>
  void infallibleEmplaceBack(Args&&... aArgs) {
    infallibleGrowByUninitialized(1);
    Impl::new_(&back(), std::forward<Args>(aArgs)...);
  }

  void popBack();

  T popCopy();

  /**
   * If elements are stored in-place, return nullptr and leave this vector
   * unmodified.
   *
   * Otherwise return this vector's elements buffer, and clear this vector as if
   * by clearAndFree(). The caller now owns the buffer and is responsible for
   * deallocating it consistent with this vector's AllocPolicy.
   *
   * N.B. Although a T*, only the range [0, length()) is constructed.
   */
  [[nodiscard]] T* extractRawBuffer();

  /**
   * If elements are stored in-place, allocate a new buffer, move this vector's
   * elements into it, and return that buffer.
   *
   * Otherwise return this vector's elements buffer. The caller now owns the
   * buffer and is responsible for deallocating it consistent with this vector's
   * AllocPolicy.
   *
   * This vector is cleared, as if by clearAndFree(), when this method
   * succeeds. This method fails and returns nullptr only if new elements buffer
   * allocation fails.
   *
   * N.B. Only the range [0, length()) of the returned buffer is constructed.
   * If any of these elements are uninitialized (as growByUninitialized
   * enables), behavior is undefined.
   */
  [[nodiscard]] T* extractOrCopyRawBuffer();
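
  /*
   * Ownership-handoff sketch (illustrative; assumes the default
   * MallocAllocPolicy, whose buffers may be released with free()):
   *
   *   size_t len = vec.length();
   *   char* buf = vec.extractOrCopyRawBuffer();
   *   if (buf) {
   *     // ... use buf[0..len) ...
   *     free(buf);
   *   }
   */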

  /**
   * Transfer ownership of an array of objects into the vector. The caller
   * must have allocated the array in accordance with this vector's
   * AllocPolicy.
   *
   * N.B. This call assumes that there are no uninitialized elements in the
   * passed range [aP, aP + aLength). The range [aP + aLength, aP +
   * aCapacity) must be allocated uninitialized memory.
   */
  void replaceRawBuffer(T* aP, size_t aLength, size_t aCapacity);

  /**
   * Transfer ownership of an array of objects into the vector. The caller
   * must have allocated the array in accordance with this vector's
   * AllocPolicy.
   *
   * N.B. This call assumes that there are no uninitialized elements in the
   * passed array.
   */
  void replaceRawBuffer(T* aP, size_t aLength);

  /**
   * Places |aVal| at position |aP|, shifting existing elements from |aP| onward
   * one position higher. On success, |aP| should not be reused because it'll
   * be a dangling pointer if reallocation of the vector storage occurred; the
   * return value should be used instead. On failure, nullptr is returned.
   *
   * Example usage:
   *
   *   if (!(p = vec.insert(p, val))) {
   *     <handle failure>
   *   }
   *   <keep working with p>
   *
   * This is inherently a linear-time operation. Be careful!
   */
  template <typename U>
  [[nodiscard]] T* insert(T* aP, U&& aVal);

  /**
   * Removes the element |aT|, which must fall in the bounds [begin, end),
   * shifting existing elements from |aT + 1| onward one position lower.
   */
  void erase(T* aT);

  /**
   * Removes the elements [|aBegin|, |aEnd|), which must fall in the bounds
   * [begin, end), shifting existing elements from |aEnd| onward to aBegin's old
   * position.
   */
  void erase(T* aBegin, T* aEnd);

  /**
   * Removes all elements that satisfy the predicate, shifting existing elements
   * lower to fill erased gaps.
   */
  template <typename Pred>
  void eraseIf(Pred aPred);

  /**
   * Removes all elements that compare equal to |aU|, shifting existing elements
   * lower to fill erased gaps.
   */
  template <typename U>
  void eraseIfEqual(const U& aU);
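
  /*
   * Illustrative sketch (assumes a Vector<int> named |vec|):
   *
   *   vec.eraseIf([](int aVal) { return aVal % 2 == 0; });  // drop evens
   *   vec.eraseIfEqual(7);                                  // drop any 7s
   */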

  /**
   * Measure the size of the vector's heap-allocated storage.
   */
  size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;

  /**
   * Like sizeOfExcludingThis, but also measures the size of the vector
   * object (which must be heap-allocated) itself.
   */
  size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;

  void swap(Vector& aOther);

 private:
  Vector(const Vector&) = delete;
  void operator=(const Vector&) = delete;
};

/* This does the re-entrancy check plus several other sanity checks. */
#define MOZ_REENTRANCY_GUARD_ET_AL                                         \
  ReentrancyGuard g(*this);                                                \
  MOZ_ASSERT_IF(usingInlineStorage(), mTail.mCapacity == kInlineCapacity); \
  MOZ_ASSERT(reserved() <= mTail.mCapacity);                               \
  MOZ_ASSERT(mLength <= reserved());                                       \
  MOZ_ASSERT(mLength <= mTail.mCapacity)

/* Vector Implementation */

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>::Vector(AP aAP)
    : AP(std::move(aAP)),
      mLength(0),
      mTail(kInlineCapacity, 0)
#ifdef DEBUG
      ,
      mEntered(false)
#endif
{
  mBegin = inlineStorage();
}

/* Move constructor. */
template <typename T, size_t N, class AllocPolicy>
MOZ_ALWAYS_INLINE Vector<T, N, AllocPolicy>::Vector(Vector&& aRhs)
    : AllocPolicy(std::move(aRhs))
#ifdef DEBUG
      ,
      mEntered(false)
#endif
{
  mLength = aRhs.mLength;
  mTail.mCapacity = aRhs.mTail.mCapacity;
#ifdef DEBUG
  mTail.mReserved = aRhs.mTail.mReserved;
#endif

  if (aRhs.usingInlineStorage()) {
    /* We can't move the buffer over in this case, so copy elements. */
    mBegin = inlineStorage();
    Impl::moveConstruct(mBegin, aRhs.beginNoCheck(), aRhs.endNoCheck());
    /*
     * Leave aRhs's mLength, mBegin, mCapacity, and mReserved as they are.
     * The elements in its in-line storage still need to be destroyed.
     */
  } else {
    /*
     * Take src's buffer, and turn src into an empty vector using
     * in-line storage.
     */
    mBegin = aRhs.mBegin;
    aRhs.mBegin = aRhs.inlineStorage();
    aRhs.mTail.mCapacity = kInlineCapacity;
    aRhs.mLength = 0;
#ifdef DEBUG
    aRhs.mTail.mReserved = 0;
#endif
  }
}

/* Move assignment. */
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>& Vector<T, N, AP>::operator=(Vector&& aRhs) {
  MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
  this->~Vector();
  new (KnownNotNull, this) Vector(std::move(aRhs));
  return *this;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>::~Vector() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  Impl::destroy(beginNoCheck(), endNoCheck());
  if (!usingInlineStorage()) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::reverse() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  T* elems = mBegin;
  size_t len = mLength;
  size_t mid = len / 2;
  for (size_t i = 0; i < mid; i++) {
    std::swap(elems[i], elems[len - i - 1]);
  }
}

/*
 * This function will create a new heap buffer with capacity aNewCap,
 * move all elements in the inline buffer to this new buffer,
 * and fail on OOM.
 */
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::convertToHeapStorage(size_t aNewCap) {
  MOZ_ASSERT(usingInlineStorage());

  /* Allocate buffer. */
  MOZ_ASSERT(!detail::CapacityHasExcessSpace<sizeof(T)>(aNewCap));
  T* newBuf = this->template pod_malloc<T>(aNewCap);
  if (MOZ_UNLIKELY(!newBuf)) {
    return false;
  }

  /* Copy inline elements into heap buffer. */
  Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
  Impl::destroy(beginNoCheck(), endNoCheck());

  /* Switch in heap buffer. */
  mBegin = newBuf;
  /* mLength is unchanged. */
  mTail.mCapacity = aNewCap;
  return true;
}

template <typename T, size_t N, class AP>
MOZ_NEVER_INLINE bool Vector<T, N, AP>::growStorageBy(size_t aIncr) {
  MOZ_ASSERT(mLength + aIncr > mTail.mCapacity);

  size_t newCap;

  if (aIncr == 1 && usingInlineStorage()) {
    /* This case occurs in ~70--80% of the calls to this function. */
    constexpr size_t newSize =
        tl::RoundUpPow2<(kInlineCapacity + 1) * sizeof(T)>::value;
    static_assert(newSize / sizeof(T) > 0,
                  "overflow when exceeding inline Vector storage");
    newCap = newSize / sizeof(T);
  } else {
    newCap = detail::ComputeGrowth<AP, sizeof(T)>(mLength, aIncr, true);
    if (MOZ_UNLIKELY(newCap == 0)) {
      this->reportAllocOverflow();
      return false;
    }
  }

  if (usingInlineStorage()) {
    return convertToHeapStorage(newCap);
  }

  return Impl::growTo(*this, newCap);
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::initCapacity(size_t aRequest) {
  MOZ_ASSERT(empty());
  MOZ_ASSERT(usingInlineStorage());
  if (aRequest == 0) {
    return true;
  }
  T* newbuf = this->template pod_malloc<T>(aRequest);
  if (MOZ_UNLIKELY(!newbuf)) {
    return false;
  }
  mBegin = newbuf;
  mTail.mCapacity = aRequest;
#ifdef DEBUG
  mTail.mReserved = aRequest;
#endif
  return true;
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::initLengthUninitialized(size_t aRequest) {
  if (!initCapacity(aRequest)) {
    return false;
  }
  infallibleGrowByUninitialized(aRequest);
  return true;
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::maybeCheckSimulatedOOM(size_t aRequestedSize) {
  if (aRequestedSize <= N) {
    return true;
  }

#ifdef DEBUG
  if (aRequestedSize <= mTail.mReserved) {
    return true;
  }
#endif

  return allocPolicy().checkSimulatedOOM();
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::reserve(size_t aRequest) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aRequest > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aRequest - mLength))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(aRequest)) {
    return false;
  }
#ifdef DEBUG
  if (aRequest > mTail.mReserved) {
    mTail.mReserved = aRequest;
  }
  MOZ_ASSERT(mLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
#endif
  return true;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::shrinkBy(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  MOZ_ASSERT(aIncr <= mLength);
  Impl::destroy(endNoCheck() - aIncr, endNoCheck());
  mLength -= aIncr;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::shrinkTo(size_t aNewLength) {
  MOZ_ASSERT(aNewLength <= mLength);
  shrinkBy(mLength - aNewLength);
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growBy(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aIncr > mTail.mCapacity - mLength) {
    if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
    return false;
  }
  MOZ_ASSERT(mLength + aIncr <= mTail.mCapacity);
  T* newend = endNoCheck() + aIncr;
  Impl::initialize(endNoCheck(), newend);
  mLength += aIncr;
#ifdef DEBUG
  if (mLength > mTail.mReserved) {
    mTail.mReserved = mLength;
  }
#endif
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growByUninitialized(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aIncr > mTail.mCapacity - mLength) {
    if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aIncr > mTail.mReserved) {
    mTail.mReserved = mLength + aIncr;
  }
#endif
  infallibleGrowByUninitialized(aIncr);
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::infallibleGrowByUninitialized(
    size_t aIncr) {
  MOZ_ASSERT(mLength + aIncr <= reserved());
  mLength += aIncr;
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::resize(size_t aNewLength) {
  size_t curLength = mLength;
  if (aNewLength > curLength) {
    return growBy(aNewLength - curLength);
  }
  shrinkBy(curLength - aNewLength);
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::resizeUninitialized(
    size_t aNewLength) {
  size_t curLength = mLength;
  if (aNewLength > curLength) {
    return growByUninitialized(aNewLength - curLength);
  }
  shrinkBy(curLength - aNewLength);
  return true;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::clear() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  Impl::destroy(beginNoCheck(), endNoCheck());
  mLength = 0;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::clearAndFree() {
  clear();

  if (usingInlineStorage()) {
    return;
  }
  this->free_(beginNoCheck(), mTail.mCapacity);
  mBegin = inlineStorage();
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::shrinkStorageToFit() {
  MOZ_REENTRANCY_GUARD_ET_AL;

  const auto length = this->length();
  if (usingInlineStorage() || length == capacity()) {
    return true;
  }

  if (!length) {
    this->free_(beginNoCheck(), mTail.mCapacity);
    mBegin = inlineStorage();
    mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
    mTail.mReserved = 0;
#endif
    return true;
  }

  T* newBuf;
  size_t newCap;
  if (length <= kInlineCapacity) {
    newBuf = inlineStorage();
    newCap = kInlineCapacity;
  } else {
    if (kElemIsPod) {
      newBuf = this->template pod_realloc<T>(beginNoCheck(), mTail.mCapacity,
                                             length);
    } else {
      newBuf = this->template pod_malloc<T>(length);
    }
    if (MOZ_UNLIKELY(!newBuf)) {
      return false;
    }
    newCap = length;
  }
  if (!kElemIsPod || newBuf == inlineStorage()) {
    Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
    Impl::destroy(beginNoCheck(), endNoCheck());
  }
  if (!kElemIsPod) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }
  mBegin = newBuf;
  mTail.mCapacity = newCap;
#ifdef DEBUG
  mTail.mReserved = length;
#endif
  return true;
}

template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::canAppendWithoutRealloc(size_t aNeeded) const {
  return mLength + aNeeded <= mTail.mCapacity;
}

template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendAll(
    const Vector<U, O, BP>& aOther) {
  internalAppend(aOther.begin(), aOther.length());
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(U&& aU) {
  MOZ_ASSERT(mLength + 1 <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::new_(endNoCheck(), std::forward<U>(aU));
  ++mLength;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendN(const T& aT, size_t aNeeded) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (mLength + aNeeded > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aNeeded > mTail.mReserved) {
    mTail.mReserved = mLength + aNeeded;
  }
#endif
  internalAppendN(aT, aNeeded);
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendN(const T& aT,
                                                         size_t aNeeded) {
  MOZ_ASSERT(mLength + aNeeded <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::copyConstructN(endNoCheck(), aNeeded, aT);
  mLength += aNeeded;
}

template <typename T, size_t N, class AP>
template <typename U>
inline T* Vector<T, N, AP>::insert(T* aP, U&& aVal) {
  MOZ_ASSERT(begin() <= aP);
  MOZ_ASSERT(aP <= end());
  size_t pos = aP - begin();
  MOZ_ASSERT(pos <= mLength);
  size_t oldLength = mLength;
  if (pos == oldLength) {
    if (!append(std::forward<U>(aVal))) {
      return nullptr;
    }
  } else {
    T oldBack = std::move(back());
    if (!append(std::move(oldBack))) {
      return nullptr;
    }
    for (size_t i = oldLength - 1; i > pos; --i) {
      (*this)[i] = std::move((*this)[i - 1]);
    }
    (*this)[pos] = std::forward<U>(aVal);
  }
  return begin() + pos;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::erase(T* aIt) {
  MOZ_ASSERT(begin() <= aIt);
  MOZ_ASSERT(aIt < end());
  while (aIt + 1 < end()) {
    *aIt = std::move(*(aIt + 1));
    ++aIt;
  }
  popBack();
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::erase(T* aBegin, T* aEnd) {
  MOZ_ASSERT(begin() <= aBegin);
  MOZ_ASSERT(aBegin <= aEnd);
  MOZ_ASSERT(aEnd <= end());
  while (aEnd < end()) {
    *aBegin++ = std::move(*aEnd++);
  }
  shrinkBy(aEnd - aBegin);
}

template <typename T, size_t N, class AP>
template <typename Pred>
void Vector<T, N, AP>::eraseIf(Pred aPred) {
  // remove_if finds the first element to be erased, and then efficiently move-
  // assigns elements to effectively overwrite elements that satisfy the
  // predicate. It returns the new end pointer, after which there are only
  // moved-from elements ready to be destroyed, so we just need to shrink the
  // vector accordingly.
  T* newEnd = std::remove_if(begin(), end(),
                             [&aPred](const T& aT) { return aPred(aT); });
  MOZ_ASSERT(newEnd <= end());
  shrinkBy(end() - newEnd);
}

template <typename T, size_t N, class AP>
template <typename U>
void Vector<T, N, AP>::eraseIfEqual(const U& aU) {
  return eraseIf([&aU](const T& aT) { return aT == aU; });
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::internalEnsureCapacity(
    size_t aNeeded) {
  if (mLength + aNeeded > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aNeeded > mTail.mReserved) {
    mTail.mReserved = mLength + aNeeded;
  }
#endif
  return true;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
                                                const U* aInsEnd) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  const size_t needed = PointerRangeSize(aInsBegin, aInsEnd);
  if (!internalEnsureCapacity(needed)) {
    return false;
  }
  internalAppend(aInsBegin, needed);
  return true;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(const U* aInsBegin,
                                                        size_t aInsLength) {
  MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::copyConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
  mLength += aInsLength;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::moveAppend(U* aInsBegin, U* aInsEnd) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  const size_t needed = PointerRangeSize(aInsBegin, aInsEnd);
  if (!internalEnsureCapacity(needed)) {
    return false;
  }
  internalMoveAppend(aInsBegin, needed);
  return true;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalMoveAppend(U* aInsBegin,
                                                            size_t aInsLength) {
  MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::moveConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
  mLength += aInsLength;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(U&& aU) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (mLength == mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(1))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + 1)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + 1 > mTail.mReserved) {
    mTail.mReserved = mLength + 1;
  }
#endif
  internalAppend(std::forward<U>(aU));
  return true;
}

template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendAll(
    const Vector<U, O, BP>& aOther) {
  return append(aOther.begin(), aOther.length());
}

template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendAll(Vector<U, O, BP>&& aOther) {
  if (empty() && capacity() < aOther.length()) {
    *this = std::move(aOther);
    return true;
  }

  if (moveAppend(aOther.begin(), aOther.end())) {
    aOther.clearAndFree();
    return true;
  }

  return false;
}

template <typename T, size_t N, class AP>
template <class U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
                                                size_t aInsLength) {
  return append(aInsBegin, aInsBegin + aInsLength);
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::popBack() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  MOZ_ASSERT(!empty());
  --mLength;
  endNoCheck()->~T();
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE T Vector<T, N, AP>::popCopy() {
  T ret = back();
  popBack();
  return ret;
}

template <typename T, size_t N, class AP>
inline T* Vector<T, N, AP>::extractRawBuffer() {
  MOZ_REENTRANCY_GUARD_ET_AL;

  if (usingInlineStorage()) {
    return nullptr;
  }

  T* ret = mBegin;
  mBegin = inlineStorage();
  mLength = 0;
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
  return ret;
}

template <typename T, size_t N, class AP>
inline T* Vector<T, N, AP>::extractOrCopyRawBuffer() {
  if (T* ret = extractRawBuffer()) {
    return ret;
  }

  MOZ_REENTRANCY_GUARD_ET_AL;

  T* copy = this->template pod_malloc<T>(mLength);
  if (!copy) {
    return nullptr;
  }

  Impl::moveConstruct(copy, beginNoCheck(), endNoCheck());
  Impl::destroy(beginNoCheck(), endNoCheck());
  mBegin = inlineStorage();
  mLength = 0;
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
  return copy;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength,
                                               size_t aCapacity) {
  MOZ_REENTRANCY_GUARD_ET_AL;

  /* Destroy what we have. */
  Impl::destroy(beginNoCheck(), endNoCheck());
  if (!usingInlineStorage()) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }

  /* Take in the new buffer. */
  if (aCapacity <= kInlineCapacity) {
    /*
     * We convert to inline storage if possible, even though aP might
     * otherwise be acceptable. Maybe this behaviour should be
     * specifiable with an argument to this function.
     */
    mBegin = inlineStorage();
    mLength = aLength;
    mTail.mCapacity = kInlineCapacity;
    Impl::moveConstruct(mBegin, aP, aP + aLength);
    Impl::destroy(aP, aP + aLength);
    this->free_(aP, aCapacity);
  } else {
    mBegin = aP;
    mLength = aLength;
    mTail.mCapacity = aCapacity;
  }
#ifdef DEBUG
  mTail.mReserved = aCapacity;
#endif
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength) {
  replaceRawBuffer(aP, aLength, aLength);
}

template <typename T, size_t N, class AP>
inline size_t Vector<T, N, AP>::sizeOfExcludingThis(
    MallocSizeOf aMallocSizeOf) const {
  return usingInlineStorage() ? 0 : aMallocSizeOf(beginNoCheck());
}

template <typename T, size_t N, class AP>
inline size_t Vector<T, N, AP>::sizeOfIncludingThis(
    MallocSizeOf aMallocSizeOf) const {
  return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::swap(Vector& aOther) {
  static_assert(N == 0, "still need to implement this for N != 0");

  // This only works when inline storage is always empty.
  if (!usingInlineStorage() && aOther.usingInlineStorage()) {
    aOther.mBegin = mBegin;
    mBegin = inlineStorage();
  } else if (usingInlineStorage() && !aOther.usingInlineStorage()) {
    mBegin = aOther.mBegin;
    aOther.mBegin = aOther.inlineStorage();
  } else if (!usingInlineStorage() && !aOther.usingInlineStorage()) {
    std::swap(mBegin, aOther.mBegin);
  } else {
    // This case is a no-op, since we'd set both to use their inline storage.
  }

  std::swap(mLength, aOther.mLength);
  std::swap(mTail.mCapacity, aOther.mTail.mCapacity);
#ifdef DEBUG
  std::swap(mTail.mReserved, aOther.mTail.mReserved);
#endif
}

}  // namespace mozilla

#endif /* mozilla_Vector_h */