1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 /* A type/length-parametrized vector class. */
9 #ifndef mozilla_Vector_h
10 #define mozilla_Vector_h
12 #include "mozilla/Alignment.h"
13 #include "mozilla/AllocPolicy.h"
14 #include "mozilla/ArrayUtils.h" // for PointerRangeSize
15 #include "mozilla/Assertions.h"
16 #include "mozilla/Attributes.h"
17 #include "mozilla/MathAlgorithms.h"
18 #include "mozilla/MemoryReporting.h"
19 #include "mozilla/Move.h"
20 #include "mozilla/OperatorNewExtensions.h"
21 #include "mozilla/ReentrancyGuard.h"
22 #include "mozilla/TemplateLib.h"
23 #include "mozilla/TypeTraits.h"
25 #include <new> // for placement new
27 /* Silence dire "bugs in previous versions of MSVC have been fixed" warnings */
30 #pragma warning(disable:4345)
/*
 * Forward declaration of Vector so the detail:: helpers below can name it
 * before its definition.
 */
template<typename T,
         size_t N,
         class AllocPolicy>
class Vector;
41 * Check that the given capacity wastes the minimal amount of space if
42 * allocated on the heap. This means that aCapacity*sizeof(T) is as close to a
43 * power-of-two as possible. growStorageBy() is responsible for ensuring this.
46 static bool CapacityHasExcessSpace(size_t aCapacity
)
48 size_t size
= aCapacity
* sizeof(T
);
49 return RoundUpPow2(size
) - size
>= sizeof(T
);
53 * This template class provides a default implementation for vector operations
54 * when the element type is not known to be a POD, as judged by IsPod.
56 template<typename T
, size_t N
, class AP
, bool IsPod
>
60 * Constructs an object in the uninitialized memory at *aDst with aArgs.
62 template<typename
... Args
>
64 static inline void new_(T
* aDst
, Args
&&... aArgs
)
66 new(KnownNotNull
, aDst
) T(std::forward
<Args
>(aArgs
)...);
69 /* Destroys constructed objects in the range [aBegin, aEnd). */
70 static inline void destroy(T
* aBegin
, T
* aEnd
)
72 MOZ_ASSERT(aBegin
<= aEnd
);
73 for (T
* p
= aBegin
; p
< aEnd
; ++p
) {
78 /* Constructs objects in the uninitialized range [aBegin, aEnd). */
79 static inline void initialize(T
* aBegin
, T
* aEnd
)
81 MOZ_ASSERT(aBegin
<= aEnd
);
82 for (T
* p
= aBegin
; p
< aEnd
; ++p
) {
88 * Copy-constructs objects in the uninitialized range
89 * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
92 static inline void copyConstruct(T
* aDst
,
93 const U
* aSrcStart
, const U
* aSrcEnd
)
95 MOZ_ASSERT(aSrcStart
<= aSrcEnd
);
96 for (const U
* p
= aSrcStart
; p
< aSrcEnd
; ++p
, ++aDst
) {
102 * Move-constructs objects in the uninitialized range
103 * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
106 static inline void moveConstruct(T
* aDst
, U
* aSrcStart
, U
* aSrcEnd
)
108 MOZ_ASSERT(aSrcStart
<= aSrcEnd
);
109 for (U
* p
= aSrcStart
; p
< aSrcEnd
; ++p
, ++aDst
) {
110 new_(aDst
, std::move(*p
));
115 * Copy-constructs objects in the uninitialized range [aDst, aDst+aN) from
116 * the same object aU.
119 static inline void copyConstructN(T
* aDst
, size_t aN
, const U
& aU
)
121 for (T
* end
= aDst
+ aN
; aDst
< end
; ++aDst
) {
127 * Grows the given buffer to have capacity aNewCap, preserving the objects
128 * constructed in the range [begin, end) and updating aV. Assumes that (1)
129 * aNewCap has not overflowed, and (2) multiplying aNewCap by sizeof(T) will
132 static inline MOZ_MUST_USE
bool
133 growTo(Vector
<T
, N
, AP
>& aV
, size_t aNewCap
)
135 MOZ_ASSERT(!aV
.usingInlineStorage());
136 MOZ_ASSERT(!CapacityHasExcessSpace
<T
>(aNewCap
));
137 T
* newbuf
= aV
.template pod_malloc
<T
>(aNewCap
);
138 if (MOZ_UNLIKELY(!newbuf
)) {
142 T
* src
= aV
.beginNoCheck();
143 for (; src
< aV
.endNoCheck(); ++dst
, ++src
) {
144 new_(dst
, std::move(*src
));
146 VectorImpl::destroy(aV
.beginNoCheck(), aV
.endNoCheck());
147 aV
.free_(aV
.mBegin
, aV
.mTail
.mCapacity
);
149 /* aV.mLength is unchanged. */
150 aV
.mTail
.mCapacity
= aNewCap
;
156 * This partial template specialization provides a default implementation for
157 * vector operations when the element type is known to be a POD, as judged by
160 template<typename T
, size_t N
, class AP
>
161 struct VectorImpl
<T
, N
, AP
, true>
163 template<typename
... Args
>
165 static inline void new_(T
* aDst
, Args
&&... aArgs
)
167 // Explicitly construct a local object instead of using a temporary since
168 // T(args...) will be treated like a C-style cast in the unary case and
169 // allow unsafe conversions. Both forms should be equivalent to an
170 // optimizing compiler.
171 T
temp(std::forward
<Args
>(aArgs
)...);
175 static inline void destroy(T
*, T
*) {}
177 static inline void initialize(T
* aBegin
, T
* aEnd
)
180 * You would think that memset would be a big win (or even break even)
181 * when we know T is a POD. But currently it's not. This is probably
182 * because |append| tends to be given small ranges and memset requires
183 * a function call that doesn't get inlined.
185 * memset(aBegin, 0, sizeof(T) * (aEnd - aBegin));
187 MOZ_ASSERT(aBegin
<= aEnd
);
188 for (T
* p
= aBegin
; p
< aEnd
; ++p
) {
194 static inline void copyConstruct(T
* aDst
,
195 const U
* aSrcStart
, const U
* aSrcEnd
)
198 * See above memset comment. Also, notice that copyConstruct is
199 * currently templated (T != U), so memcpy won't work without
202 * memcpy(aDst, aSrcStart, sizeof(T) * (aSrcEnd - aSrcStart));
204 MOZ_ASSERT(aSrcStart
<= aSrcEnd
);
205 for (const U
* p
= aSrcStart
; p
< aSrcEnd
; ++p
, ++aDst
) {
211 static inline void moveConstruct(T
* aDst
,
212 const U
* aSrcStart
, const U
* aSrcEnd
)
214 copyConstruct(aDst
, aSrcStart
, aSrcEnd
);
217 static inline void copyConstructN(T
* aDst
, size_t aN
, const T
& aT
)
219 for (T
* end
= aDst
+ aN
; aDst
< end
; ++aDst
) {
224 static inline MOZ_MUST_USE
bool
225 growTo(Vector
<T
, N
, AP
>& aV
, size_t aNewCap
)
227 MOZ_ASSERT(!aV
.usingInlineStorage());
228 MOZ_ASSERT(!CapacityHasExcessSpace
<T
>(aNewCap
));
230 aV
.template pod_realloc
<T
>(aV
.mBegin
, aV
.mTail
.mCapacity
, aNewCap
);
231 if (MOZ_UNLIKELY(!newbuf
)) {
235 /* aV.mLength is unchanged. */
236 aV
.mTail
.mCapacity
= aNewCap
;
241 podResizeToFit(Vector
<T
, N
, AP
>& aV
)
243 if (aV
.usingInlineStorage() || aV
.mLength
== aV
.mTail
.mCapacity
) {
247 aV
.free_(aV
.mBegin
, aV
.mTail
.mCapacity
);
248 aV
.mBegin
= aV
.inlineStorage();
249 aV
.mTail
.mCapacity
= aV
.kInlineCapacity
;
251 aV
.mTail
.mReserved
= 0;
256 aV
.template pod_realloc
<T
>(aV
.mBegin
, aV
.mTail
.mCapacity
, aV
.mLength
);
257 if (MOZ_UNLIKELY(!newbuf
)) {
261 aV
.mTail
.mCapacity
= aV
.mLength
;
263 aV
.mTail
.mReserved
= aV
.mLength
;
// A struct for TestVector.cpp to access private internal fields.
// DO NOT DEFINE IN YOUR OWN CODE.
// (Forward-declared here so Vector below can befriend it.)
struct VectorTesting;
272 } // namespace detail
275 * STL-like container providing a short-lived, dynamic buffer. Vector calls the
276 * constructors/destructors of all elements stored in its internal buffer, so
277 * non-PODs may be safely used. Additionally, Vector will store the first N
278 * elements in-place before resorting to dynamic allocation.
281 * - default and copy constructible, assignable, destructible
282 * - operations do not throw
283 * MinInlineCapacity requirements:
284 * - any value, however, MinInlineCapacity is clamped to min/max values
286 * - see "Allocation policies" in AllocPolicy.h (defaults to
287 * mozilla::MallocAllocPolicy)
289 * Vector is not reentrant: T member functions called during Vector member
290 * functions must not call back into the same object!
293 size_t MinInlineCapacity
= 0,
294 class AllocPolicy
= MallocAllocPolicy
>
295 class MOZ_NON_PARAM Vector final
: private AllocPolicy
299 static const bool kElemIsPod
= IsPod
<T
>::value
;
300 typedef detail::VectorImpl
<T
, MinInlineCapacity
, AllocPolicy
, kElemIsPod
> Impl
;
301 friend struct detail::VectorImpl
<T
, MinInlineCapacity
, AllocPolicy
, kElemIsPod
>;
303 friend struct detail::VectorTesting
;
305 MOZ_MUST_USE
bool growStorageBy(size_t aIncr
);
306 MOZ_MUST_USE
bool convertToHeapStorage(size_t aNewCap
);
307 MOZ_MUST_USE
bool maybeCheckSimulatedOOM(size_t aRequestedSize
);
309 /* magic constants */
312 * The maximum space allocated for inline element storage.
314 * We reduce space by what the AllocPolicy base class and prior Vector member
315 * fields likely consume to attempt to play well with binary size classes.
317 static constexpr size_t kMaxInlineBytes
=
318 1024 - (sizeof(AllocPolicy
) + sizeof(T
*) + sizeof(size_t) + sizeof(size_t));
321 * The number of T elements of inline capacity built into this Vector. This
322 * is usually |MinInlineCapacity|, but it may be less (or zero!) for large T.
324 * We use a partially-specialized template (not explicit specialization, which
325 * is only allowed at namespace scope) to compute this value. The benefit is
326 * that |sizeof(T)| need not be computed, and |T| doesn't have to be fully
327 * defined at the time |Vector<T>| appears, if no inline storage is requested.
329 template<size_t MinimumInlineCapacity
, size_t Dummy
>
330 struct ComputeCapacity
332 static constexpr size_t value
=
333 tl::Min
<MinimumInlineCapacity
, kMaxInlineBytes
/ sizeof(T
)>::value
;
336 template<size_t Dummy
>
337 struct ComputeCapacity
<0, Dummy
>
339 static constexpr size_t value
= 0;
342 /** The actual inline capacity in number of elements T. This may be zero! */
343 static constexpr size_t kInlineCapacity
=
344 ComputeCapacity
<MinInlineCapacity
, 0>::value
;
349 * Pointer to the buffer, be it inline or heap-allocated. Only [mBegin,
350 * mBegin + mLength) hold valid constructed T objects. The range [mBegin +
351 * mLength, mBegin + mCapacity) holds uninitialized memory. The range
352 * [mBegin + mLength, mBegin + mReserved) also holds uninitialized memory
353 * previously allocated by a call to reserve().
357 /* Number of elements in the vector. */
361 * Memory used to store capacity, reserved element count (debug builds only),
362 * and inline storage. The simple "answer" is:
368 * alignas(T) unsigned char mBytes[kInlineCapacity * sizeof(T)];
370 * but there are complications. First, C++ forbids zero-sized arrays that
371 * might result. Second, we don't want zero capacity to affect Vector's size
372 * (even empty classes take up a byte, unless they're base classes).
374 * Yet again, we eliminate the zero-sized array using partial specialization.
375 * And we eliminate potential size hit by putting capacity/reserved in one
376 * struct, then putting the array (if any) in a derived struct. If no array
377 * is needed, the derived struct won't consume extra space.
379 struct CapacityAndReserved
381 explicit CapacityAndReserved(size_t aCapacity
, size_t aReserved
)
382 : mCapacity(aCapacity
)
384 , mReserved(aReserved
)
387 CapacityAndReserved() = default;
389 /* Max number of elements storable in the vector without resizing. */
393 /* Max elements of reserved or used space in this vector. */
398 // Silence warnings about this struct possibly being padded dued to the
399 // alignas() in it -- there's nothing we can do to avoid it.
401 # pragma warning(push)
402 # pragma warning(disable:4324)
405 template<size_t Capacity
, size_t Dummy
>
406 struct CRAndStorage
: CapacityAndReserved
408 explicit CRAndStorage(size_t aCapacity
, size_t aReserved
)
409 : CapacityAndReserved(aCapacity
, aReserved
)
411 CRAndStorage() = default;
413 alignas(T
) unsigned char mBytes
[Capacity
* sizeof(T
)];
415 // GCC fails due to -Werror=strict-aliasing if |mBytes| is directly cast to
416 // T*. Indirecting through this function addresses the problem.
417 void* data() { return mBytes
; }
419 T
* storage() { return static_cast<T
*>(data()); }
422 template<size_t Dummy
>
423 struct CRAndStorage
<0, Dummy
> : CapacityAndReserved
425 explicit CRAndStorage(size_t aCapacity
, size_t aReserved
)
426 : CapacityAndReserved(aCapacity
, aReserved
)
428 CRAndStorage() = default;
430 T
* storage() { return nullptr; }
433 CRAndStorage
<kInlineCapacity
, 0> mTail
;
436 # pragma warning(pop)
440 friend class ReentrancyGuard
;
444 /* private accessors */
446 bool usingInlineStorage() const
448 return mBegin
== const_cast<Vector
*>(this)->inlineStorage();
453 return mTail
.storage();
456 T
* beginNoCheck() const
463 return mBegin
+ mLength
;
466 const T
* endNoCheck() const
468 return mBegin
+ mLength
;
473 * The amount of explicitly allocated space in this vector that is immediately
474 * available to be filled by appending additional elements. This value is
475 * always greater than or equal to |length()| -- the vector's actual elements
476 * are implicitly reserved. This value is always less than or equal to
477 * |capacity()|. It may be explicitly increased using the |reserve()| method.
479 size_t reserved() const
481 MOZ_ASSERT(mLength
<= mTail
.mReserved
);
482 MOZ_ASSERT(mTail
.mReserved
<= mTail
.mCapacity
);
483 return mTail
.mReserved
;
487 /* Append operations guaranteed to succeed due to pre-reserved space. */
488 template<typename U
> void internalAppend(U
&& aU
);
489 template<typename U
, size_t O
, class BP
>
490 void internalAppendAll(const Vector
<U
, O
, BP
>& aU
);
491 void internalAppendN(const T
& aT
, size_t aN
);
492 template<typename U
> void internalAppend(const U
* aBegin
, size_t aLength
);
495 static const size_t sMaxInlineStorage
= MinInlineCapacity
;
497 typedef T ElementType
;
499 explicit Vector(AllocPolicy
= AllocPolicy());
500 Vector(Vector
&&); /* Move constructor. */
501 Vector
& operator=(Vector
&&); /* Move assignment. */
506 const AllocPolicy
& allocPolicy() const { return *this; }
508 AllocPolicy
& allocPolicy() { return *this; }
510 enum { InlineLength
= MinInlineCapacity
};
512 size_t length() const { return mLength
; }
514 bool empty() const { return mLength
== 0; }
516 size_t capacity() const { return mTail
.mCapacity
; }
520 MOZ_ASSERT(!mEntered
);
524 const T
* begin() const
526 MOZ_ASSERT(!mEntered
);
532 MOZ_ASSERT(!mEntered
);
533 return mBegin
+ mLength
;
538 MOZ_ASSERT(!mEntered
);
539 return mBegin
+ mLength
;
542 T
& operator[](size_t aIndex
)
544 MOZ_ASSERT(!mEntered
);
545 MOZ_ASSERT(aIndex
< mLength
);
546 return begin()[aIndex
];
549 const T
& operator[](size_t aIndex
) const
551 MOZ_ASSERT(!mEntered
);
552 MOZ_ASSERT(aIndex
< mLength
);
553 return begin()[aIndex
];
558 MOZ_ASSERT(!mEntered
);
559 MOZ_ASSERT(!empty());
563 const T
& back() const
565 MOZ_ASSERT(!mEntered
);
566 MOZ_ASSERT(!empty());
575 Range(T
* aCur
, T
* aEnd
)
579 MOZ_ASSERT(aCur
<= aEnd
);
583 bool empty() const { return mCur
== mEnd
; }
584 size_t remain() const { return PointerRangeSize(mCur
, mEnd
); }
585 T
& front() const { MOZ_ASSERT(!empty()); return *mCur
; }
586 void popFront() { MOZ_ASSERT(!empty()); ++mCur
; }
587 T
popCopyFront() { MOZ_ASSERT(!empty()); return *mCur
++; }
595 ConstRange(const T
* aCur
, const T
* aEnd
)
599 MOZ_ASSERT(aCur
<= aEnd
);
603 bool empty() const { return mCur
== mEnd
; }
604 size_t remain() const { return PointerRangeSize(mCur
, mEnd
); }
605 const T
& front() const { MOZ_ASSERT(!empty()); return *mCur
; }
606 void popFront() { MOZ_ASSERT(!empty()); ++mCur
; }
607 T
popCopyFront() { MOZ_ASSERT(!empty()); return *mCur
++; }
610 Range
all() { return Range(begin(), end()); }
611 ConstRange
all() const { return ConstRange(begin(), end()); }
616 * Reverse the order of the elements in the vector in place.
621 * Given that the vector is empty, grow the internal capacity to |aRequest|,
622 * keeping the length 0.
624 MOZ_MUST_USE
bool initCapacity(size_t aRequest
);
627 * Given that the vector is empty, grow the internal capacity and length to
628 * |aRequest| leaving the elements' memory completely uninitialized (with all
629 * the associated hazards and caveats). This avoids the usual allocation-size
630 * rounding that happens in resize and overhead of initialization for elements
631 * that are about to be overwritten.
633 MOZ_MUST_USE
bool initLengthUninitialized(size_t aRequest
);
636 * If reserve(aRequest) succeeds and |aRequest >= length()|, then appending
637 * |aRequest - length()| elements, in any sequence of append/appendAll calls,
638 * is guaranteed to succeed.
640 * A request to reserve an amount less than the current length does not affect
643 MOZ_MUST_USE
bool reserve(size_t aRequest
);
646 * Destroy elements in the range [end() - aIncr, end()). Does not deallocate
647 * or unreserve storage for those elements.
649 void shrinkBy(size_t aIncr
);
652 * Destroy elements in the range [aNewLength, end()). Does not deallocate
653 * or unreserve storage for those elements.
655 void shrinkTo(size_t aNewLength
);
657 /** Grow the vector by aIncr elements. */
658 MOZ_MUST_USE
bool growBy(size_t aIncr
);
660 /** Call shrinkBy or growBy based on whether newSize > length(). */
661 MOZ_MUST_USE
bool resize(size_t aNewLength
);
664 * Increase the length of the vector, but don't initialize the new elements
665 * -- leave them as uninitialized memory.
667 MOZ_MUST_USE
bool growByUninitialized(size_t aIncr
);
668 void infallibleGrowByUninitialized(size_t aIncr
);
669 MOZ_MUST_USE
bool resizeUninitialized(size_t aNewLength
);
671 /** Shorthand for shrinkBy(length()). */
674 /** Clears and releases any heap-allocated storage. */
678 * Calls the AllocPolicy's pod_realloc to release excess capacity. Since
679 * realloc is only safe on PODs, this method fails to compile if IsPod<T>
682 void podResizeToFit();
685 * If true, appending |aNeeded| elements won't reallocate elements storage.
686 * This *doesn't* mean that infallibleAppend may be used! You still must
687 * reserve the extra space, even if this method indicates that appends won't
688 * need to reallocate elements storage.
690 bool canAppendWithoutRealloc(size_t aNeeded
) const;
692 /** Potentially fallible append operations. */
695 * This can take either a T& or a T&&. Given a T&&, it moves |aU| into the
696 * vector, instead of copying it. If it fails, |aU| is left unmoved. ("We are
699 template<typename U
> MOZ_MUST_USE
bool append(U
&& aU
);
702 * Construct a T in-place as a new entry at the end of this vector.
704 template<typename
... Args
>
705 MOZ_MUST_USE
bool emplaceBack(Args
&&... aArgs
)
707 if (!growByUninitialized(1))
709 Impl::new_(&back(), std::forward
<Args
>(aArgs
)...);
713 template<typename U
, size_t O
, class BP
>
714 MOZ_MUST_USE
bool appendAll(const Vector
<U
, O
, BP
>& aU
);
715 MOZ_MUST_USE
bool appendN(const T
& aT
, size_t aN
);
716 template<typename U
> MOZ_MUST_USE
bool append(const U
* aBegin
, const U
* aEnd
);
717 template<typename U
> MOZ_MUST_USE
bool append(const U
* aBegin
, size_t aLength
);
720 * Guaranteed-infallible append operations for use upon vectors whose
721 * memory has been pre-reserved. Don't use this if you haven't reserved the
724 template<typename U
> void infallibleAppend(U
&& aU
)
726 internalAppend(std::forward
<U
>(aU
));
728 void infallibleAppendN(const T
& aT
, size_t aN
)
730 internalAppendN(aT
, aN
);
732 template<typename U
> void infallibleAppend(const U
* aBegin
, const U
* aEnd
)
734 internalAppend(aBegin
, PointerRangeSize(aBegin
, aEnd
));
736 template<typename U
> void infallibleAppend(const U
* aBegin
, size_t aLength
)
738 internalAppend(aBegin
, aLength
);
740 template<typename
... Args
>
741 void infallibleEmplaceBack(Args
&&... aArgs
)
743 infallibleGrowByUninitialized(1);
744 Impl::new_(&back(), std::forward
<Args
>(aArgs
)...);
752 * If elements are stored in-place, return nullptr and leave this vector
755 * Otherwise return this vector's elements buffer, and clear this vector as if
756 * by clearAndFree(). The caller now owns the buffer and is responsible for
757 * deallocating it consistent with this vector's AllocPolicy.
759 * N.B. Although a T*, only the range [0, length()) is constructed.
761 MOZ_MUST_USE T
* extractRawBuffer();
764 * If elements are stored in-place, allocate a new buffer, move this vector's
765 * elements into it, and return that buffer.
767 * Otherwise return this vector's elements buffer. The caller now owns the
768 * buffer and is responsible for deallocating it consistent with this vector's
771 * This vector is cleared, as if by clearAndFree(), when this method
772 * succeeds. This method fails and returns nullptr only if new elements buffer
775 * N.B. Only the range [0, length()) of the returned buffer is constructed.
776 * If any of these elements are uninitialized (as growByUninitialized
777 * enables), behavior is undefined.
779 MOZ_MUST_USE T
* extractOrCopyRawBuffer();
782 * Transfer ownership of an array of objects into the vector. The caller
783 * must have allocated the array in accordance with this vector's
786 * N.B. This call assumes that there are no uninitialized elements in the
787 * passed range [aP, aP + aLength). The range [aP + aLength, aP +
788 * aCapacity) must be allocated uninitialized memory.
790 void replaceRawBuffer(T
* aP
, size_t aLength
, size_t aCapacity
);
793 * Transfer ownership of an array of objects into the vector. The caller
794 * must have allocated the array in accordance with this vector's
797 * N.B. This call assumes that there are no uninitialized elements in the
800 void replaceRawBuffer(T
* aP
, size_t aLength
);
803 * Places |aVal| at position |aP|, shifting existing elements from |aP| onward
804 * one position higher. On success, |aP| should not be reused because it'll
805 * be a dangling pointer if reallocation of the vector storage occurred; the
806 * return value should be used instead. On failure, nullptr is returned.
810 * if (!(p = vec.insert(p, val))) {
813 * <keep working with p>
815 * This is inherently a linear-time operation. Be careful!
818 MOZ_MUST_USE T
* insert(T
* aP
, U
&& aVal
);
821 * Removes the element |aT|, which must fall in the bounds [begin, end),
822 * shifting existing elements from |aT + 1| onward one position lower.
827 * Removes the elements [|aBegin|, |aEnd|), which must fall in the bounds
828 * [begin, end), shifting existing elements from |aEnd + 1| onward to aBegin's
831 void erase(T
* aBegin
, T
* aEnd
);
834 * Measure the size of the vector's heap-allocated storage.
836 size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf
) const;
839 * Like sizeOfExcludingThis, but also measures the size of the vector
840 * object (which must be heap-allocated) itself.
842 size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf
) const;
844 void swap(Vector
& aOther
);
847 Vector(const Vector
&) = delete;
848 void operator=(const Vector
&) = delete;
/* This does the re-entrancy check plus several other sanity checks. */
/* Expanded at the top of mutating Vector member functions; ReentrancyGuard
   uses the DEBUG-only mEntered flag, and reserved() only exists in DEBUG,
   so MOZ_ASSERT compiles everything away in release builds. */
#define MOZ_REENTRANCY_GUARD_ET_AL \
  ReentrancyGuard g(*this); \
  MOZ_ASSERT_IF(usingInlineStorage(), mTail.mCapacity == kInlineCapacity); \
  MOZ_ASSERT(reserved() <= mTail.mCapacity); \
  MOZ_ASSERT(mLength <= reserved()); \
  MOZ_ASSERT(mLength <= mTail.mCapacity)
859 /* Vector Implementation */
861 template<typename T
, size_t N
, class AP
>
863 Vector
<T
, N
, AP
>::Vector(AP aAP
)
866 , mTail(kInlineCapacity
, 0)
871 mBegin
= inlineStorage();
874 /* Move constructor. */
875 template<typename T
, size_t N
, class AllocPolicy
>
877 Vector
<T
, N
, AllocPolicy
>::Vector(Vector
&& aRhs
)
878 : AllocPolicy(std::move(aRhs
))
883 mLength
= aRhs
.mLength
;
884 mTail
.mCapacity
= aRhs
.mTail
.mCapacity
;
886 mTail
.mReserved
= aRhs
.mTail
.mReserved
;
889 if (aRhs
.usingInlineStorage()) {
890 /* We can't move the buffer over in this case, so copy elements. */
891 mBegin
= inlineStorage();
892 Impl::moveConstruct(mBegin
, aRhs
.beginNoCheck(), aRhs
.endNoCheck());
894 * Leave aRhs's mLength, mBegin, mCapacity, and mReserved as they are.
895 * The elements in its in-line storage still need to be destroyed.
899 * Take src's buffer, and turn src into an empty vector using
902 mBegin
= aRhs
.mBegin
;
903 aRhs
.mBegin
= aRhs
.inlineStorage();
904 aRhs
.mTail
.mCapacity
= kInlineCapacity
;
907 aRhs
.mTail
.mReserved
= 0;
912 /* Move assignment. */
913 template<typename T
, size_t N
, class AP
>
914 MOZ_ALWAYS_INLINE Vector
<T
, N
, AP
>&
915 Vector
<T
, N
, AP
>::operator=(Vector
&& aRhs
)
917 MOZ_ASSERT(this != &aRhs
, "self-move assignment is prohibited");
919 new(KnownNotNull
, this) Vector(std::move(aRhs
));
923 template<typename T
, size_t N
, class AP
>
925 Vector
<T
, N
, AP
>::~Vector()
927 MOZ_REENTRANCY_GUARD_ET_AL
;
928 Impl::destroy(beginNoCheck(), endNoCheck());
929 if (!usingInlineStorage()) {
930 this->free_(beginNoCheck(), mTail
.mCapacity
);
934 template<typename T
, size_t N
, class AP
>
935 MOZ_ALWAYS_INLINE
void
936 Vector
<T
, N
, AP
>::reverse() {
937 MOZ_REENTRANCY_GUARD_ET_AL
;
939 size_t len
= mLength
;
940 size_t mid
= len
/ 2;
941 for (size_t i
= 0; i
< mid
; i
++) {
942 Swap(elems
[i
], elems
[len
- i
- 1]);
947 * This function will create a new heap buffer with capacity aNewCap,
948 * move all elements in the inline buffer to this new buffer,
951 template<typename T
, size_t N
, class AP
>
953 Vector
<T
, N
, AP
>::convertToHeapStorage(size_t aNewCap
)
955 MOZ_ASSERT(usingInlineStorage());
957 /* Allocate buffer. */
958 MOZ_ASSERT(!detail::CapacityHasExcessSpace
<T
>(aNewCap
));
959 T
* newBuf
= this->template pod_malloc
<T
>(aNewCap
);
960 if (MOZ_UNLIKELY(!newBuf
)) {
964 /* Copy inline elements into heap buffer. */
965 Impl::moveConstruct(newBuf
, beginNoCheck(), endNoCheck());
966 Impl::destroy(beginNoCheck(), endNoCheck());
968 /* Switch in heap buffer. */
970 /* mLength is unchanged. */
971 mTail
.mCapacity
= aNewCap
;
975 template<typename T
, size_t N
, class AP
>
976 MOZ_NEVER_INLINE
bool
977 Vector
<T
, N
, AP
>::growStorageBy(size_t aIncr
)
979 MOZ_ASSERT(mLength
+ aIncr
> mTail
.mCapacity
);
982 * When choosing a new capacity, its size should is as close to 2**N bytes
983 * as possible. 2**N-sized requests are best because they are unlikely to
984 * be rounded up by the allocator. Asking for a 2**N number of elements
985 * isn't as good, because if sizeof(T) is not a power-of-two that would
986 * result in a non-2**N request size.
992 if (usingInlineStorage()) {
993 /* This case occurs in ~70--80% of the calls to this function. */
995 tl::RoundUpPow2
<(kInlineCapacity
+ 1) * sizeof(T
)>::value
;
996 newCap
= newSize
/ sizeof(T
);
1001 /* This case occurs in ~0--10% of the calls to this function. */
1006 /* This case occurs in ~15--20% of the calls to this function. */
1009 * Will mLength * 4 *sizeof(T) overflow? This condition limits a vector
1010 * to 1GB of memory on a 32-bit system, which is a reasonable limit. It
1013 * static_cast<char*>(end()) - static_cast<char*>(begin())
1015 * doesn't overflow ptrdiff_t (see bug 510319).
1017 if (MOZ_UNLIKELY(mLength
& tl::MulOverflowMask
<4 * sizeof(T
)>::value
)) {
1018 this->reportAllocOverflow();
1023 * If we reach here, the existing capacity will have a size that is already
1024 * as close to 2^N as sizeof(T) will allow. Just double the capacity, and
1025 * then there might be space for one more element.
1027 newCap
= mLength
* 2;
1028 if (detail::CapacityHasExcessSpace
<T
>(newCap
)) {
1032 /* This case occurs in ~2% of the calls to this function. */
1033 size_t newMinCap
= mLength
+ aIncr
;
1035 /* Did mLength + aIncr overflow? Will newCap * sizeof(T) overflow? */
1036 if (MOZ_UNLIKELY(newMinCap
< mLength
||
1037 newMinCap
& tl::MulOverflowMask
<2 * sizeof(T
)>::value
))
1039 this->reportAllocOverflow();
1043 size_t newMinSize
= newMinCap
* sizeof(T
);
1044 size_t newSize
= RoundUpPow2(newMinSize
);
1045 newCap
= newSize
/ sizeof(T
);
1048 if (usingInlineStorage()) {
1050 return convertToHeapStorage(newCap
);
1054 return Impl::growTo(*this, newCap
);
1057 template<typename T
, size_t N
, class AP
>
1059 Vector
<T
, N
, AP
>::initCapacity(size_t aRequest
)
1061 MOZ_ASSERT(empty());
1062 MOZ_ASSERT(usingInlineStorage());
1063 if (aRequest
== 0) {
1066 T
* newbuf
= this->template pod_malloc
<T
>(aRequest
);
1067 if (MOZ_UNLIKELY(!newbuf
)) {
1071 mTail
.mCapacity
= aRequest
;
1073 mTail
.mReserved
= aRequest
;
1078 template<typename T
, size_t N
, class AP
>
1080 Vector
<T
, N
, AP
>::initLengthUninitialized(size_t aRequest
)
1082 if (!initCapacity(aRequest
)) {
1085 infallibleGrowByUninitialized(aRequest
);
1089 template<typename T
, size_t N
, class AP
>
1091 Vector
<T
, N
, AP
>::maybeCheckSimulatedOOM(size_t aRequestedSize
)
1093 if (aRequestedSize
<= N
) {
1098 if (aRequestedSize
<= mTail
.mReserved
) {
1103 return allocPolicy().checkSimulatedOOM();
1106 template<typename T
, size_t N
, class AP
>
1108 Vector
<T
, N
, AP
>::reserve(size_t aRequest
)
1110 MOZ_REENTRANCY_GUARD_ET_AL
;
1111 if (aRequest
> mTail
.mCapacity
) {
1112 if (MOZ_UNLIKELY(!growStorageBy(aRequest
- mLength
))) {
1115 } else if (!maybeCheckSimulatedOOM(aRequest
)) {
1119 if (aRequest
> mTail
.mReserved
) {
1120 mTail
.mReserved
= aRequest
;
1122 MOZ_ASSERT(mLength
<= mTail
.mReserved
);
1123 MOZ_ASSERT(mTail
.mReserved
<= mTail
.mCapacity
);
1128 template<typename T
, size_t N
, class AP
>
1130 Vector
<T
, N
, AP
>::shrinkBy(size_t aIncr
)
1132 MOZ_REENTRANCY_GUARD_ET_AL
;
1133 MOZ_ASSERT(aIncr
<= mLength
);
1134 Impl::destroy(endNoCheck() - aIncr
, endNoCheck());
1138 template<typename T
, size_t N
, class AP
>
1139 MOZ_ALWAYS_INLINE
void
1140 Vector
<T
, N
, AP
>::shrinkTo(size_t aNewLength
)
1142 MOZ_ASSERT(aNewLength
<= mLength
);
1143 shrinkBy(mLength
- aNewLength
);
1146 template<typename T
, size_t N
, class AP
>
1147 MOZ_ALWAYS_INLINE
bool
1148 Vector
<T
, N
, AP
>::growBy(size_t aIncr
)
1150 MOZ_REENTRANCY_GUARD_ET_AL
;
1151 if (aIncr
> mTail
.mCapacity
- mLength
) {
1152 if (MOZ_UNLIKELY(!growStorageBy(aIncr
))) {
1155 } else if (!maybeCheckSimulatedOOM(mLength
+ aIncr
)) {
1158 MOZ_ASSERT(mLength
+ aIncr
<= mTail
.mCapacity
);
1159 T
* newend
= endNoCheck() + aIncr
;
1160 Impl::initialize(endNoCheck(), newend
);
1163 if (mLength
> mTail
.mReserved
) {
1164 mTail
.mReserved
= mLength
;
1170 template<typename T
, size_t N
, class AP
>
1171 MOZ_ALWAYS_INLINE
bool
1172 Vector
<T
, N
, AP
>::growByUninitialized(size_t aIncr
)
1174 MOZ_REENTRANCY_GUARD_ET_AL
;
1175 if (aIncr
> mTail
.mCapacity
- mLength
) {
1176 if (MOZ_UNLIKELY(!growStorageBy(aIncr
))) {
1179 } else if (!maybeCheckSimulatedOOM(mLength
+ aIncr
)) {
1183 if (mLength
+ aIncr
> mTail
.mReserved
) {
1184 mTail
.mReserved
= mLength
+ aIncr
;
1187 infallibleGrowByUninitialized(aIncr
);
1191 template<typename T
, size_t N
, class AP
>
1192 MOZ_ALWAYS_INLINE
void
1193 Vector
<T
, N
, AP
>::infallibleGrowByUninitialized(size_t aIncr
)
1195 MOZ_ASSERT(mLength
+ aIncr
<= reserved());
1199 template<typename T
, size_t N
, class AP
>
1201 Vector
<T
, N
, AP
>::resize(size_t aNewLength
)
1203 size_t curLength
= mLength
;
1204 if (aNewLength
> curLength
) {
1205 return growBy(aNewLength
- curLength
);
1207 shrinkBy(curLength
- aNewLength
);
1211 template<typename T
, size_t N
, class AP
>
1212 MOZ_ALWAYS_INLINE
bool
1213 Vector
<T
, N
, AP
>::resizeUninitialized(size_t aNewLength
)
1215 size_t curLength
= mLength
;
1216 if (aNewLength
> curLength
) {
1217 return growByUninitialized(aNewLength
- curLength
);
1219 shrinkBy(curLength
- aNewLength
);
1223 template<typename T
, size_t N
, class AP
>
1225 Vector
<T
, N
, AP
>::clear()
1227 MOZ_REENTRANCY_GUARD_ET_AL
;
1228 Impl::destroy(beginNoCheck(), endNoCheck());
1232 template<typename T
, size_t N
, class AP
>
1234 Vector
<T
, N
, AP
>::clearAndFree()
1238 if (usingInlineStorage()) {
1241 this->free_(beginNoCheck(), mTail
.mCapacity
);
1242 mBegin
= inlineStorage();
1243 mTail
.mCapacity
= kInlineCapacity
;
1245 mTail
.mReserved
= 0;
1249 template<typename T
, size_t N
, class AP
>
1251 Vector
<T
, N
, AP
>::podResizeToFit()
1253 // This function is only defined if IsPod is true and will fail to compile
1255 Impl::podResizeToFit(*this);
1258 template<typename T
, size_t N
, class AP
>
1260 Vector
<T
, N
, AP
>::canAppendWithoutRealloc(size_t aNeeded
) const
1262 return mLength
+ aNeeded
<= mTail
.mCapacity
;
1265 template<typename T
, size_t N
, class AP
>
1266 template<typename U
, size_t O
, class BP
>
1267 MOZ_ALWAYS_INLINE
void
1268 Vector
<T
, N
, AP
>::internalAppendAll(const Vector
<U
, O
, BP
>& aOther
)
1270 internalAppend(aOther
.begin(), aOther
.length());
1273 template<typename T
, size_t N
, class AP
>
1274 template<typename U
>
1275 MOZ_ALWAYS_INLINE
void
1276 Vector
<T
, N
, AP
>::internalAppend(U
&& aU
)
1278 MOZ_ASSERT(mLength
+ 1 <= mTail
.mReserved
);
1279 MOZ_ASSERT(mTail
.mReserved
<= mTail
.mCapacity
);
1280 Impl::new_(endNoCheck(), std::forward
<U
>(aU
));
1284 template<typename T
, size_t N
, class AP
>
1285 MOZ_ALWAYS_INLINE
bool
1286 Vector
<T
, N
, AP
>::appendN(const T
& aT
, size_t aNeeded
)
1288 MOZ_REENTRANCY_GUARD_ET_AL
;
1289 if (mLength
+ aNeeded
> mTail
.mCapacity
) {
1290 if (MOZ_UNLIKELY(!growStorageBy(aNeeded
))) {
1293 } else if (!maybeCheckSimulatedOOM(mLength
+ aNeeded
)) {
1297 if (mLength
+ aNeeded
> mTail
.mReserved
) {
1298 mTail
.mReserved
= mLength
+ aNeeded
;
1301 internalAppendN(aT
, aNeeded
);
1305 template<typename T
, size_t N
, class AP
>
1306 MOZ_ALWAYS_INLINE
void
1307 Vector
<T
, N
, AP
>::internalAppendN(const T
& aT
, size_t aNeeded
)
1309 MOZ_ASSERT(mLength
+ aNeeded
<= mTail
.mReserved
);
1310 MOZ_ASSERT(mTail
.mReserved
<= mTail
.mCapacity
);
1311 Impl::copyConstructN(endNoCheck(), aNeeded
, aT
);
1315 template<typename T
, size_t N
, class AP
>
1316 template<typename U
>
1318 Vector
<T
, N
, AP
>::insert(T
* aP
, U
&& aVal
)
1320 MOZ_ASSERT(begin() <= aP
);
1321 MOZ_ASSERT(aP
<= end());
1322 size_t pos
= aP
- begin();
1323 MOZ_ASSERT(pos
<= mLength
);
1324 size_t oldLength
= mLength
;
1325 if (pos
== oldLength
) {
1326 if (!append(std::forward
<U
>(aVal
))) {
1330 T oldBack
= std::move(back());
1331 if (!append(std::move(oldBack
))) {
1334 for (size_t i
= oldLength
- 1; i
> pos
; --i
) {
1335 (*this)[i
] = std::move((*this)[i
- 1]);
1337 (*this)[pos
] = std::forward
<U
>(aVal
);
1339 return begin() + pos
;
1342 template<typename T
, size_t N
, class AP
>
1344 Vector
<T
, N
, AP
>::erase(T
* aIt
)
1346 MOZ_ASSERT(begin() <= aIt
);
1347 MOZ_ASSERT(aIt
< end());
1348 while (aIt
+ 1 < end()) {
1349 *aIt
= std::move(*(aIt
+ 1));
1355 template<typename T
, size_t N
, class AP
>
1357 Vector
<T
, N
, AP
>::erase(T
* aBegin
, T
* aEnd
)
1359 MOZ_ASSERT(begin() <= aBegin
);
1360 MOZ_ASSERT(aBegin
<= aEnd
);
1361 MOZ_ASSERT(aEnd
<= end());
1362 while (aEnd
< end()) {
1363 *aBegin
++ = std::move(*aEnd
++);
1365 shrinkBy(aEnd
- aBegin
);
1368 template<typename T
, size_t N
, class AP
>
1369 template<typename U
>
1370 MOZ_ALWAYS_INLINE
bool
1371 Vector
<T
, N
, AP
>::append(const U
* aInsBegin
, const U
* aInsEnd
)
1373 MOZ_REENTRANCY_GUARD_ET_AL
;
1374 size_t aNeeded
= PointerRangeSize(aInsBegin
, aInsEnd
);
1375 if (mLength
+ aNeeded
> mTail
.mCapacity
) {
1376 if (MOZ_UNLIKELY(!growStorageBy(aNeeded
))) {
1379 } else if (!maybeCheckSimulatedOOM(mLength
+ aNeeded
)) {
1383 if (mLength
+ aNeeded
> mTail
.mReserved
) {
1384 mTail
.mReserved
= mLength
+ aNeeded
;
1387 internalAppend(aInsBegin
, aNeeded
);
1391 template<typename T
, size_t N
, class AP
>
1392 template<typename U
>
1393 MOZ_ALWAYS_INLINE
void
1394 Vector
<T
, N
, AP
>::internalAppend(const U
* aInsBegin
, size_t aInsLength
)
1396 MOZ_ASSERT(mLength
+ aInsLength
<= mTail
.mReserved
);
1397 MOZ_ASSERT(mTail
.mReserved
<= mTail
.mCapacity
);
1398 Impl::copyConstruct(endNoCheck(), aInsBegin
, aInsBegin
+ aInsLength
);
1399 mLength
+= aInsLength
;
1402 template<typename T
, size_t N
, class AP
>
1403 template<typename U
>
1404 MOZ_ALWAYS_INLINE
bool
1405 Vector
<T
, N
, AP
>::append(U
&& aU
)
1407 MOZ_REENTRANCY_GUARD_ET_AL
;
1408 if (mLength
== mTail
.mCapacity
) {
1409 if (MOZ_UNLIKELY(!growStorageBy(1))) {
1412 } else if (!maybeCheckSimulatedOOM(mLength
+ 1)) {
1416 if (mLength
+ 1 > mTail
.mReserved
) {
1417 mTail
.mReserved
= mLength
+ 1;
1420 internalAppend(std::forward
<U
>(aU
));
1424 template<typename T
, size_t N
, class AP
>
1425 template<typename U
, size_t O
, class BP
>
1426 MOZ_ALWAYS_INLINE
bool
1427 Vector
<T
, N
, AP
>::appendAll(const Vector
<U
, O
, BP
>& aOther
)
1429 return append(aOther
.begin(), aOther
.length());
1432 template<typename T
, size_t N
, class AP
>
1434 MOZ_ALWAYS_INLINE
bool
1435 Vector
<T
, N
, AP
>::append(const U
* aInsBegin
, size_t aInsLength
)
1437 return append(aInsBegin
, aInsBegin
+ aInsLength
);
1440 template<typename T
, size_t N
, class AP
>
1441 MOZ_ALWAYS_INLINE
void
1442 Vector
<T
, N
, AP
>::popBack()
1444 MOZ_REENTRANCY_GUARD_ET_AL
;
1445 MOZ_ASSERT(!empty());
1450 template<typename T
, size_t N
, class AP
>
1452 Vector
<T
, N
, AP
>::popCopy()
1459 template<typename T
, size_t N
, class AP
>
1461 Vector
<T
, N
, AP
>::extractRawBuffer()
1463 MOZ_REENTRANCY_GUARD_ET_AL
;
1465 if (usingInlineStorage()) {
1470 mBegin
= inlineStorage();
1472 mTail
.mCapacity
= kInlineCapacity
;
1474 mTail
.mReserved
= 0;
1479 template<typename T
, size_t N
, class AP
>
1481 Vector
<T
, N
, AP
>::extractOrCopyRawBuffer()
1483 if (T
* ret
= extractRawBuffer()) {
1487 MOZ_REENTRANCY_GUARD_ET_AL
;
1489 T
* copy
= this->template pod_malloc
<T
>(mLength
);
1494 Impl::moveConstruct(copy
, beginNoCheck(), endNoCheck());
1495 Impl::destroy(beginNoCheck(), endNoCheck());
1496 mBegin
= inlineStorage();
1498 mTail
.mCapacity
= kInlineCapacity
;
1500 mTail
.mReserved
= 0;
1505 template<typename T
, size_t N
, class AP
>
1507 Vector
<T
, N
, AP
>::replaceRawBuffer(T
* aP
, size_t aLength
, size_t aCapacity
)
1509 MOZ_REENTRANCY_GUARD_ET_AL
;
1511 /* Destroy what we have. */
1512 Impl::destroy(beginNoCheck(), endNoCheck());
1513 if (!usingInlineStorage()) {
1514 this->free_(beginNoCheck(), mTail
.mCapacity
);
1517 /* Take in the new buffer. */
1518 if (aCapacity
<= kInlineCapacity
) {
1520 * We convert to inline storage if possible, even though aP might
1521 * otherwise be acceptable. Maybe this behaviour should be
1522 * specifiable with an argument to this function.
1524 mBegin
= inlineStorage();
1526 mTail
.mCapacity
= kInlineCapacity
;
1527 Impl::moveConstruct(mBegin
, aP
, aP
+ aLength
);
1528 Impl::destroy(aP
, aP
+ aLength
);
1529 this->free_(aP
, aCapacity
);
1533 mTail
.mCapacity
= aCapacity
;
1536 mTail
.mReserved
= aCapacity
;
1540 template<typename T
, size_t N
, class AP
>
1542 Vector
<T
, N
, AP
>::replaceRawBuffer(T
* aP
, size_t aLength
)
1544 replaceRawBuffer(aP
, aLength
, aLength
);
1547 template<typename T
, size_t N
, class AP
>
1549 Vector
<T
, N
, AP
>::sizeOfExcludingThis(MallocSizeOf aMallocSizeOf
) const
1551 return usingInlineStorage() ? 0 : aMallocSizeOf(beginNoCheck());
1554 template<typename T
, size_t N
, class AP
>
1556 Vector
<T
, N
, AP
>::sizeOfIncludingThis(MallocSizeOf aMallocSizeOf
) const
1558 return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf
);
1561 template<typename T
, size_t N
, class AP
>
1563 Vector
<T
, N
, AP
>::swap(Vector
& aOther
)
1565 static_assert(N
== 0,
1566 "still need to implement this for N != 0");
1568 // This only works when inline storage is always empty.
1569 if (!usingInlineStorage() && aOther
.usingInlineStorage()) {
1570 aOther
.mBegin
= mBegin
;
1571 mBegin
= inlineStorage();
1572 } else if (usingInlineStorage() && !aOther
.usingInlineStorage()) {
1573 mBegin
= aOther
.mBegin
;
1574 aOther
.mBegin
= aOther
.inlineStorage();
1575 } else if (!usingInlineStorage() && !aOther
.usingInlineStorage()) {
1576 Swap(mBegin
, aOther
.mBegin
);
1578 // This case is a no-op, since we'd set both to use their inline storage.
1581 Swap(mLength
, aOther
.mLength
);
1582 Swap(mTail
.mCapacity
, aOther
.mTail
.mCapacity
);
1584 Swap(mTail
.mReserved
, aOther
.mTail
.mReserved
);
1588 } // namespace mozilla
1591 #pragma warning(pop)
1594 #endif /* mozilla_Vector_h */