/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* A type/length-parametrized vector class. */

#ifndef mozilla_Vector_h
#define mozilla_Vector_h
#include <algorithm>  // for std::remove_if
#include <new>        // for placement new
#include <utility>    // for std::forward, std::move, std::swap
#include "mozilla/Alignment.h"
#include "mozilla/AllocPolicy.h"
#include "mozilla/ArrayUtils.h"  // for PointerRangeSize
#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/OperatorNewExtensions.h"
#include "mozilla/ReentrancyGuard.h"
#include "mozilla/Span.h"
#include "mozilla/TemplateLib.h"
#include "mozilla/TypeTraits.h"
namespace mozilla {

template <typename T, size_t N, class AllocPolicy>
class Vector;

namespace detail {
/*
 * Check that the given capacity wastes the minimal amount of space if
 * allocated on the heap. This means that aCapacity*sizeof(T) is as close to a
 * power-of-two as possible. growStorageBy() is responsible for ensuring this.
 */
template <typename T>
static bool CapacityHasExcessSpace(size_t aCapacity) {
  size_t size = aCapacity * sizeof(T);
  return RoundUpPow2(size) - size >= sizeof(T);
}
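
/*
 * Illustrative numbers (not part of the original header), assuming a
 * hypothetical element type of size 12: a capacity of 3 gives size == 36,
 * RoundUpPow2(36) == 64, and 64 - 36 == 28 >= 12, so the capacity wastes more
 * than one element's worth of space and CapacityHasExcessSpace returns true.
 * A capacity of 5 gives size == 60, RoundUpPow2(60) == 64, and 64 - 60 == 4
 * < 12, so it returns false.
 */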
/*
 * This template class provides a default implementation for vector operations
 * when the element type is not known to be a POD, as judged by IsPod.
 */
template <typename T, size_t N, class AP, bool IsPod>
struct VectorImpl {
  /*
   * Constructs an object in the uninitialized memory at *aDst with aArgs.
   */
  template <typename... Args>
  static inline void new_(T* aDst, Args&&... aArgs) {
    new (KnownNotNull, aDst) T(std::forward<Args>(aArgs)...);
  }
  /* Destroys constructed objects in the range [aBegin, aEnd). */
  static inline void destroy(T* aBegin, T* aEnd) {
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      p->~T();
    }
  }
  /* Constructs objects in the uninitialized range [aBegin, aEnd). */
  static inline void initialize(T* aBegin, T* aEnd) {
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      new_(p);
    }
  }
  /*
   * Copy-constructs objects in the uninitialized range
   * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
   */
  template <typename U>
  static inline void copyConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      new_(aDst, *p);
    }
  }
  /*
   * Move-constructs objects in the uninitialized range
   * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
   */
  template <typename U>
  static inline void moveConstruct(T* aDst, U* aSrcStart, U* aSrcEnd) {
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      new_(aDst, std::move(*p));
    }
  }
  /*
   * Copy-constructs objects in the uninitialized range [aDst, aDst+aN) from
   * the same object aU.
   */
  template <typename U>
  static inline void copyConstructN(T* aDst, size_t aN, const U& aU) {
    for (T* end = aDst + aN; aDst < end; ++aDst) {
      new_(aDst, aU);
    }
  }
  /*
   * Grows the given buffer to have capacity aNewCap, preserving the objects
   * constructed in the range [begin, end) and updating aV. Assumes that (1)
   * aNewCap has not overflowed, and (2) multiplying aNewCap by sizeof(T) will
   * not overflow.
   */
  [[nodiscard]] static inline bool growTo(Vector<T, N, AP>& aV,
                                          size_t aNewCap) {
    MOZ_ASSERT(!aV.usingInlineStorage());
    MOZ_ASSERT(!CapacityHasExcessSpace<T>(aNewCap));
    T* newbuf = aV.template pod_malloc<T>(aNewCap);
    if (MOZ_UNLIKELY(!newbuf)) {
      return false;
    }
    T* dst = newbuf;
    T* src = aV.beginNoCheck();
    for (; src < aV.endNoCheck(); ++dst, ++src) {
      new_(dst, std::move(*src));
    }
    VectorImpl::destroy(aV.beginNoCheck(), aV.endNoCheck());
    aV.free_(aV.mBegin, aV.mTail.mCapacity);
    aV.mBegin = newbuf;
    /* aV.mLength is unchanged. */
    aV.mTail.mCapacity = aNewCap;
    return true;
  }
};
/*
 * This partial template specialization provides a default implementation for
 * vector operations when the element type is known to be a POD, as judged by
 * IsPod.
 */
template <typename T, size_t N, class AP>
struct VectorImpl<T, N, AP, true> {
  template <typename... Args>
  static inline void new_(T* aDst, Args&&... aArgs) {
    // Explicitly construct a local object instead of using a temporary since
    // T(args...) will be treated like a C-style cast in the unary case and
    // allow unsafe conversions. Both forms should be equivalent to an
    // optimizing compiler.
    T temp(std::forward<Args>(aArgs)...);
    *aDst = temp;
  }
  static inline void destroy(T*, T*) {}
  static inline void initialize(T* aBegin, T* aEnd) {
    /*
     * You would think that memset would be a big win (or even break even)
     * when we know T is a POD. But currently it's not. This is probably
     * because |append| tends to be given small ranges and memset requires
     * a function call that doesn't get inlined.
     *
     * memset(aBegin, 0, sizeof(T) * (aEnd - aBegin));
     */
    MOZ_ASSERT(aBegin <= aEnd);
    for (T* p = aBegin; p < aEnd; ++p) {
      *p = T();
    }
  }
  template <typename U>
  static inline void copyConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    /*
     * See above memset comment. Also, notice that copyConstruct is
     * currently templated (T != U), so memcpy won't work without
     * requiring a cast.
     *
     * memcpy(aDst, aSrcStart, sizeof(T) * (aSrcEnd - aSrcStart));
     */
    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
      *aDst = *p;
    }
  }
  template <typename U>
  static inline void moveConstruct(T* aDst, const U* aSrcStart,
                                   const U* aSrcEnd) {
    copyConstruct(aDst, aSrcStart, aSrcEnd);
  }
  static inline void copyConstructN(T* aDst, size_t aN, const T& aT) {
    for (T* end = aDst + aN; aDst < end; ++aDst) {
      *aDst = aT;
    }
  }
  [[nodiscard]] static inline bool growTo(Vector<T, N, AP>& aV,
                                          size_t aNewCap) {
    MOZ_ASSERT(!aV.usingInlineStorage());
    MOZ_ASSERT(!CapacityHasExcessSpace<T>(aNewCap));
    T* newbuf =
        aV.template pod_realloc<T>(aV.mBegin, aV.mTail.mCapacity, aNewCap);
    if (MOZ_UNLIKELY(!newbuf)) {
      return false;
    }
    aV.mBegin = newbuf;
    /* aV.mLength is unchanged. */
    aV.mTail.mCapacity = aNewCap;
    return true;
  }
};
// A struct for TestVector.cpp to access private internal fields.
// DO NOT DEFINE IN YOUR OWN CODE.
struct VectorTesting;

}  // namespace detail
/*
 * STL-like container providing a short-lived, dynamic buffer. Vector calls the
 * constructors/destructors of all elements stored in its internal buffer, so
 * non-PODs may be safely used. Additionally, Vector will store the first N
 * elements in-place before resorting to dynamic allocation.
 *
 * T requirements:
 *  - default and copy constructible, assignable, destructible
 *  - operations do not throw
 * MinInlineCapacity requirements:
 *  - any value, however, MinInlineCapacity is clamped to min/max values
 * AllocPolicy:
 *  - see "Allocation policies" in AllocPolicy.h (defaults to
 *    mozilla::MallocAllocPolicy)
 *
 * Vector is not reentrant: T member functions called during Vector member
 * functions must not call back into the same object!
 */
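/*
 * Example usage (illustrative, not from the original header; assumes an int
 * element type and that this header is included):
 *
 *   mozilla::Vector<int, 8> nums;       // first 8 elements stored in-line
 *   if (!nums.append(1) || !nums.append(2)) {
 *     return false;                     // append is fallible; check it
 *   }
 *   for (int n : nums) {                // begin()/end() support range-for
 *     DoSomething(n);                   // DoSomething is a hypothetical callee
 *   }
 */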
template <typename T, size_t MinInlineCapacity = 0,
          class AllocPolicy = MallocAllocPolicy>
class MOZ_NON_PARAM Vector final : private AllocPolicy {
  static constexpr bool kElemIsPod = IsPod<T>::value;
  typedef detail::VectorImpl<T, MinInlineCapacity, AllocPolicy, kElemIsPod>
      Impl;
  friend struct detail::VectorImpl<T, MinInlineCapacity, AllocPolicy,
                                   kElemIsPod>;

  friend struct detail::VectorTesting;
  [[nodiscard]] bool growStorageBy(size_t aIncr);
  [[nodiscard]] bool convertToHeapStorage(size_t aNewCap);
  [[nodiscard]] bool maybeCheckSimulatedOOM(size_t aRequestedSize);
  /* magic constants */

  /**
   * The maximum space allocated for inline element storage.
   *
   * We reduce space by what the AllocPolicy base class and prior Vector member
   * fields likely consume to attempt to play well with binary size classes.
   */
  static constexpr size_t kMaxInlineBytes =
      1024 -
      (sizeof(AllocPolicy) + sizeof(T*) + sizeof(size_t) + sizeof(size_t));
  /**
   * The number of T elements of inline capacity built into this Vector. This
   * is usually |MinInlineCapacity|, but it may be less (or zero!) for large T.
   *
   * We use a partially-specialized template (not explicit specialization,
   * which is only allowed at namespace scope) to compute this value. The
   * benefit is that |sizeof(T)| need not be computed, and |T| doesn't have to
   * be fully defined at the time |Vector<T>| appears, if no inline storage is
   * requested.
   */
  template <size_t MinimumInlineCapacity, size_t Dummy>
  struct ComputeCapacity {
    static constexpr size_t value =
        tl::Min<MinimumInlineCapacity, kMaxInlineBytes / sizeof(T)>::value;
  };

  template <size_t Dummy>
  struct ComputeCapacity<0, Dummy> {
    static constexpr size_t value = 0;
  };

  /** The actual inline capacity in number of elements T. This may be zero! */
  static constexpr size_t kInlineCapacity =
      ComputeCapacity<MinInlineCapacity, 0>::value;
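
  /*
   * Illustrative sketch (not part of the original header): for a hypothetical
   * element type Big with sizeof(Big) > kMaxInlineBytes, the primary template
   * computes tl::Min<MinInlineCapacity, 0>::value == 0, so |Vector<Big, 16>|
   * keeps no elements in-line. |Vector<Big, 0>| instead matches the <0, Dummy>
   * specialization and never evaluates sizeof(Big) at all.
   */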
  /*
   * Pointer to the buffer, be it inline or heap-allocated. Only [mBegin,
   * mBegin + mLength) hold valid constructed T objects. The range [mBegin +
   * mLength, mBegin + mCapacity) holds uninitialized memory. The range
   * [mBegin + mLength, mBegin + mReserved) also holds uninitialized memory
   * previously allocated by a call to reserve().
   */
  T* mBegin;

  /* Number of elements in the vector. */
  size_t mLength;
  /*
   * Memory used to store capacity, reserved element count (debug builds only),
   * and inline storage. The simple "answer" is:
   *
   *   size_t mCapacity;
   *   #ifdef DEBUG
   *   size_t mReserved;
   *   #endif
   *   alignas(T) unsigned char mBytes[kInlineCapacity * sizeof(T)];
   *
   * but there are complications. First, C++ forbids zero-sized arrays that
   * might result. Second, we don't want zero capacity to affect Vector's size
   * (even empty classes take up a byte, unless they're base classes).
   *
   * Yet again, we eliminate the zero-sized array using partial specialization.
   * And we eliminate potential size hit by putting capacity/reserved in one
   * struct, then putting the array (if any) in a derived struct. If no array
   * is needed, the derived struct won't consume extra space.
   */
  struct CapacityAndReserved {
    explicit CapacityAndReserved(size_t aCapacity, size_t aReserved)
        : mCapacity(aCapacity)
#ifdef DEBUG
          ,
          mReserved(aReserved)
#endif
    {
    }
    CapacityAndReserved() = default;

    /* Max number of elements storable in the vector without resizing. */
    size_t mCapacity;

#ifdef DEBUG
    /* Max elements of reserved or used space in this vector. */
    size_t mReserved;
#endif
  };
// Silence warnings about this struct possibly being padded due to the
// alignas() in it -- there's nothing we can do to avoid it.
#ifdef _MSC_VER
#  pragma warning(push)
#  pragma warning(disable : 4324)
#endif
  template <size_t Capacity, size_t Dummy>
  struct CRAndStorage : CapacityAndReserved {
    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
        : CapacityAndReserved(aCapacity, aReserved) {}
    CRAndStorage() = default;

    alignas(T) unsigned char mBytes[Capacity * sizeof(T)];

    // GCC fails due to -Werror=strict-aliasing if |mBytes| is directly cast to
    // T*. Indirecting through this function addresses the problem.
    void* data() { return mBytes; }

    T* storage() { return static_cast<T*>(data()); }
  };
  template <size_t Dummy>
  struct CRAndStorage<0, Dummy> : CapacityAndReserved {
    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
        : CapacityAndReserved(aCapacity, aReserved) {}
    CRAndStorage() = default;

    T* storage() {
      // If this returns |nullptr|, functions like |Vector::begin()| would too,
      // breaking callers that pass a vector's elements as pointer/length to
      // code that bounds its operation by length but (even just as a sanity
      // check) always wants a non-null pointer. Fake up an aligned, non-null
      // pointer to support these callers.
      return reinterpret_cast<T*>(sizeof(T));
    }
  };
  CRAndStorage<kInlineCapacity, 0> mTail;

#ifdef _MSC_VER
#  pragma warning(pop)
#endif
#ifdef DEBUG
  friend class ReentrancyGuard;
  bool mEntered;
#endif
  /* private accessors */

  bool usingInlineStorage() const {
    return mBegin == const_cast<Vector*>(this)->inlineStorage();
  }

  T* inlineStorage() { return mTail.storage(); }

  T* beginNoCheck() const { return mBegin; }

  T* endNoCheck() { return mBegin + mLength; }

  const T* endNoCheck() const { return mBegin + mLength; }
#ifdef DEBUG
  /**
   * The amount of explicitly allocated space in this vector that is
   * immediately available to be filled by appending additional elements. This
   * value is always greater than or equal to |length()| -- the vector's actual
   * elements are implicitly reserved. This value is always less than or equal
   * to |capacity()|. It may be explicitly increased using the |reserve()|
   * method.
   */
  size_t reserved() const {
    MOZ_ASSERT(mLength <= mTail.mReserved);
    MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
    return mTail.mReserved;
  }
#endif
  bool internalEnsureCapacity(size_t aNeeded);
  /* Append operations guaranteed to succeed due to pre-reserved space. */
  template <typename U>
  void internalAppend(U&& aU);
  template <typename U, size_t O, class BP>
  void internalAppendAll(const Vector<U, O, BP>& aU);
  void internalAppendN(const T& aT, size_t aN);
  template <typename U>
  void internalAppend(const U* aBegin, size_t aLength);
  template <typename U>
  void internalMoveAppend(U* aBegin, size_t aLength);
 public:
  static const size_t sMaxInlineStorage = MinInlineCapacity;

  typedef T ElementType;

  explicit Vector(AllocPolicy = AllocPolicy());
  Vector(Vector&&);            /* Move constructor. */
  Vector& operator=(Vector&&); /* Move assignment. */
  ~Vector();

  /* accessors */

  const AllocPolicy& allocPolicy() const { return *this; }

  AllocPolicy& allocPolicy() { return *this; }

  enum { InlineLength = MinInlineCapacity };
  size_t length() const { return mLength; }

  bool empty() const { return mLength == 0; }

  size_t capacity() const { return mTail.mCapacity; }

  T* begin() {
    MOZ_ASSERT(!mEntered);
    return mBegin;
  }

  const T* begin() const {
    MOZ_ASSERT(!mEntered);
    return mBegin;
  }

  T* end() {
    MOZ_ASSERT(!mEntered);
    return mBegin + mLength;
  }

  const T* end() const {
    MOZ_ASSERT(!mEntered);
    return mBegin + mLength;
  }
  T& operator[](size_t aIndex) {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(aIndex < mLength);
    return begin()[aIndex];
  }

  const T& operator[](size_t aIndex) const {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(aIndex < mLength);
    return begin()[aIndex];
  }
  T& back() {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(!empty());
    return *(end() - 1);
  }

  const T& back() const {
    MOZ_ASSERT(!mEntered);
    MOZ_ASSERT(!empty());
    return *(end() - 1);
  }
  operator mozilla::Span<const T>() const {
    // Explicitly specify template argument here to avoid instantiating Span<T>
    // first and then implicitly converting to Span<const T>
    return mozilla::Span<const T>{mBegin, mLength};
  }

  operator mozilla::Span<T>() { return mozilla::Span{mBegin, mLength}; }
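
  /*
   * Illustrative usage (not part of the original header): because of the
   * conversion operators above, a Vector can be passed directly to code that
   * accepts mozilla::Span. The callee below is hypothetical.
   *
   *   void Sum(mozilla::Span<const int> aInts);
   *
   *   mozilla::Vector<int> v;
   *   ...
   *   Sum(v);   // implicitly converts to Span<const int>{mBegin, mLength}
   */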
  class Range {
    friend class Vector;
    T* mCur;
    T* mEnd;
    Range(T* aCur, T* aEnd) : mCur(aCur), mEnd(aEnd) {
      MOZ_ASSERT(aCur <= aEnd);
    }

   public:
    bool empty() const { return mCur == mEnd; }
    size_t remain() const { return PointerRangeSize(mCur, mEnd); }
    T& front() const {
      MOZ_ASSERT(!empty());
      return *mCur;
    }
    void popFront() {
      MOZ_ASSERT(!empty());
      ++mCur;
    }
    T popCopyFront() {
      MOZ_ASSERT(!empty());
      return *mCur++;
    }
  };
  class ConstRange {
    friend class Vector;
    const T* mCur;
    const T* mEnd;
    ConstRange(const T* aCur, const T* aEnd) : mCur(aCur), mEnd(aEnd) {
      MOZ_ASSERT(aCur <= aEnd);
    }

   public:
    bool empty() const { return mCur == mEnd; }
    size_t remain() const { return PointerRangeSize(mCur, mEnd); }
    const T& front() const {
      MOZ_ASSERT(!empty());
      return *mCur;
    }
    void popFront() {
      MOZ_ASSERT(!empty());
      ++mCur;
    }
    T popCopyFront() {
      MOZ_ASSERT(!empty());
      return *mCur++;
    }
  };
  Range all() { return Range(begin(), end()); }
  ConstRange all() const { return ConstRange(begin(), end()); }
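
  /*
   * Illustrative usage (not part of the original header): iterating with the
   * Range returned by all(), assuming an int element type and the Range
   * interface shown above.
   *
   *   mozilla::Vector<int> v;
   *   ...
   *   for (auto r = v.all(); !r.empty(); r.popFront()) {
   *     Use(r.front());   // Use is a hypothetical callee
   *   }
   */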
  /**
   * Reverse the order of the elements in the vector in place.
   */
  void reverse();
  /**
   * Given that the vector is empty, grow the internal capacity to |aRequest|,
   * keeping the length 0.
   */
  [[nodiscard]] bool initCapacity(size_t aRequest);
  /**
   * Given that the vector is empty, grow the internal capacity and length to
   * |aRequest| leaving the elements' memory completely uninitialized (with all
   * the associated hazards and caveats). This avoids the usual allocation-size
   * rounding that happens in resize and overhead of initialization for
   * elements that are about to be overwritten.
   */
  [[nodiscard]] bool initLengthUninitialized(size_t aRequest);
  /**
   * If reserve(aRequest) succeeds and |aRequest >= length()|, then appending
   * |aRequest - length()| elements, in any sequence of append/appendAll calls,
   * is guaranteed to succeed.
   *
   * A request to reserve an amount less than the current length does not
   * affect reserved space.
   */
  [[nodiscard]] bool reserve(size_t aRequest);
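
  /*
   * Illustrative usage (not part of the original header): pairing reserve()
   * with the infallible append operations declared further below, assuming an
   * int element type.
   *
   *   mozilla::Vector<int> v;
   *   if (!v.reserve(100)) {
   *     return false;              // OOM; nothing was appended
   *   }
   *   for (int i = 0; i < 100; i++) {
   *     v.infallibleAppend(i);     // guaranteed to succeed after reserve(100)
   *   }
   */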
  /**
   * Destroy elements in the range [end() - aIncr, end()). Does not deallocate
   * or unreserve storage for those elements.
   */
  void shrinkBy(size_t aIncr);
  /**
   * Destroy elements in the range [aNewLength, end()). Does not deallocate
   * or unreserve storage for those elements.
   */
  void shrinkTo(size_t aNewLength);
  /** Grow the vector by aIncr elements. */
  [[nodiscard]] bool growBy(size_t aIncr);
  /** Call shrinkBy or growBy based on whether newSize > length(). */
  [[nodiscard]] bool resize(size_t aNewLength);
  /**
   * Increase the length of the vector, but don't initialize the new elements
   * -- leave them as uninitialized memory.
   */
  [[nodiscard]] bool growByUninitialized(size_t aIncr);
  void infallibleGrowByUninitialized(size_t aIncr);
  [[nodiscard]] bool resizeUninitialized(size_t aNewLength);
  /** Shorthand for shrinkBy(length()). */
  void clear();

  /** Clears and releases any heap-allocated storage. */
  void clearAndFree();
  /**
   * Shrinks the storage to drop excess capacity if possible.
   *
   * The return value indicates whether the operation succeeded; otherwise, it
   * represents an OOM. The bool can be safely ignored unless you want to
   * provide the guarantee that `length() == capacity()`.
   *
   * For PODs, it calls the AllocPolicy's pod_realloc. For non-PODs, it moves
   * the elements into the new storage.
   */
  bool shrinkStorageToFit();
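
  /*
   * Illustrative usage (not part of the original header): dropping excess
   * capacity once a vector's final size is known.
   *
   *   mozilla::Vector<int> v;
   *   ...                              // many appends, then many removals
   *   (void)v.shrinkStorageToFit();    // ignoring the OOM signal is safe per
   *                                    // the comment above
   */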
  /**
   * If true, appending |aNeeded| elements won't reallocate elements storage.
   * This *doesn't* mean that infallibleAppend may be used! You still must
   * reserve the extra space, even if this method indicates that appends won't
   * need to reallocate elements storage.
   */
  bool canAppendWithoutRealloc(size_t aNeeded) const;
  /** Potentially fallible append operations. */

  /**
   * This can take either a T& or a T&&. Given a T&&, it moves |aU| into the
   * vector, instead of copying it. If it fails, |aU| is left unmoved. ("We are
   * not amused.")
   */
  template <typename U>
  [[nodiscard]] bool append(U&& aU);
  /**
   * Construct a T in-place as a new entry at the end of this vector.
   */
  template <typename... Args>
  [[nodiscard]] bool emplaceBack(Args&&... aArgs) {
    if (!growByUninitialized(1)) return false;
    Impl::new_(&back(), std::forward<Args>(aArgs)...);
    return true;
  }
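
  /*
   * Illustrative usage (not part of the original header): constructing an
   * element in place, assuming an element type with a two-argument
   * constructor.
   *
   *   mozilla::Vector<std::pair<int, int>> pairs;
   *   if (!pairs.emplaceBack(1, 2)) {    // forwards (1, 2) to the constructor
   *     return false;                    // growByUninitialized(1) failed
   *   }
   */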
  template <typename U, size_t O, class BP>
  [[nodiscard]] bool appendAll(const Vector<U, O, BP>& aU);
  template <typename U, size_t O, class BP>
  [[nodiscard]] bool appendAll(Vector<U, O, BP>&& aU);
  [[nodiscard]] bool appendN(const T& aT, size_t aN);
  template <typename U>
  [[nodiscard]] bool append(const U* aBegin, const U* aEnd);
  template <typename U>
  [[nodiscard]] bool append(const U* aBegin, size_t aLength);
  template <typename U>
  [[nodiscard]] bool moveAppend(U* aBegin, U* aEnd);
  /**
   * Guaranteed-infallible append operations for use upon vectors whose
   * memory has been pre-reserved. Don't use this if you haven't reserved the
   * memory!
   */
  template <typename U>
  void infallibleAppend(U&& aU) {
    internalAppend(std::forward<U>(aU));
  }
  void infallibleAppendN(const T& aT, size_t aN) { internalAppendN(aT, aN); }
  template <typename U>
  void infallibleAppend(const U* aBegin, const U* aEnd) {
    internalAppend(aBegin, PointerRangeSize(aBegin, aEnd));
  }
  template <typename U>
  void infallibleAppend(const U* aBegin, size_t aLength) {
    internalAppend(aBegin, aLength);
  }
  template <typename... Args>
  void infallibleEmplaceBack(Args&&... aArgs) {
    infallibleGrowByUninitialized(1);
    Impl::new_(&back(), std::forward<Args>(aArgs)...);
  }

  void popBack();

  T popCopy();
  /**
   * If elements are stored in-place, return nullptr and leave this vector
   * unmodified.
   *
   * Otherwise return this vector's elements buffer, and clear this vector as if
   * by clearAndFree(). The caller now owns the buffer and is responsible for
   * deallocating it consistent with this vector's AllocPolicy.
   *
   * N.B. Although a T*, only the range [0, length()) is constructed.
   */
  [[nodiscard]] T* extractRawBuffer();
  /**
   * If elements are stored in-place, allocate a new buffer, move this vector's
   * elements into it, and return that buffer.
   *
   * Otherwise return this vector's elements buffer. The caller now owns the
   * buffer and is responsible for deallocating it consistent with this vector's
   * AllocPolicy.
   *
   * This vector is cleared, as if by clearAndFree(), when this method
   * succeeds. This method fails and returns nullptr only if new elements buffer
   * allocation fails.
   *
   * N.B. Only the range [0, length()) of the returned buffer is constructed.
   * If any of these elements are uninitialized (as growByUninitialized
   * enables), behavior is undefined.
   */
  [[nodiscard]] T* extractOrCopyRawBuffer();
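
  /*
   * Illustrative usage (not part of the original header): taking ownership of
   * the buffer and releasing it through the same allocation policy (plain
   * free() matches the default MallocAllocPolicy; a custom policy's free_
   * would be used otherwise).
   *
   *   mozilla::Vector<int> v;
   *   ...
   *   size_t len = v.length();               // read before the vector clears
   *   if (int* buf = v.extractOrCopyRawBuffer()) {
   *     UseBuffer(buf, len);                 // UseBuffer is hypothetical
   *     free(buf);
   *   }
   */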
  /**
   * Transfer ownership of an array of objects into the vector. The caller
   * must have allocated the array in accordance with this vector's
   * AllocPolicy.
   *
   * N.B. This call assumes that there are no uninitialized elements in the
   * passed range [aP, aP + aLength). The range [aP + aLength, aP +
   * aCapacity) must be allocated uninitialized memory.
   */
  void replaceRawBuffer(T* aP, size_t aLength, size_t aCapacity);
  /**
   * Transfer ownership of an array of objects into the vector. The caller
   * must have allocated the array in accordance with this vector's
   * AllocPolicy.
   *
   * N.B. This call assumes that there are no uninitialized elements in the
   * passed range [aP, aP + aLength).
   */
  void replaceRawBuffer(T* aP, size_t aLength);
  /**
   * Places |aVal| at position |aP|, shifting existing elements from |aP| onward
   * one position higher. On success, |aP| should not be reused because it'll
   * be a dangling pointer if reallocation of the vector storage occurred; the
   * return value should be used instead. On failure, nullptr is returned.
   *
   * Example usage:
   *
   *   if (!(p = vec.insert(p, val))) {
   *     <handle failure>
   *   }
   *   <keep working with p>
   *
   * This is inherently a linear-time operation. Be careful!
   */
  template <typename U>
  [[nodiscard]] T* insert(T* aP, U&& aVal);
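
  /*
   * Illustrative usage (not part of the original header): a complete version
   * of the sketch above, inserting in front of the current first element of an
   * int vector.
   *
   *   mozilla::Vector<int> v;
   *   ...
   *   int* p = v.begin();
   *   if (!(p = v.insert(p, 42))) {
   *     return false;          // OOM; handle failure
   *   }
   *   // p now points at the newly inserted 42 inside v.
   */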
  /**
   * Removes the element |aT|, which must fall in the bounds [begin, end),
   * shifting existing elements from |aT + 1| onward one position lower.
   */
  void erase(T* aT);

  /**
   * Removes the elements [|aBegin|, |aEnd|), which must fall in the bounds
   * [begin, end), shifting existing elements from |aEnd| onward to aBegin's old
   * position.
   */
  void erase(T* aBegin, T* aEnd);
  /**
   * Removes all elements that satisfy the predicate, shifting existing elements
   * lower to fill erased gaps.
   */
  template <typename Pred>
  void eraseIf(Pred aPred);
  /**
   * Removes all elements that compare equal to |aU|, shifting existing elements
   * lower to fill erased gaps.
   */
  template <typename U>
  void eraseIfEqual(const U& aU);
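
  /*
   * Illustrative usage (not part of the original header): erasing with a
   * lambda predicate, assuming an int element type.
   *
   *   mozilla::Vector<int> v;
   *   ...
   *   v.eraseIf([](int aValue) { return aValue % 2 != 0; });  // drop odd ints
   *   v.eraseIfEqual(0);                                      // drop zeroes
   */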
  /**
   * Measure the size of the vector's heap-allocated storage.
   */
  size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;

  /**
   * Like sizeOfExcludingThis, but also measures the size of the vector
   * object (which must be heap-allocated) itself.
   */
  size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
  void swap(Vector& aOther);

 private:
  Vector(const Vector&) = delete;
  void operator=(const Vector&) = delete;
};
/* This does the re-entrancy check plus several other sanity checks. */
#define MOZ_REENTRANCY_GUARD_ET_AL                                         \
  ReentrancyGuard g(*this);                                                \
  MOZ_ASSERT_IF(usingInlineStorage(), mTail.mCapacity == kInlineCapacity); \
  MOZ_ASSERT(reserved() <= mTail.mCapacity);                               \
  MOZ_ASSERT(mLength <= reserved());                                       \
  MOZ_ASSERT(mLength <= mTail.mCapacity)
/* Vector Implementation */
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>::Vector(AP aAP)
    : AP(std::move(aAP)),
      mLength(0),
      mTail(kInlineCapacity, 0)
#ifdef DEBUG
      ,
      mEntered(false)
#endif
{
  mBegin = inlineStorage();
}
/* Move constructor. */
template <typename T, size_t N, class AllocPolicy>
MOZ_ALWAYS_INLINE Vector<T, N, AllocPolicy>::Vector(Vector&& aRhs)
    : AllocPolicy(std::move(aRhs))
#ifdef DEBUG
      ,
      mEntered(false)
#endif
{
  mLength = aRhs.mLength;
  mTail.mCapacity = aRhs.mTail.mCapacity;
#ifdef DEBUG
  mTail.mReserved = aRhs.mTail.mReserved;
#endif

  if (aRhs.usingInlineStorage()) {
    /* We can't move the buffer over in this case, so copy elements. */
    mBegin = inlineStorage();
    Impl::moveConstruct(mBegin, aRhs.beginNoCheck(), aRhs.endNoCheck());
    /*
     * Leave aRhs's mLength, mBegin, mCapacity, and mReserved as they are.
     * The elements in its in-line storage still need to be destroyed.
     */
  } else {
    /*
     * Take src's buffer, and turn src into an empty vector using
     * in-line storage.
     */
    mBegin = aRhs.mBegin;
    aRhs.mBegin = aRhs.inlineStorage();
    aRhs.mTail.mCapacity = kInlineCapacity;
    aRhs.mLength = 0;
#ifdef DEBUG
    aRhs.mTail.mReserved = 0;
#endif
  }
}
/* Move assignment. */
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>& Vector<T, N, AP>::operator=(Vector&& aRhs) {
  MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
  this->~Vector();
  new (KnownNotNull, this) Vector(std::move(aRhs));
  return *this;
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>::~Vector() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  Impl::destroy(beginNoCheck(), endNoCheck());
  if (!usingInlineStorage()) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::reverse() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  T* elems = mBegin;
  size_t len = mLength;
  size_t mid = len / 2;
  for (size_t i = 0; i < mid; i++) {
    std::swap(elems[i], elems[len - i - 1]);
  }
}
/*
 * This function will create a new heap buffer with capacity aNewCap,
 * move all elements in the inline buffer to this new buffer,
 * and fail on OOM.
 */
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::convertToHeapStorage(size_t aNewCap) {
  MOZ_ASSERT(usingInlineStorage());

  /* Allocate buffer. */
  MOZ_ASSERT(!detail::CapacityHasExcessSpace<T>(aNewCap));
  T* newBuf = this->template pod_malloc<T>(aNewCap);
  if (MOZ_UNLIKELY(!newBuf)) {
    return false;
  }

  /* Copy inline elements into heap buffer. */
  Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
  Impl::destroy(beginNoCheck(), endNoCheck());

  /* Switch in heap buffer. */
  mBegin = newBuf;
  /* mLength is unchanged. */
  mTail.mCapacity = aNewCap;
  return true;
}
template <typename T, size_t N, class AP>
MOZ_NEVER_INLINE bool Vector<T, N, AP>::growStorageBy(size_t aIncr) {
  MOZ_ASSERT(mLength + aIncr > mTail.mCapacity);

  /*
   * When choosing a new capacity, its size should be as close to 2**N bytes
   * as possible. 2**N-sized requests are best because they are unlikely to
   * be rounded up by the allocator. Asking for a 2**N number of elements
   * isn't as good, because if sizeof(T) is not a power-of-two that would
   * result in a non-2**N request size.
   */

  size_t newCap;

  if (aIncr == 1) {
    if (usingInlineStorage()) {
      /* This case occurs in ~70--80% of the calls to this function. */
      size_t newSize =
          tl::RoundUpPow2<(kInlineCapacity + 1) * sizeof(T)>::value;
      newCap = newSize / sizeof(T);
    } else if (mLength == 0) {
      /* This case occurs in ~0--10% of the calls to this function. */
      newCap = 1;
    } else {
      /* This case occurs in ~15--20% of the calls to this function. */

      /*
       * Will mLength * 4 * sizeof(T) overflow? This condition limits a vector
       * to 1GB of memory on a 32-bit system, which is a reasonable limit. It
       * also ensures that
       *
       *   static_cast<char*>(end()) - static_cast<char*>(begin())
       *
       * doesn't overflow ptrdiff_t (see bug 510319).
       */
      if (MOZ_UNLIKELY(mLength & tl::MulOverflowMask<4 * sizeof(T)>::value)) {
        this->reportAllocOverflow();
        return false;
      }

      /*
       * If we reach here, the existing capacity will have a size that is
       * already as close to 2^N as sizeof(T) will allow. Just double the
       * capacity, and then there might be space for one more element.
       */
      newCap = mLength * 2;
      if (detail::CapacityHasExcessSpace<T>(newCap)) {
        newCap += 1;
      }
    }
  } else {
    /* This case occurs in ~2% of the calls to this function. */
    size_t newMinCap = mLength + aIncr;

    /* Did mLength + aIncr overflow? Will newCap * sizeof(T) overflow? */
    if (MOZ_UNLIKELY(newMinCap < mLength ||
                     newMinCap & tl::MulOverflowMask<2 * sizeof(T)>::value)) {
      this->reportAllocOverflow();
      return false;
    }

    size_t newMinSize = newMinCap * sizeof(T);
    size_t newSize = RoundUpPow2(newMinSize);
    newCap = newSize / sizeof(T);
  }

  if (usingInlineStorage()) {
    return convertToHeapStorage(newCap);
  }

  return Impl::growTo(*this, newCap);
}
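
/*
 * Illustrative numbers (not part of the original header), assuming a
 * hypothetical 8-byte element type and the default inline capacity of 0:
 * appending one element to a full heap buffer of length 8 takes the doubling
 * path, so newCap = 16 (16 * 8 == 128 bytes, already a power of two). A bulk
 * append of 9 elements onto length 8 takes the last path: newMinCap = 17,
 * newMinSize = 136, RoundUpPow2(136) == 256, and newCap = 32.
 */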
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::initCapacity(size_t aRequest) {
  MOZ_ASSERT(empty());
  MOZ_ASSERT(usingInlineStorage());
  if (aRequest == 0) {
    return true;
  }
  T* newbuf = this->template pod_malloc<T>(aRequest);
  if (MOZ_UNLIKELY(!newbuf)) {
    return false;
  }
  mBegin = newbuf;
  mTail.mCapacity = aRequest;
#ifdef DEBUG
  mTail.mReserved = aRequest;
#endif
  return true;
}
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::initLengthUninitialized(size_t aRequest) {
  if (!initCapacity(aRequest)) {
    return false;
  }
  infallibleGrowByUninitialized(aRequest);
  return true;
}
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::maybeCheckSimulatedOOM(size_t aRequestedSize) {
  if (aRequestedSize <= N) {
    return true;
  }

#ifdef DEBUG
  if (aRequestedSize <= mTail.mReserved) {
    return true;
  }
#endif

  return allocPolicy().checkSimulatedOOM();
}
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::reserve(size_t aRequest) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aRequest > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aRequest - mLength))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(aRequest)) {
    return false;
  }
#ifdef DEBUG
  if (aRequest > mTail.mReserved) {
    mTail.mReserved = aRequest;
  }
  MOZ_ASSERT(mLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
#endif
  return true;
}
template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::shrinkBy(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  MOZ_ASSERT(aIncr <= mLength);
  Impl::destroy(endNoCheck() - aIncr, endNoCheck());
  mLength -= aIncr;
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::shrinkTo(size_t aNewLength) {
  MOZ_ASSERT(aNewLength <= mLength);
  shrinkBy(mLength - aNewLength);
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growBy(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aIncr > mTail.mCapacity - mLength) {
    if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
    return false;
  }
  MOZ_ASSERT(mLength + aIncr <= mTail.mCapacity);
  T* newend = endNoCheck() + aIncr;
  Impl::initialize(endNoCheck(), newend);
  mLength += aIncr;
#ifdef DEBUG
  if (mLength > mTail.mReserved) {
    mTail.mReserved = mLength;
  }
#endif
  return true;
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growByUninitialized(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aIncr > mTail.mCapacity - mLength) {
    if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aIncr > mTail.mReserved) {
    mTail.mReserved = mLength + aIncr;
  }
#endif
  infallibleGrowByUninitialized(aIncr);
  return true;
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::infallibleGrowByUninitialized(
    size_t aIncr) {
  MOZ_ASSERT(mLength + aIncr <= reserved());
  mLength += aIncr;
}
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::resize(size_t aNewLength) {
  size_t curLength = mLength;
  if (aNewLength > curLength) {
    return growBy(aNewLength - curLength);
  }
  shrinkBy(curLength - aNewLength);
  return true;
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::resizeUninitialized(
    size_t aNewLength) {
  size_t curLength = mLength;
  if (aNewLength > curLength) {
    return growByUninitialized(aNewLength - curLength);
  }
  shrinkBy(curLength - aNewLength);
  return true;
}
template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::clear() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  Impl::destroy(beginNoCheck(), endNoCheck());
  mLength = 0;
}
template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::clearAndFree() {
  clear();

  if (usingInlineStorage()) {
    return;
  }
  this->free_(beginNoCheck(), mTail.mCapacity);
  mBegin = inlineStorage();
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
}
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::shrinkStorageToFit() {
  MOZ_REENTRANCY_GUARD_ET_AL;

  const auto length = this->length();
  if (usingInlineStorage() || length == capacity()) {
    return true;
  }

  if (!length) {
    this->free_(beginNoCheck(), mTail.mCapacity);
    mBegin = inlineStorage();
    mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
    mTail.mReserved = 0;
#endif
    return true;
  }

  T* newBuf;
  size_t newCap;
  if (length <= kInlineCapacity) {
    newBuf = inlineStorage();
    newCap = kInlineCapacity;
  } else {
    if (kElemIsPod) {
      newBuf = this->template pod_realloc<T>(beginNoCheck(), mTail.mCapacity,
                                             length);
    } else {
      newBuf = this->template pod_malloc<T>(length);
    }
    if (MOZ_UNLIKELY(!newBuf)) {
      return false;
    }
    newCap = length;
  }
  if (!kElemIsPod || newBuf == inlineStorage()) {
    Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
    Impl::destroy(beginNoCheck(), endNoCheck());
  }
  if (!kElemIsPod || newBuf == inlineStorage()) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }
  mBegin = newBuf;
  mTail.mCapacity = newCap;
#ifdef DEBUG
  mTail.mReserved = length;
#endif
  return true;
}
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::canAppendWithoutRealloc(size_t aNeeded) const {
  return mLength + aNeeded <= mTail.mCapacity;
}
template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendAll(
    const Vector<U, O, BP>& aOther) {
  internalAppend(aOther.begin(), aOther.length());
}
template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(U&& aU) {
  MOZ_ASSERT(mLength + 1 <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::new_(endNoCheck(), std::forward<U>(aU));
  ++mLength;
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendN(const T& aT, size_t aNeeded) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (mLength + aNeeded > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aNeeded > mTail.mReserved) {
    mTail.mReserved = mLength + aNeeded;
  }
#endif
  internalAppendN(aT, aNeeded);
  return true;
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendN(const T& aT,
                                                         size_t aNeeded) {
  MOZ_ASSERT(mLength + aNeeded <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::copyConstructN(endNoCheck(), aNeeded, aT);
  mLength += aNeeded;
}
template <typename T, size_t N, class AP>
template <typename U>
inline T* Vector<T, N, AP>::insert(T* aP, U&& aVal) {
  MOZ_ASSERT(begin() <= aP);
  MOZ_ASSERT(aP <= end());
  size_t pos = aP - begin();
  MOZ_ASSERT(pos <= mLength);
  size_t oldLength = mLength;
  if (pos == oldLength) {
    if (!append(std::forward<U>(aVal))) {
      return nullptr;
    }
  } else {
    T oldBack = std::move(back());
    if (!append(std::move(oldBack))) {
      return nullptr;
    }
    for (size_t i = oldLength - 1; i > pos; --i) {
      (*this)[i] = std::move((*this)[i - 1]);
    }
    (*this)[pos] = std::forward<U>(aVal);
  }
  return begin() + pos;
}
template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::erase(T* aIt) {
  MOZ_ASSERT(begin() <= aIt);
  MOZ_ASSERT(aIt < end());
  while (aIt + 1 < end()) {
    *aIt = std::move(*(aIt + 1));
    ++aIt;
  }
  popBack();
}
template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::erase(T* aBegin, T* aEnd) {
  MOZ_ASSERT(begin() <= aBegin);
  MOZ_ASSERT(aBegin <= aEnd);
  MOZ_ASSERT(aEnd <= end());
  while (aEnd < end()) {
    *aBegin++ = std::move(*aEnd++);
  }
  shrinkBy(aEnd - aBegin);
}
template <typename T, size_t N, class AP>
template <typename Pred>
void Vector<T, N, AP>::eraseIf(Pred aPred) {
  // remove_if finds the first element to be erased, and then efficiently move-
  // assigns elements to effectively overwrite elements that satisfy the
  // predicate. It returns the new end pointer, after which there are only
  // moved-from elements ready to be destroyed, so we just need to shrink the
  // vector accordingly.
  T* newEnd = std::remove_if(begin(), end(),
                             [&aPred](const T& aT) { return aPred(aT); });
  MOZ_ASSERT(newEnd <= end());
  shrinkBy(end() - newEnd);
}
template <typename T, size_t N, class AP>
template <typename U>
void Vector<T, N, AP>::eraseIfEqual(const U& aU) {
  return eraseIf([&aU](const T& aT) { return aT == aU; });
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::internalEnsureCapacity(
    size_t aNeeded) {
  if (mLength + aNeeded > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aNeeded > mTail.mReserved) {
    mTail.mReserved = mLength + aNeeded;
  }
#endif
  return true;
}
template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
                                                const U* aInsEnd) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  const size_t needed = PointerRangeSize(aInsBegin, aInsEnd);
  if (!internalEnsureCapacity(needed)) {
    return false;
  }
  internalAppend(aInsBegin, needed);
  return true;
}
template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(const U* aInsBegin,
                                                        size_t aInsLength) {
  MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::copyConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
  mLength += aInsLength;
}
template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::moveAppend(U* aInsBegin, U* aInsEnd) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  const size_t needed = PointerRangeSize(aInsBegin, aInsEnd);
  if (!internalEnsureCapacity(needed)) {
    return false;
  }
  internalMoveAppend(aInsBegin, needed);
  return true;
}
template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalMoveAppend(U* aInsBegin,
                                                            size_t aInsLength) {
  MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::moveConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
  mLength += aInsLength;
}
template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(U&& aU) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (mLength == mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(1))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + 1)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + 1 > mTail.mReserved) {
    mTail.mReserved = mLength + 1;
  }
#endif
  internalAppend(std::forward<U>(aU));
  return true;
}
template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendAll(
    const Vector<U, O, BP>& aOther) {
  return append(aOther.begin(), aOther.length());
}
template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendAll(Vector<U, O, BP>&& aOther) {
  if (empty() && capacity() < aOther.length()) {
    *this = std::move(aOther);
    return true;
  }

  if (moveAppend(aOther.begin(), aOther.end())) {
    aOther.clearAndFree();
    return true;
  }

  return false;
}
template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
                                                size_t aInsLength) {
  return append(aInsBegin, aInsBegin + aInsLength);
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::popBack() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  MOZ_ASSERT(!empty());
  --mLength;
  endNoCheck()->~T();
}
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE T Vector<T, N, AP>::popCopy() {
  T ret = back();
  popBack();
  return ret;
}
template <typename T, size_t N, class AP>
inline T* Vector<T, N, AP>::extractRawBuffer() {
  MOZ_REENTRANCY_GUARD_ET_AL;

  if (usingInlineStorage()) {
    return nullptr;
  }

  T* ret = mBegin;
  mBegin = inlineStorage();
  mLength = 0;
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
  return ret;
}
template <typename T, size_t N, class AP>
inline T* Vector<T, N, AP>::extractOrCopyRawBuffer() {
  if (T* ret = extractRawBuffer()) {
    return ret;
  }

  MOZ_REENTRANCY_GUARD_ET_AL;

  T* copy = this->template pod_malloc<T>(mLength);
  if (!copy) {
    return nullptr;
  }

  Impl::moveConstruct(copy, beginNoCheck(), endNoCheck());
  Impl::destroy(beginNoCheck(), endNoCheck());
  mBegin = inlineStorage();
  mLength = 0;
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
  return copy;
}
template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength,
                                               size_t aCapacity) {
  MOZ_REENTRANCY_GUARD_ET_AL;

  /* Destroy what we have. */
  Impl::destroy(beginNoCheck(), endNoCheck());
  if (!usingInlineStorage()) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }

  /* Take in the new buffer. */
  if (aCapacity <= kInlineCapacity) {
    /*
     * We convert to inline storage if possible, even though aP might
     * otherwise be acceptable. Maybe this behaviour should be
     * specifiable with an argument to this function.
     */
    mBegin = inlineStorage();
    mLength = aLength;
    mTail.mCapacity = kInlineCapacity;
    Impl::moveConstruct(mBegin, aP, aP + aLength);
    Impl::destroy(aP, aP + aLength);
    this->free_(aP, aCapacity);
  } else {
    mBegin = aP;
    mLength = aLength;
    mTail.mCapacity = aCapacity;
  }
#ifdef DEBUG
  mTail.mReserved = aCapacity;
#endif
}
template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength) {
  replaceRawBuffer(aP, aLength, aLength);
}
template <typename T, size_t N, class AP>
inline size_t Vector<T, N, AP>::sizeOfExcludingThis(
    MallocSizeOf aMallocSizeOf) const {
  return usingInlineStorage() ? 0 : aMallocSizeOf(beginNoCheck());
}
template <typename T, size_t N, class AP>
inline size_t Vector<T, N, AP>::sizeOfIncludingThis(
    MallocSizeOf aMallocSizeOf) const {
  return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
}
template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::swap(Vector& aOther) {
  static_assert(N == 0, "still need to implement this for N != 0");

  // This only works when inline storage is always empty.
  if (!usingInlineStorage() && aOther.usingInlineStorage()) {
    aOther.mBegin = mBegin;
    mBegin = inlineStorage();
  } else if (usingInlineStorage() && !aOther.usingInlineStorage()) {
    mBegin = aOther.mBegin;
    aOther.mBegin = aOther.inlineStorage();
  } else if (!usingInlineStorage() && !aOther.usingInlineStorage()) {
    std::swap(mBegin, aOther.mBegin);
  } else {
    // This case is a no-op, since we'd set both to use their inline storage.
  }

  std::swap(mLength, aOther.mLength);
  std::swap(mTail.mCapacity, aOther.mTail.mCapacity);
#ifdef DEBUG
  std::swap(mTail.mReserved, aOther.mTail.mReserved);
#endif
}
}  // namespace mozilla

#endif /* mozilla_Vector_h */