/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef mozilla_BufferList_h
#define mozilla_BufferList_h

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <type_traits>
#include <utility>

#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/Maybe.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Vector.h"

// BufferList represents a sequence of buffers of data. A BufferList can choose
// to own its buffers or not. The class handles writing to the buffers,
// iterating over them, and reading data out. Unlike SegmentedVector, the
// buffers may be of unequal size. Like SegmentedVector, BufferList is a nice
// way to avoid large contiguous allocations (which can trigger OOMs).
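//
// A rough usage sketch (`data`, `len`, and the 4096-byte segment capacity are
// hypothetical, and an infallible alloc policy is assumed so the constructor
// itself may allocate):
//
//   mozilla::BufferList<InfallibleAllocPolicy> list(0, 0, 4096);
//   if (list.WriteBytes(data, len)) {
//     char header[16];
//     auto iter = list.Iter();
//     if (list.ReadBytes(iter, header, sizeof(header))) {
//       // header now holds the first 16 bytes, even if they spanned segments.
//     }
//   }
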
class InfallibleAllocPolicy;

namespace mozilla {

template <typename AllocPolicy>
class BufferList : private AllocPolicy {
  // Each buffer in a BufferList has a size and a capacity. The first mSize
  // bytes are initialized and the remaining |mCapacity - mSize| bytes are free.
  struct Segment {
    char* mData;
    size_t mSize;
    size_t mCapacity;

    Segment(char* aData, size_t aSize, size_t aCapacity)
        : mData(aData), mSize(aSize), mCapacity(aCapacity) {}

    Segment(const Segment&) = delete;
    Segment& operator=(const Segment&) = delete;
    Segment(Segment&&) = default;
    Segment& operator=(Segment&&) = default;

    char* Start() const { return mData; }
    char* End() const { return mData + mSize; }
  };

  template <typename OtherAllocPolicy>
  friend class BufferList;

 public:
  // For the convenience of callers, all segments are required to be a multiple
  // of 8 bytes in capacity. Also, every buffer except the last one is required
  // to be full (i.e., size == capacity). Therefore, a byte at offset N within
  // the BufferList and stored in memory at an address A will satisfy
  // (N % Align == A % Align) if Align == 2, 4, or 8.
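  //
  // For example, if the first segment is full at a 16-byte capacity, the byte
  // at BufferList offset N = 20 sits at offset 4 within the second segment;
  // with that segment's allocation at least 8-byte aligned, its address A
  // satisfies A % 8 == 4 == N % 8 (and likewise for alignments 2 and 4).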
  static const size_t kSegmentAlignment = 8;

  // Allocate a BufferList. The BufferList will free all its buffers when it is
  // destroyed. If an infallible allocator is used, an initial buffer of size
  // aInitialSize and capacity aInitialCapacity is allocated automatically. This
  // data will be contiguous and can be accessed via |Start()|. If a fallible
  // alloc policy is used, aInitialSize must be 0, and the fallible |Init()|
  // method may be called instead. Subsequent buffers will be allocated with
  // capacity aStandardCapacity.
  BufferList(size_t aInitialSize, size_t aInitialCapacity,
             size_t aStandardCapacity, AllocPolicy aAP = AllocPolicy())
      : AllocPolicy(aAP),
        mOwning(true),
        mSize(0),
        mStandardCapacity(aStandardCapacity) {
    MOZ_ASSERT(aInitialCapacity % kSegmentAlignment == 0);
    MOZ_ASSERT(aStandardCapacity % kSegmentAlignment == 0);

    if (aInitialCapacity) {
      MOZ_ASSERT((aInitialSize == 0 ||
                  std::is_same_v<AllocPolicy, InfallibleAllocPolicy>),
                 "BufferList may only be constructed with an initial size when "
                 "using an infallible alloc policy");

      AllocateSegment(aInitialSize, aInitialCapacity);
    }
  }

  BufferList(const BufferList& aOther) = delete;

  BufferList(BufferList&& aOther)
      : mOwning(aOther.mOwning),
        mSegments(std::move(aOther.mSegments)),
        mSize(aOther.mSize),
        mStandardCapacity(aOther.mStandardCapacity) {
    aOther.mSegments.clear();
    aOther.mSize = 0;
  }

  BufferList& operator=(const BufferList& aOther) = delete;

  BufferList& operator=(BufferList&& aOther) {
    Clear();

    mOwning = aOther.mOwning;
    mSegments = std::move(aOther.mSegments);
    mSize = aOther.mSize;
    aOther.mSegments.clear();
    aOther.mSize = 0;
    return *this;
  }

  ~BufferList() { Clear(); }

  // Initializes the BufferList with a segment of the given size and capacity.
  // May only be called once, before any segments have been allocated.
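  //
  // A minimal sketch of the fallible two-phase pattern (MallocAllocPolicy and
  // the 4096-byte capacity are merely illustrative):
  //
  //   BufferList<MallocAllocPolicy> list(0, 0, 4096);
  //   if (!list.Init(0, 4096)) {
  //     return false;  // the initial segment could not be allocated
  //   }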
  bool Init(size_t aInitialSize, size_t aInitialCapacity) {
    MOZ_ASSERT(mSegments.empty());
    MOZ_ASSERT(aInitialCapacity != 0);
    MOZ_ASSERT(aInitialCapacity % kSegmentAlignment == 0);

    return AllocateSegment(aInitialSize, aInitialCapacity);
  }

  bool CopyFrom(const BufferList& aOther) {
    MOZ_ASSERT(mOwning);

    Clear();

    // We don't make an exact copy of aOther. Instead, create a single segment
    // with enough space to hold all data in aOther.
    if (!Init(aOther.mSize, (aOther.mSize + kSegmentAlignment - 1) &
                                ~(kSegmentAlignment - 1))) {
      return false;
    }

    size_t offset = 0;
    for (const Segment& segment : aOther.mSegments) {
      memcpy(Start() + offset, segment.mData, segment.mSize);
      offset += segment.mSize;
    }
    MOZ_ASSERT(offset == mSize);

    return true;
  }

  // Returns the sum of the sizes of all the buffers.
  size_t Size() const { return mSize; }

  size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) {
    size_t size = mSegments.sizeOfExcludingThis(aMallocSizeOf);
    for (Segment& segment : mSegments) {
      size += aMallocSizeOf(segment.Start());
    }
    return size;
  }

  void Clear() {
    if (mOwning) {
      for (Segment& segment : mSegments) {
        this->free_(segment.mData, segment.mCapacity);
      }
    }

    mSegments.clear();
    mSize = 0;
  }

  // Iterates over bytes in the segments. You can advance it by as many bytes
  // as you want, as long as you don't pass the end of the BufferList.
  class IterImpl {
    // Invariants:
    //   (0) mSegment <= bufferList.mSegments.length()
    //   (1) mData <= mDataEnd
    //   (2) If mSegment is not the last segment, mData < mDataEnd
    uintptr_t mSegment{0};
    char* mData{nullptr};
    char* mDataEnd{nullptr};
    size_t mAbsoluteOffset{0};

    friend class BufferList;

   public:
    explicit IterImpl(const BufferList& aBuffers) {
      if (!aBuffers.mSegments.empty()) {
        mData = aBuffers.mSegments[0].Start();
        mDataEnd = aBuffers.mSegments[0].End();
      }
    }

    // Returns a pointer to the raw data. It is valid to access up to
    // RemainingInSegment bytes of this buffer.
    char* Data() const {
      MOZ_RELEASE_ASSERT(!Done());
      return mData;
    }

    // Returns true if the memory in the range [Data(), Data() + aBytes) is all
    // part of one contiguous buffer.
    bool HasRoomFor(size_t aBytes) const {
      return RemainingInSegment() >= aBytes;
    }

    // Returns the largest value aBytes for which HasRoomFor(aBytes) will be
    // true.
    size_t RemainingInSegment() const {
      MOZ_RELEASE_ASSERT(mData <= mDataEnd);
      return mDataEnd - mData;
    }

    // Returns true if there are at least aBytes entries remaining in the
    // BufferList after this iterator.
    bool HasBytesAvailable(const BufferList& aBuffers, size_t aBytes) const {
      return TotalBytesAvailable(aBuffers) >= aBytes;
    }

    // Returns the largest value `aBytes` for which HasBytesAvailable(aBytes)
    // will return true.
    size_t TotalBytesAvailable(const BufferList& aBuffers) const {
      return aBuffers.mSize - mAbsoluteOffset;
    }

    // Advances the iterator by aBytes bytes. aBytes must not exceed
    // RemainingInSegment(). If advancing by aBytes takes the iterator to the
    // end of a buffer, it will be moved to the beginning of the next buffer
    // unless it is the last buffer.
    void Advance(const BufferList& aBuffers, size_t aBytes) {
      const Segment& segment = aBuffers.mSegments[mSegment];
      MOZ_RELEASE_ASSERT(segment.Start() <= mData);
      MOZ_RELEASE_ASSERT(mData <= mDataEnd);
      MOZ_RELEASE_ASSERT(mDataEnd == segment.End());

      MOZ_RELEASE_ASSERT(HasRoomFor(aBytes));
      mData += aBytes;
      mAbsoluteOffset += aBytes;

      if (mData == mDataEnd && mSegment + 1 < aBuffers.mSegments.length()) {
        mSegment++;
        const Segment& nextSegment = aBuffers.mSegments[mSegment];
        mData = nextSegment.Start();
        mDataEnd = nextSegment.End();
        MOZ_RELEASE_ASSERT(mData < mDataEnd);
      }
    }

    // Advance the iterator by aBytes, possibly crossing segments. This function
    // returns false if it runs out of buffers to advance through. Otherwise it
    // returns true.
    bool AdvanceAcrossSegments(const BufferList& aBuffers, size_t aBytes) {
      // If we don't need to cross segments, we can directly use `Advance` to
      // get to our destination.
      if (MOZ_LIKELY(aBytes <= RemainingInSegment())) {
        Advance(aBuffers, aBytes);
        return true;
      }

      // Check if we have enough bytes to scan this far forward.
      if (!HasBytesAvailable(aBuffers, aBytes)) {
        return false;
      }

      // Compare the distance to our target offset from the end of the
      // BufferList to the distance from the start of our next segment.
      // Depending on which is closer, we'll advance either forwards or
      // backwards.
      size_t targetOffset = mAbsoluteOffset + aBytes;
      size_t fromEnd = aBuffers.mSize - targetOffset;
      if (aBytes - RemainingInSegment() < fromEnd) {
        // Advance through the buffer list until we reach the desired absolute
        // offset.
        while (mAbsoluteOffset < targetOffset) {
          Advance(aBuffers, std::min(targetOffset - mAbsoluteOffset,
                                     RemainingInSegment()));
        }
        MOZ_ASSERT(mAbsoluteOffset == targetOffset);
        return true;
      }

      // Scanning starting from the end of the BufferList. We advance
      // backwards from the final segment until we find the segment to end in.
      //
      // If we end on a segment boundary, make sure to place the cursor at the
      // beginning of the next segment.
      mSegment = aBuffers.mSegments.length() - 1;
      while (fromEnd > aBuffers.mSegments[mSegment].mSize) {
        fromEnd -= aBuffers.mSegments[mSegment].mSize;
        mSegment--;
      }
      mDataEnd = aBuffers.mSegments[mSegment].End();
      mData = mDataEnd - fromEnd;
      mAbsoluteOffset = targetOffset;
      MOZ_ASSERT_IF(Done(), mSegment == aBuffers.mSegments.length() - 1);
      MOZ_ASSERT_IF(Done(), mAbsoluteOffset == aBuffers.mSize);
      return true;
    }

    // Returns true when the iterator reaches the end of the BufferList.
    bool Done() const { return mData == mDataEnd; }

    // The absolute offset of this iterator within the BufferList.
    size_t AbsoluteOffset() const { return mAbsoluteOffset; }

   private:
    bool IsIn(const BufferList& aBuffers) const {
      return mSegment < aBuffers.mSegments.length() &&
             mData >= aBuffers.mSegments[mSegment].mData &&
             mData < aBuffers.mSegments[mSegment].End();
    }
  };

  // Special convenience method that returns Iter().Data().
  char* Start() {
    MOZ_RELEASE_ASSERT(!mSegments.empty());
    return mSegments[0].mData;
  }
  const char* Start() const { return mSegments[0].mData; }

  IterImpl Iter() const { return IterImpl(*this); }

  // Copies aSize bytes from aData into the BufferList. The storage for these
  // bytes may be split across multiple buffers. Size() is increased by aSize.
  [[nodiscard]] inline bool WriteBytes(const char* aData, size_t aSize);

  // Allocates a buffer of at most |aMaxSize| bytes and, if successful, returns
  // that buffer, and places its size in |aSize|. If unsuccessful, returns null
  // and leaves |aSize| undefined.
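  //
  // Sketch of writing into the list without an intermediate copy (`wanted` and
  // `src` are hypothetical):
  //
  //   size_t got;
  //   if (char* dst = list.AllocateBytes(wanted, &got)) {
  //     memcpy(dst, src, got);  // got <= wanted; fill the space just reserved
  //   }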
  inline char* AllocateBytes(size_t aMaxSize, size_t* aSize);

  // Copies a possibly non-contiguous byte range starting at aIter into
  // aData. aIter is advanced by aSize bytes. Returns false if it runs out of
  // data before aSize.
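  //
  // Sketch of a checked read into a local (the uint32_t length field is
  // illustrative):
  //
  //   auto iter = list.Iter();
  //   uint32_t length;
  //   if (!list.ReadBytes(iter, reinterpret_cast<char*>(&length),
  //                       sizeof(length))) {
  //     return false;  // fewer than sizeof(length) bytes were available
  //   }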
  inline bool ReadBytes(IterImpl& aIter, char* aData, size_t aSize) const;

  // Return a new BufferList that shares storage with this BufferList. The new
  // BufferList is read-only. It allows iteration over aSize bytes starting at
  // aIter. Borrow can fail, in which case *aSuccess will be false upon
  // return. The borrowed BufferList can use a different AllocPolicy than the
  // original one. However, it is not responsible for freeing buffers, so the
  // AllocPolicy is only used for the buffer vector.
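  //
  // Sketch (MallocAllocPolicy and the 64-byte length are illustrative; the
  // borrowed list shares the underlying storage, so it should not outlive
  // this BufferList):
  //
  //   bool ok = false;
  //   auto iter = list.Iter();
  //   auto view = list.Borrow<MallocAllocPolicy>(iter, 64, &ok);
  //   if (!ok) {
  //     // not enough data after iter, or the segment vector allocation failed
  //   }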
  template <typename BorrowingAllocPolicy>
  BufferList<BorrowingAllocPolicy> Borrow(
      IterImpl& aIter, size_t aSize, bool* aSuccess,
      BorrowingAllocPolicy aAP = BorrowingAllocPolicy()) const;

  // Return a new BufferList and move storage from this BufferList to it. The
  // new BufferList owns the buffers. Move can fail, in which case *aSuccess
  // will be false upon return. The new BufferList can use a different
  // AllocPolicy than the original one. The new OtherAllocPolicy is responsible
  // for freeing buffers, so the OtherAllocPolicy must use a freeing method
  // compatible with the original one.
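  //
  // Sketch (MallocAllocPolicy is illustrative; it is assumed to free buffers
  // the same way as the source policy):
  //
  //   bool ok = false;
  //   auto moved = list.MoveFallible<MallocAllocPolicy>(&ok);
  //   if (ok) {
  //     // `moved` now owns the buffers and `list` is empty
  //   }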
  template <typename OtherAllocPolicy>
  BufferList<OtherAllocPolicy> MoveFallible(
      bool* aSuccess, OtherAllocPolicy aAP = OtherAllocPolicy());

  // Return a new BufferList that adopts the byte range starting at aIter so
  // that the range [aIter, aIter + aSize) is transplanted to the returned
  // BufferList. The contents of the buffer before aIter + aSize are left
  // undefined.
  // Extract can fail, in which case *aSuccess will be false upon return. The
  // moved buffers are erased from the original BufferList. If the extraction
  // fails, the original BufferList is left intact. All other iterators except
  // aIter are invalidated.
  // This method requires aIter and aSize to be 8-byte aligned.
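  //
  // Sketch (the 1024-byte length is illustrative and is a multiple of
  // kSegmentAlignment, as required):
  //
  //   bool ok = false;
  //   auto iter = list.Iter();
  //   auto payload = list.Extract(iter, 1024, &ok);
  //   if (!ok) {
  //     // not enough data or an allocation failed; `list` is unchanged
  //   }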
  BufferList Extract(IterImpl& aIter, size_t aSize, bool* aSuccess);

  // Return the number of bytes from 'start' to 'end', two iterators within
  // this BufferList.
  size_t RangeLength(const IterImpl& start, const IterImpl& end) const {
    MOZ_ASSERT(start.IsIn(*this) && end.IsIn(*this));
    return end.mAbsoluteOffset - start.mAbsoluteOffset;
  }

  // This takes ownership of the data.
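  //
  // Sketch (`used` is hypothetical; the buffer must have been allocated in a
  // way that this list's AllocPolicy::free_ can release, and the capacity
  // should be a multiple of kSegmentAlignment):
  //
  //   char* buf = static_cast<char*>(malloc(4096));
  //   // ... write `used` bytes into buf ...
  //   if (!list.WriteBytesZeroCopy(buf, used, 4096)) {
  //     // append failed; buf has already been freed by the BufferList
  //   }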
  void* WriteBytesZeroCopy(char* aData, size_t aSize, size_t aCapacity) {
    MOZ_ASSERT(aCapacity != 0);
    MOZ_ASSERT(aSize <= aCapacity);
    MOZ_ASSERT(aCapacity % kSegmentAlignment == 0);

    if (!mSegments.append(Segment(aData, aSize, aCapacity))) {
      this->free_(aData, aCapacity);
      return nullptr;
    }
    mSize += aSize;
    return aData;
  }

  // Truncate this BufferList at the given iterator location, discarding all
  // data after this point. After this call, all other iterators will be
  // invalidated, and the passed-in iterator will be "Done".
  //
  // Returns the number of bytes discarded by this truncation.
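  //
  // Sketch: keep only the first 16 bytes (the size is illustrative):
  //
  //   auto iter = list.Iter();
  //   if (iter.AdvanceAcrossSegments(list, 16)) {
  //     size_t dropped = list.Truncate(iter);  // list.Size() is now 16
  //   }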
  size_t Truncate(IterImpl& aIter);

 private:
  explicit BufferList(AllocPolicy aAP)
      : AllocPolicy(aAP), mOwning(false), mSize(0), mStandardCapacity(0) {}

  char* AllocateSegment(size_t aSize, size_t aCapacity) {
    MOZ_RELEASE_ASSERT(mOwning);
    MOZ_ASSERT(aCapacity != 0);
    MOZ_ASSERT(aSize <= aCapacity);

    char* data = this->template pod_malloc<char>(aCapacity);
    if (!data) {
      return nullptr;
    }
    if (!mSegments.append(Segment(data, aSize, aCapacity))) {
      this->free_(data, aCapacity);
      return nullptr;
    }
    mSize += aSize;
    return data;
  }

  void AssertConsistentSize() const {
#ifdef DEBUG
    size_t realSize = 0;
    for (const auto& segment : mSegments) {
      realSize += segment.mSize;
    }
    MOZ_ASSERT(realSize == mSize, "cached size value is inconsistent!");
#endif
  }

  bool mOwning;
  Vector<Segment, 1, AllocPolicy> mSegments;
  size_t mSize;
  size_t mStandardCapacity;
};

template <typename AllocPolicy>
[[nodiscard]] bool BufferList<AllocPolicy>::WriteBytes(const char* aData,
                                                       size_t aSize) {
  MOZ_RELEASE_ASSERT(mOwning);
  MOZ_RELEASE_ASSERT(mStandardCapacity);

  size_t copied = 0;
  while (copied < aSize) {
    size_t toCopy;
    char* data = AllocateBytes(aSize - copied, &toCopy);
    if (!data) {
      return false;
    }
    memcpy(data, aData + copied, toCopy);
    copied += toCopy;
  }

  return true;
}

template <typename AllocPolicy>
char* BufferList<AllocPolicy>::AllocateBytes(size_t aMaxSize, size_t* aSize) {
  MOZ_RELEASE_ASSERT(mOwning);
  MOZ_RELEASE_ASSERT(mStandardCapacity);

  if (!mSegments.empty()) {
    Segment& lastSegment = mSegments.back();

    size_t capacity = lastSegment.mCapacity - lastSegment.mSize;
    if (capacity) {
      size_t size = std::min(aMaxSize, capacity);
      char* data = lastSegment.mData + lastSegment.mSize;

      lastSegment.mSize += size;
      mSize += size;

      *aSize = size;
      return data;
    }
  }

  size_t size = std::min(aMaxSize, mStandardCapacity);
  char* data = AllocateSegment(size, mStandardCapacity);
  if (data) {
    *aSize = size;
  }
  return data;
}

template <typename AllocPolicy>
bool BufferList<AllocPolicy>::ReadBytes(IterImpl& aIter, char* aData,
                                        size_t aSize) const {
  size_t copied = 0;
  size_t remaining = aSize;
  while (remaining) {
    size_t toCopy = std::min(aIter.RemainingInSegment(), remaining);
    if (!toCopy) {
      // We've run out of data in the last segment.
      return false;
    }
    memcpy(aData + copied, aIter.Data(), toCopy);
    copied += toCopy;
    remaining -= toCopy;

    aIter.Advance(*this, toCopy);
  }

  return true;
}

template <typename AllocPolicy>
template <typename BorrowingAllocPolicy>
BufferList<BorrowingAllocPolicy> BufferList<AllocPolicy>::Borrow(
    IterImpl& aIter, size_t aSize, bool* aSuccess,
    BorrowingAllocPolicy aAP) const {
  BufferList<BorrowingAllocPolicy> result(aAP);

  size_t size = aSize;
  while (size) {
    size_t toAdvance = std::min(size, aIter.RemainingInSegment());

    if (!toAdvance || !result.mSegments.append(
                          typename BufferList<BorrowingAllocPolicy>::Segment(
                              aIter.mData, toAdvance, toAdvance))) {
      *aSuccess = false;
      return result;
    }
    aIter.Advance(*this, toAdvance);
    size -= toAdvance;
  }

  result.mSize = aSize;
  *aSuccess = true;
  return result;
}

template <typename AllocPolicy>
template <typename OtherAllocPolicy>
BufferList<OtherAllocPolicy> BufferList<AllocPolicy>::MoveFallible(
    bool* aSuccess, OtherAllocPolicy aAP) {
  BufferList<OtherAllocPolicy> result(0, 0, mStandardCapacity, aAP);

  IterImpl iter = Iter();
  while (!iter.Done()) {
    size_t toAdvance = iter.RemainingInSegment();

    if (!toAdvance ||
        !result.mSegments.append(typename BufferList<OtherAllocPolicy>::Segment(
            iter.mData, toAdvance, toAdvance))) {
      *aSuccess = false;
      result.mSegments.clear();
      return result;
    }
    iter.Advance(*this, toAdvance);
  }

  result.mSize = mSize;
  mSegments.clear();
  mSize = 0;
  *aSuccess = true;
  return result;
}

template <typename AllocPolicy>
BufferList<AllocPolicy> BufferList<AllocPolicy>::Extract(IterImpl& aIter,
                                                         size_t aSize,
                                                         bool* aSuccess) {
  MOZ_RELEASE_ASSERT(aSize);
  MOZ_RELEASE_ASSERT(mOwning);
  MOZ_ASSERT(aSize % kSegmentAlignment == 0);
  MOZ_ASSERT(intptr_t(aIter.mData) % kSegmentAlignment == 0);

  auto failure = [this, aSuccess]() {
    *aSuccess = false;
    return BufferList(0, 0, mStandardCapacity);
  };

  // Number of segments we'll need to copy data from to satisfy the request.
  size_t segmentsNeeded = 0;
  // If this is None then the last segment is a full segment, otherwise we need
  // to copy this many bytes.
  Maybe<size_t> lastSegmentSize;
  {
    // Copy of the iterator to walk the BufferList and see how many segments we
    // need to copy.
    IterImpl iter = aIter;
    size_t remaining = aSize;
    while (!iter.Done() && remaining &&
           remaining >= iter.RemainingInSegment()) {
      remaining -= iter.RemainingInSegment();
      iter.Advance(*this, iter.RemainingInSegment());
      segmentsNeeded++;
    }

    if (remaining) {
      if (iter.Done()) {
        // We reached the end of the BufferList and there wasn't enough data to
        // satisfy the request.
        return failure();
      }
      lastSegmentSize.emplace(remaining);
      // The last block also counts as a segment. This makes the conditionals
      // on segmentsNeeded work in the rest of the function.
      segmentsNeeded++;
    }
  }

  BufferList result(0, 0, mStandardCapacity);
  if (!result.mSegments.reserve(segmentsNeeded + lastSegmentSize.isSome())) {
    return failure();
  }

  // Copy the first segment, it's special because we can't just steal the
  // entire Segment struct from this->mSegments.
  //
  // As we leave the data before the new `aIter` position as "unspecified", we
  // leave this data in the existing buffer, despite copying it into the new
  // buffer.
  size_t firstSegmentSize = std::min(aSize, aIter.RemainingInSegment());
  if (!result.WriteBytes(aIter.Data(), firstSegmentSize)) {
    return failure();
  }
  aIter.Advance(*this, firstSegmentSize);

  // The entirety of the request wasn't in the first segment, now copy the
  // rest of the segments.
  if (segmentsNeeded) {
    size_t finalSegmentCapacity = 0;
    char* finalSegment = nullptr;
    // Pre-allocate the final segment so that if this fails, we return before
    // we delete the elements from |this->mSegments|.
    if (lastSegmentSize.isSome()) {
      finalSegmentCapacity = std::max(mStandardCapacity, *lastSegmentSize);
      finalSegment = this->template pod_malloc<char>(finalSegmentCapacity);
      if (!finalSegment) {
        return failure();
      }
    }

    size_t removedBytes = 0;
    size_t copyStart = aIter.mSegment;
    // Copy segments from this over to the result and remove them from our
    // storage. Not needed if the only segment we need to copy is the last
    // partial one.
    size_t segmentsToCopy = segmentsNeeded - lastSegmentSize.isSome();
    for (size_t i = 0; i < segmentsToCopy; ++i) {
      result.mSegments.infallibleAppend(Segment(
          mSegments[aIter.mSegment].mData, mSegments[aIter.mSegment].mSize,
          mSegments[aIter.mSegment].mCapacity));
      removedBytes += mSegments[aIter.mSegment].mSize;
      aIter.Advance(*this, aIter.RemainingInSegment());
    }
    // Due to the way IterImpl works, there are two cases here: (1) if we've
    // consumed the entirety of the BufferList, then the iterator is pointed at
    // the end of the final segment, (2) otherwise it is pointed at the start
    // of the next segment. We want to verify that we really consumed all
    // |segmentsToCopy| segments.
    MOZ_RELEASE_ASSERT(
        (aIter.mSegment == copyStart + segmentsToCopy) ||
        (aIter.Done() && aIter.mSegment == copyStart + segmentsToCopy - 1));
    mSegments.erase(mSegments.begin() + copyStart,
                    mSegments.begin() + copyStart + segmentsToCopy);

    // Reset the iter's position for what we just deleted.
    aIter.mSegment -= segmentsToCopy;
    aIter.mAbsoluteOffset -= removedBytes;
    mSize -= removedBytes;

    // If our iterator is already at the end, we just removed the very last
    // segment of our buffer list and need to shift the iterator back to point
    // at the end of the previous segment.
    if (aIter.Done()) {
      MOZ_ASSERT(lastSegmentSize.isNothing());
      if (mSegments.empty()) {
        MOZ_ASSERT(aIter.mSegment == 0);
        aIter.mData = aIter.mDataEnd = nullptr;
      } else {
        MOZ_ASSERT(aIter.mSegment == mSegments.length() - 1);
        aIter.mData = aIter.mDataEnd = mSegments.back().End();
      }
    }

    if (lastSegmentSize.isSome()) {
      // We called reserve() on result.mSegments so infallibleAppend is safe.
      result.mSegments.infallibleAppend(
          Segment(finalSegment, 0, finalSegmentCapacity));
      bool r = result.WriteBytes(aIter.Data(), *lastSegmentSize);
      MOZ_RELEASE_ASSERT(r);
      aIter.Advance(*this, *lastSegmentSize);
    }
  }

  result.mSize = aSize;

  AssertConsistentSize();
  result.AssertConsistentSize();

  // Ensure that the iterator is still valid when Extract returns.
#ifdef DEBUG
  if (!mSegments.empty()) {
    auto& segment = mSegments[aIter.mSegment];
    MOZ_ASSERT(segment.Start() <= aIter.mData);
    MOZ_ASSERT(aIter.mDataEnd == segment.End());
  }
#endif

  *aSuccess = true;
  return result;
}

template <typename AllocPolicy>
size_t BufferList<AllocPolicy>::Truncate(IterImpl& aIter) {
  MOZ_ASSERT(aIter.IsIn(*this) || aIter.Done());
  if (aIter.Done()) {
    return 0;
  }

  size_t prevSize = mSize;

  // Remove any segments after the iterator's current segment.
  while (mSegments.length() > aIter.mSegment + 1) {
    Segment& toFree = mSegments.back();
    mSize -= toFree.mSize;
    if (mOwning) {
      this->free_(toFree.mData, toFree.mCapacity);
    }
    mSegments.popBack();
  }

  // The last segment is now aIter's current segment. Truncate or remove it.
  Segment& seg = mSegments.back();
  MOZ_ASSERT(aIter.mDataEnd == seg.End());
  mSize -= aIter.RemainingInSegment();
  seg.mSize -= aIter.RemainingInSegment();
  if (!seg.mSize) {
    if (mOwning) {
      this->free_(seg.mData, seg.mCapacity);
    }
    mSegments.popBack();
  }

  // Correct `aIter` to point to the new end of the BufferList.
  if (mSegments.empty()) {
    MOZ_ASSERT(mSize == 0);
    aIter.mSegment = 0;
    aIter.mData = aIter.mDataEnd = nullptr;
  } else {
    aIter.mSegment = mSegments.length() - 1;
    aIter.mData = aIter.mDataEnd = mSegments.back().End();
  }
  MOZ_ASSERT(aIter.Done());

  AssertConsistentSize();
  return prevSize - mSize;
}

} // namespace mozilla

#endif /* mozilla_BufferList_h */