/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef mozilla_BufferList_h
#define mozilla_BufferList_h

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <numeric>

#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/Maybe.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Vector.h"

// BufferList represents a sequence of buffers of data. A BufferList can choose
// to own its buffers or not. The class handles writing to the buffers,
// iterating over them, and reading data out. Unlike SegmentedVector, the
// buffers may be of unequal size. Like SegmentedVector, BufferList is a nice
// way to avoid large contiguous allocations (which can trigger OOMs).
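//
// Illustrative usage sketch (not part of the original header; the alloc
// policy, the 4096-byte standard capacity, and the |srcBytes|/|srcLen|/|out|
// names are assumptions):
//
//   mozilla::BufferList<InfallibleAllocPolicy> list(0, 0, 4096);
//   if (list.WriteBytes(srcBytes, srcLen)) {
//     auto iter = list.Iter();
//     char out[16];
//     if (list.ReadBytes(iter, out, sizeof(out))) {
//       // The first 16 bytes of |list| have been copied into |out|, and
//       // |iter| now points 16 bytes into the list.
//     }
//   }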

class InfallibleAllocPolicy;

namespace mozilla {

template <typename AllocPolicy>
class BufferList : private AllocPolicy {
  // Each buffer in a BufferList has a size and a capacity. The first mSize
  // bytes are initialized and the remaining |mCapacity - mSize| bytes are free.
  struct Segment {
    char* mData;
    size_t mSize;
    size_t mCapacity;

    Segment(char* aData, size_t aSize, size_t aCapacity)
        : mData(aData), mSize(aSize), mCapacity(aCapacity) {}

    Segment(const Segment&) = delete;
    Segment& operator=(const Segment&) = delete;

    Segment(Segment&&) = default;
    Segment& operator=(Segment&&) = default;

    char* Start() const { return mData; }
    char* End() const { return mData + mSize; }
  };

  template <typename OtherAllocPolicy>
  friend class BufferList;

 public:
  // For the convenience of callers, all segments are required to be a multiple
  // of 8 bytes in capacity. Also, every buffer except the last one is required
  // to be full (i.e., size == capacity). Therefore, a byte at offset N within
  // the BufferList and stored in memory at an address A will satisfy
  // (N % Align == A % Align) if Align == 2, 4, or 8.
  static const size_t kSegmentAlignment = 8;
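
  // As a worked instance of the guarantee above (illustrative, assuming the
  // allocator returns 8-byte-aligned segments, as malloc does): the byte at
  // offset N == 20 is stored at an address A with A % 8 == 20 % 8 == 4 and
  // A % 4 == 20 % 4 == 0, so a 4-byte value written at that offset stays
  // naturally aligned in memory.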

  // Allocate a BufferList. The BufferList will free all its buffers when it is
  // destroyed. If an infallible allocator is used, an initial buffer of size
  // aInitialSize and capacity aInitialCapacity is allocated automatically. This
  // data will be contiguous and can be accessed via |Start()|. If a fallible
  // alloc policy is used, aInitialSize must be 0, and the fallible |Init()|
  // method may be called instead. Subsequent buffers will be allocated with
  // capacity aStandardCapacity.
  BufferList(size_t aInitialSize, size_t aInitialCapacity,
             size_t aStandardCapacity, AllocPolicy aAP = AllocPolicy())
      : AllocPolicy(aAP),
        mOwning(true),
        mSegments(aAP),
        mSize(0),
        mStandardCapacity(aStandardCapacity) {
    MOZ_ASSERT(aInitialCapacity % kSegmentAlignment == 0);
    MOZ_ASSERT(aStandardCapacity % kSegmentAlignment == 0);

    if (aInitialCapacity) {
      MOZ_ASSERT((aInitialSize == 0 ||
                  std::is_same_v<AllocPolicy, InfallibleAllocPolicy>),
                 "BufferList may only be constructed with an initial size when "
                 "using an infallible alloc policy");

      AllocateSegment(aInitialSize, aInitialCapacity);
    }
  }

  BufferList(const BufferList& aOther) = delete;

  BufferList(BufferList&& aOther)
      : mOwning(aOther.mOwning),
        mSegments(std::move(aOther.mSegments)),
        mSize(aOther.mSize),
        mStandardCapacity(aOther.mStandardCapacity) {
    aOther.mSegments.clear();
    aOther.mSize = 0;
  }

  BufferList& operator=(const BufferList& aOther) = delete;

  BufferList& operator=(BufferList&& aOther) {
    Clear();

    mOwning = aOther.mOwning;
    mSegments = std::move(aOther.mSegments);
    mSize = aOther.mSize;
    aOther.mSegments.clear();
    aOther.mSize = 0;
    return *this;
  }

  ~BufferList() { Clear(); }

  // Initializes the BufferList with a segment of the given size and capacity.
  // May only be called once, before any segments have been allocated.
  bool Init(size_t aInitialSize, size_t aInitialCapacity) {
    MOZ_ASSERT(mSegments.empty());
    MOZ_ASSERT(aInitialCapacity != 0);
    MOZ_ASSERT(aInitialCapacity % kSegmentAlignment == 0);

    return AllocateSegment(aInitialSize, aInitialCapacity);
  }

  bool CopyFrom(const BufferList& aOther) {
    MOZ_ASSERT(mOwning);

    Clear();

    // We don't make an exact copy of aOther. Instead, create a single segment
    // with enough space to hold all data in aOther.
    if (!Init(aOther.mSize, (aOther.mSize + kSegmentAlignment - 1) &
                                ~(kSegmentAlignment - 1))) {
      return false;
    }

    size_t offset = 0;
    for (const Segment& segment : aOther.mSegments) {
      memcpy(Start() + offset, segment.mData, segment.mSize);
      offset += segment.mSize;
    }
    MOZ_ASSERT(offset == mSize);

    return true;
  }

  // Returns the sum of the sizes of all the buffers.
  size_t Size() const { return mSize; }

  size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) {
    size_t size = mSegments.sizeOfExcludingThis(aMallocSizeOf);
    for (Segment& segment : mSegments) {
      size += aMallocSizeOf(segment.Start());
    }
    return size;
  }

  void Clear() {
    if (mOwning) {
      for (Segment& segment : mSegments) {
        this->free_(segment.mData, segment.mCapacity);
      }
    }
    mSegments.clear();

    mSize = 0;
  }

  // Iterates over bytes in the segments. You can advance it by as many bytes
  // as you choose.
  class IterImpl {
    // Invariants:
    //   (0) mSegment <= bufferList.mSegments.length()
    //   (1) mData <= mDataEnd
    //   (2) If mSegment is not the last segment, mData < mDataEnd
    uintptr_t mSegment{0};
    char* mData{nullptr};
    char* mDataEnd{nullptr};
    size_t mAbsoluteOffset{0};

    friend class BufferList;

   public:
    explicit IterImpl(const BufferList& aBuffers) {
      if (!aBuffers.mSegments.empty()) {
        mData = aBuffers.mSegments[0].Start();
        mDataEnd = aBuffers.mSegments[0].End();
      }
    }

    // Returns a pointer to the raw data. It is valid to access up to
    // RemainingInSegment bytes of this buffer.
    char* Data() const {
      MOZ_RELEASE_ASSERT(!Done());
      return mData;
    }

    bool operator==(const IterImpl& other) const {
      return mAbsoluteOffset == other.mAbsoluteOffset;
    }
    bool operator!=(const IterImpl& other) const { return !(*this == other); }

    // Returns true if the memory in the range [Data(), Data() + aBytes) is all
    // part of one contiguous buffer.
    bool HasRoomFor(size_t aBytes) const {
      return RemainingInSegment() >= aBytes;
    }

    // Returns the largest value aBytes for which HasRoomFor(aBytes) will be
    // true.
    size_t RemainingInSegment() const {
      MOZ_RELEASE_ASSERT(mData <= mDataEnd);
      return mDataEnd - mData;
    }

    // Returns true if there are at least aBytes entries remaining in the
    // BufferList after this iterator.
    bool HasBytesAvailable(const BufferList& aBuffers, size_t aBytes) const {
      return TotalBytesAvailable(aBuffers) >= aBytes;
    }

    // Returns the largest value `aBytes` for which HasBytesAvailable(aBytes)
    // will be true.
    size_t TotalBytesAvailable(const BufferList& aBuffers) const {
      return aBuffers.mSize - mAbsoluteOffset;
    }

    // Advances the iterator by aBytes bytes. aBytes must not exceed
    // RemainingInSegment(). If advancing by aBytes takes the iterator to the
    // end of a buffer, it will be moved to the beginning of the next buffer
    // unless it is the last buffer.
    void Advance(const BufferList& aBuffers, size_t aBytes) {
      const Segment& segment = aBuffers.mSegments[mSegment];
      MOZ_RELEASE_ASSERT(segment.Start() <= mData);
      MOZ_RELEASE_ASSERT(mData <= mDataEnd);
      MOZ_RELEASE_ASSERT(mDataEnd == segment.End());

      MOZ_RELEASE_ASSERT(HasRoomFor(aBytes));
      mData += aBytes;
      mAbsoluteOffset += aBytes;

      if (mData == mDataEnd && mSegment + 1 < aBuffers.mSegments.length()) {
        mSegment++;
        const Segment& nextSegment = aBuffers.mSegments[mSegment];
        mData = nextSegment.Start();
        mDataEnd = nextSegment.End();
        MOZ_RELEASE_ASSERT(mData < mDataEnd);
      }
    }

    // Advance the iterator by aBytes, possibly crossing segments. This
    // function returns false if it runs out of buffers to advance through.
    // Otherwise it returns true.
    bool AdvanceAcrossSegments(const BufferList& aBuffers, size_t aBytes) {
      // If we don't need to cross segments, we can directly use `Advance` to
      // get to our destination.
      if (MOZ_LIKELY(aBytes <= RemainingInSegment())) {
        Advance(aBuffers, aBytes);
        return true;
      }

      // Check if we have enough bytes to scan this far forward.
      if (!HasBytesAvailable(aBuffers, aBytes)) {
        return false;
      }

      // Compare the distance to our target offset from the end of the
      // BufferList to the distance from the start of our next segment.
      // Depending on which is closer, we'll advance either forwards or
      // backwards.
      size_t targetOffset = mAbsoluteOffset + aBytes;
      size_t fromEnd = aBuffers.mSize - targetOffset;
      if (aBytes - RemainingInSegment() < fromEnd) {
        // Advance through the buffer list until we reach the desired absolute
        // offset.
        while (mAbsoluteOffset < targetOffset) {
          Advance(aBuffers, std::min(targetOffset - mAbsoluteOffset,
                                     RemainingInSegment()));
        }
        MOZ_ASSERT(mAbsoluteOffset == targetOffset);
        return true;
      }

      // Scan starting from the end of the BufferList, advancing backwards
      // from the final segment until we find the segment to end in.
      //
      // If we end on a segment boundary, make sure to place the cursor at the
      // beginning of the next segment.
      mSegment = aBuffers.mSegments.length() - 1;
      while (fromEnd > aBuffers.mSegments[mSegment].mSize) {
        fromEnd -= aBuffers.mSegments[mSegment].mSize;
        mSegment--;
      }
      mDataEnd = aBuffers.mSegments[mSegment].End();
      mData = mDataEnd - fromEnd;
      mAbsoluteOffset = targetOffset;
      MOZ_ASSERT_IF(Done(), mSegment == aBuffers.mSegments.length() - 1);
      MOZ_ASSERT_IF(Done(), mAbsoluteOffset == aBuffers.mSize);
      return true;
    }

    // Returns true when the iterator reaches the end of the BufferList.
    bool Done() const { return mData == mDataEnd; }

    // The absolute offset of this iterator within the BufferList.
    size_t AbsoluteOffset() const { return mAbsoluteOffset; }

   private:
    bool IsIn(const BufferList& aBuffers) const {
      return mSegment < aBuffers.mSegments.length() &&
             mData >= aBuffers.mSegments[mSegment].mData &&
             mData < aBuffers.mSegments[mSegment].End();
    }
  };

  // Special convenience method that returns Iter().Data().
  char* Start() {
    MOZ_RELEASE_ASSERT(!mSegments.empty());
    return mSegments[0].mData;
  }
  const char* Start() const { return mSegments[0].mData; }

  IterImpl Iter() const { return IterImpl(*this); }

  // Copies aSize bytes from aData into the BufferList. The storage for these
  // bytes may be split across multiple buffers. Size() is increased by aSize.
  [[nodiscard]] inline bool WriteBytes(const char* aData, size_t aSize);

  // Allocates a buffer of at most |aMaxSize| bytes and, if successful, returns
  // that buffer, and places its size in |aSize|. If unsuccessful, returns null
  // and leaves |aSize| undefined.
  inline char* AllocateBytes(size_t aMaxSize, size_t* aSize);

  // Copies possibly non-contiguous byte range starting at aIter into
  // aData. aIter is advanced by aSize bytes. Returns false if it runs out of
  // data before aSize.
  inline bool ReadBytes(IterImpl& aIter, char* aData, size_t aSize) const;

  // Return a new BufferList that shares storage with this BufferList. The new
  // BufferList is read-only. It allows iteration over aSize bytes starting at
  // aIter. Borrow can fail, in which case *aSuccess will be false upon
  // return. The borrowed BufferList can use a different AllocPolicy than the
  // original one. However, it is not responsible for freeing buffers, so the
  // AllocPolicy is only used for the buffer vector.
  template <typename BorrowingAllocPolicy>
  BufferList<BorrowingAllocPolicy> Borrow(
      IterImpl& aIter, size_t aSize, bool* aSuccess,
      BorrowingAllocPolicy aAP = BorrowingAllocPolicy()) const;
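
  // Illustrative sketch (not from the original header; the policy, the
  // 64-byte length, and the variable names are assumptions): lend out the
  // next 64 bytes without copying.
  //
  //   bool ok = false;
  //   auto iter = list.Iter();
  //   auto view = list.Borrow<InfallibleAllocPolicy>(iter, 64, &ok);
  //   if (ok) {
  //     // |view| shares storage with |list|, so it must not outlive it; it
  //     // frees nothing when destroyed.
  //   }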

  // Return a new BufferList and move storage from this BufferList to it. The
  // new BufferList owns the buffers. Move can fail, in which case *aSuccess
  // will be false upon return. The new BufferList can use a different
  // AllocPolicy than the original one. The new OtherAllocPolicy is responsible
  // for freeing buffers, so the OtherAllocPolicy must use a freeing method
  // compatible with the original one.
  template <typename OtherAllocPolicy>
  BufferList<OtherAllocPolicy> MoveFallible(
      bool* aSuccess, OtherAllocPolicy aAP = OtherAllocPolicy());
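
  // Illustrative sketch (the destination policy is an assumption; it must
  // free memory the same way the source policy allocated it):
  //
  //   bool ok = false;
  //   BufferList<InfallibleAllocPolicy> moved =
  //       list.MoveFallible<InfallibleAllocPolicy>(&ok);
  //   // On success, |moved| owns the segments and |list| is left empty.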

  // Return a new BufferList that adopts the byte range starting at aIter so
  // that the range [aIter, aIter + aSize) is transplanted to the returned
  // BufferList. Contents of the buffer before aIter + aSize are left
  // undefined. Extract can fail, in which case *aSuccess will be false upon
  // return. The moved buffers are erased from the original BufferList. If the
  // extraction fails, the original BufferList is left intact. All other
  // iterators except aIter are invalidated.
  // This method requires aIter and aSize to be 8-byte aligned.
  BufferList Extract(IterImpl& aIter, size_t aSize, bool* aSuccess);
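
  // Illustrative sketch (the 16-byte header and 4096-byte payload are
  // assumptions; both the iterator position and the size must be multiples of
  // kSegmentAlignment):
  //
  //   bool ok = false;
  //   auto iter = list.Iter();
  //   if (iter.AdvanceAcrossSegments(list, 16)) {
  //     auto payload = list.Extract(iter, 4096, &ok);
  //     // On success, |payload| owns the 4096 bytes that followed the header.
  //   }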

  // Return the number of bytes from 'start' to 'end', two iterators within
  // this BufferList.
  size_t RangeLength(const IterImpl& start, const IterImpl& end) const {
    MOZ_ASSERT(start.IsIn(*this) && end.IsIn(*this));
    return end.mAbsoluteOffset - start.mAbsoluteOffset;
  }

  // This takes ownership of the data.
  void* WriteBytesZeroCopy(char* aData, size_t aSize, size_t aCapacity) {
    MOZ_ASSERT(aCapacity != 0);
    MOZ_ASSERT(aSize <= aCapacity);
    MOZ_ASSERT(mOwning);

    if (!mSegments.append(Segment(aData, aSize, aCapacity))) {
      this->free_(aData, aCapacity);
      return nullptr;
    }
    mSize += aSize;
    return aData;
  }

  // Truncate this BufferList at the given iterator location, discarding all
  // data after this point. After this call, all other iterators will be
  // invalidated, and the passed-in iterator will be "Done".
  //
  // Returns the number of bytes discarded by this truncation.
  size_t Truncate(IterImpl& aIter);
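
  // Illustrative sketch (the 512-byte cutoff is an assumption): keep only the
  // first 512 bytes of the list.
  //
  //   auto iter = list.Iter();
  //   if (iter.AdvanceAcrossSegments(list, 512)) {
  //     size_t dropped = list.Truncate(iter);  // |iter| is now Done().
  //   }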

 private:
  explicit BufferList(AllocPolicy aAP)
      : AllocPolicy(aAP), mOwning(false), mSize(0), mStandardCapacity(0) {}

  char* AllocateSegment(size_t aSize, size_t aCapacity) {
    MOZ_RELEASE_ASSERT(mOwning);
    MOZ_ASSERT(aCapacity != 0);
    MOZ_ASSERT(aSize <= aCapacity);

    char* data = this->template pod_malloc<char>(aCapacity);
    if (!data) {
      return nullptr;
    }
    if (!mSegments.append(Segment(data, aSize, aCapacity))) {
      this->free_(data, aCapacity);
      return nullptr;
    }
    mSize += aSize;
    return data;
  }

  void AssertConsistentSize() const {
#ifdef DEBUG
    size_t realSize = 0;
    for (const auto& segment : mSegments) {
      realSize += segment.mSize;
    }
    MOZ_ASSERT(realSize == mSize, "cached size value is inconsistent!");
#endif
  }

  bool mOwning;
  Vector<Segment, 1, AllocPolicy> mSegments;
  size_t mSize;
  size_t mStandardCapacity;
};

template <typename AllocPolicy>
[[nodiscard]] bool BufferList<AllocPolicy>::WriteBytes(const char* aData,
                                                       size_t aSize) {
  MOZ_RELEASE_ASSERT(mOwning);
  MOZ_RELEASE_ASSERT(mStandardCapacity);

  size_t copied = 0;
  while (copied < aSize) {
    size_t toCopy;
    char* data = AllocateBytes(aSize - copied, &toCopy);
    if (!data) {
      return false;
    }
    memcpy(data, aData + copied, toCopy);
    copied += toCopy;
  }

  return true;
}

template <typename AllocPolicy>
char* BufferList<AllocPolicy>::AllocateBytes(size_t aMaxSize, size_t* aSize) {
  MOZ_RELEASE_ASSERT(mOwning);
  MOZ_RELEASE_ASSERT(mStandardCapacity);

  if (!mSegments.empty()) {
    Segment& lastSegment = mSegments.back();

    size_t capacity = lastSegment.mCapacity - lastSegment.mSize;
    if (capacity) {
      size_t size = std::min(aMaxSize, capacity);
      char* data = lastSegment.mData + lastSegment.mSize;

      lastSegment.mSize += size;
      mSize += size;

      *aSize = size;
      return data;
    }
  }

  size_t size = std::min(aMaxSize, mStandardCapacity);
  char* data = AllocateSegment(size, mStandardCapacity);
  if (data) {
    *aSize = size;
  }
  return data;
}

template <typename AllocPolicy>
bool BufferList<AllocPolicy>::ReadBytes(IterImpl& aIter, char* aData,
                                        size_t aSize) const {
  size_t copied = 0;
  size_t remaining = aSize;
  while (remaining) {
    size_t toCopy = std::min(aIter.RemainingInSegment(), remaining);
    if (!toCopy) {
      // We've run out of data in the last segment.
      return false;
    }
    memcpy(aData + copied, aIter.Data(), toCopy);
    copied += toCopy;
    remaining -= toCopy;

    aIter.Advance(*this, toCopy);
  }

  return true;
}

template <typename AllocPolicy>
template <typename BorrowingAllocPolicy>
BufferList<BorrowingAllocPolicy> BufferList<AllocPolicy>::Borrow(
    IterImpl& aIter, size_t aSize, bool* aSuccess,
    BorrowingAllocPolicy aAP) const {
  BufferList<BorrowingAllocPolicy> result(aAP);

  size_t size = aSize;
  while (size) {
    size_t toAdvance = std::min(size, aIter.RemainingInSegment());

    if (!toAdvance || !result.mSegments.append(
                          typename BufferList<BorrowingAllocPolicy>::Segment(
                              aIter.mData, toAdvance, toAdvance))) {
      *aSuccess = false;
      return result;
    }
    aIter.Advance(*this, toAdvance);
    size -= toAdvance;
  }

  result.mSize = aSize;
  *aSuccess = true;
  return result;
}

template <typename AllocPolicy>
template <typename OtherAllocPolicy>
BufferList<OtherAllocPolicy> BufferList<AllocPolicy>::MoveFallible(
    bool* aSuccess, OtherAllocPolicy aAP) {
  BufferList<OtherAllocPolicy> result(0, 0, mStandardCapacity, aAP);

  IterImpl iter = Iter();
  while (!iter.Done()) {
    size_t toAdvance = iter.RemainingInSegment();

    if (!toAdvance ||
        !result.mSegments.append(typename BufferList<OtherAllocPolicy>::Segment(
            iter.mData, toAdvance, toAdvance))) {
      *aSuccess = false;
      result.mSegments.clear();
      return result;
    }
    iter.Advance(*this, toAdvance);
  }

  result.mSize = mSize;
  mSegments.clear();
  mSize = 0;
  *aSuccess = true;
  return result;
}

template <typename AllocPolicy>
BufferList<AllocPolicy> BufferList<AllocPolicy>::Extract(IterImpl& aIter,
                                                         size_t aSize,
                                                         bool* aSuccess) {
  MOZ_RELEASE_ASSERT(aSize);
  MOZ_RELEASE_ASSERT(mOwning);
  MOZ_ASSERT(aSize % kSegmentAlignment == 0);
  MOZ_ASSERT(intptr_t(aIter.mData) % kSegmentAlignment == 0);

  auto failure = [this, aSuccess]() {
    *aSuccess = false;
    return BufferList(0, 0, mStandardCapacity);
  };

  // Number of segments we'll need to copy data from to satisfy the request.
  size_t segmentsNeeded = 0;
  // If this is None then the last segment is a full segment, otherwise we need
  // to copy this many bytes.
  Maybe<size_t> lastSegmentSize;

  // Copy of the iterator to walk the BufferList and see how many segments we
  // need to copy.
  IterImpl iter = aIter;
  size_t remaining = aSize;
  while (!iter.Done() && remaining &&
         remaining >= iter.RemainingInSegment()) {
    remaining -= iter.RemainingInSegment();
    iter.Advance(*this, iter.RemainingInSegment());
    segmentsNeeded++;
  }

  if (remaining) {
    if (iter.Done()) {
      // We reached the end of the BufferList and there wasn't enough data to
      // satisfy the request.
      return failure();
    }
    lastSegmentSize.emplace(remaining);
    // The last block also counts as a segment. This makes the conditionals
    // on segmentsNeeded work in the rest of the function.
    segmentsNeeded++;
  }

  BufferList result(0, 0, mStandardCapacity);
  if (!result.mSegments.reserve(segmentsNeeded + lastSegmentSize.isSome())) {
    return failure();
  }

  // Copy the first segment; it's special because we can't just steal the
  // entire Segment struct from this->mSegments.
  //
  // As we leave the data before the new `aIter` position as "unspecified", we
  // leave this data in the existing buffer, despite copying it into the new
  // buffer.
  size_t firstSegmentSize = std::min(aSize, aIter.RemainingInSegment());
  if (!result.WriteBytes(aIter.Data(), firstSegmentSize)) {
    return failure();
  }
  aIter.Advance(*this, firstSegmentSize);
  segmentsNeeded--;

  // The entirety of the request wasn't in the first segment, so now copy the
  // rest.
  if (segmentsNeeded) {
    size_t finalSegmentCapacity = 0;
    char* finalSegment = nullptr;
    // Pre-allocate the final segment so that if this fails, we return before
    // we delete the elements from |this->mSegments|.
    if (lastSegmentSize.isSome()) {
      finalSegmentCapacity = std::max(mStandardCapacity, *lastSegmentSize);
      finalSegment = this->template pod_malloc<char>(finalSegmentCapacity);
      if (!finalSegment) {
        return failure();
      }
    }

    size_t removedBytes = 0;
    size_t copyStart = aIter.mSegment;
    // Copy segments from this over to the result and remove them from our
    // storage. Not needed if the only segment we need to copy is the last
    // partial one.
    size_t segmentsToCopy = segmentsNeeded - lastSegmentSize.isSome();
    for (size_t i = 0; i < segmentsToCopy; ++i) {
      result.mSegments.infallibleAppend(Segment(
          mSegments[aIter.mSegment].mData, mSegments[aIter.mSegment].mSize,
          mSegments[aIter.mSegment].mCapacity));
      removedBytes += mSegments[aIter.mSegment].mSize;
      aIter.Advance(*this, aIter.RemainingInSegment());
    }
    // Due to the way IterImpl works, there are two cases here: (1) if we've
    // consumed the entirety of the BufferList, then the iterator is pointed at
    // the end of the final segment, (2) otherwise it is pointed at the start
    // of the next segment. We want to verify that we really consumed all
    // |segmentsToCopy| segments.
    MOZ_RELEASE_ASSERT(
        (aIter.mSegment == copyStart + segmentsToCopy) ||
        (aIter.Done() && aIter.mSegment == copyStart + segmentsToCopy - 1));
    mSegments.erase(mSegments.begin() + copyStart,
                    mSegments.begin() + copyStart + segmentsToCopy);

    // Reset the iter's position for what we just deleted.
    aIter.mSegment -= segmentsToCopy;
    aIter.mAbsoluteOffset -= removedBytes;
    mSize -= removedBytes;

    // If our iterator is already at the end, we just removed the very last
    // segment of our buffer list and need to shift the iterator back to point
    // at the end of the previous segment.
    if (aIter.Done()) {
      MOZ_ASSERT(lastSegmentSize.isNothing());
      if (mSegments.empty()) {
        MOZ_ASSERT(aIter.mSegment == 0);
        aIter.mData = aIter.mDataEnd = nullptr;
      } else {
        MOZ_ASSERT(aIter.mSegment == mSegments.length() - 1);
        aIter.mData = aIter.mDataEnd = mSegments.back().End();
      }
    }

    if (lastSegmentSize.isSome()) {
      // We called reserve() on result.mSegments, so infallibleAppend is safe.
      result.mSegments.infallibleAppend(
          Segment(finalSegment, 0, finalSegmentCapacity));
      bool r = result.WriteBytes(aIter.Data(), *lastSegmentSize);
      MOZ_RELEASE_ASSERT(r);
      aIter.Advance(*this, *lastSegmentSize);
    }
  }

  result.mSize = aSize;

  AssertConsistentSize();
  result.AssertConsistentSize();

  // Ensure that the iterator is still valid when Extract returns.
#ifdef DEBUG
  if (!mSegments.empty()) {
    auto& segment = mSegments[aIter.mSegment];
    MOZ_ASSERT(segment.Start() <= aIter.mData);
    MOZ_ASSERT(aIter.mDataEnd == segment.End());
  }
#endif

  *aSuccess = true;
  return result;
}

template <typename AllocPolicy>
size_t BufferList<AllocPolicy>::Truncate(IterImpl& aIter) {
  MOZ_ASSERT(aIter.IsIn(*this) || aIter.Done());
  if (aIter.Done()) {
    return 0;
  }

  size_t prevSize = mSize;

  // Remove any segments after the iterator's current segment.
  while (mSegments.length() > aIter.mSegment + 1) {
    Segment& toFree = mSegments.back();
    mSize -= toFree.mSize;
    if (mOwning) {
      this->free_(toFree.mData, toFree.mCapacity);
    }
    mSegments.popBack();
  }

  // The last segment is now aIter's current segment. Truncate or remove it.
  Segment& seg = mSegments.back();
  MOZ_ASSERT(aIter.mDataEnd == seg.End());
  mSize -= aIter.RemainingInSegment();
  seg.mSize -= aIter.RemainingInSegment();
  if (!seg.mSize) {
    if (mOwning) {
      this->free_(seg.mData, seg.mCapacity);
    }
    mSegments.popBack();
  }

  // Correct `aIter` to point to the new end of the BufferList.
  if (mSegments.empty()) {
    MOZ_ASSERT(mSize == 0);
    aIter.mSegment = 0;
    aIter.mData = aIter.mDataEnd = nullptr;
  } else {
    aIter.mSegment = mSegments.length() - 1;
    aIter.mData = aIter.mDataEnd = mSegments.back().End();
  }
  MOZ_ASSERT(aIter.Done());

  AssertConsistentSize();
  return prevSize - mSize;
}

}  // namespace mozilla

#endif /* mozilla_BufferList_h */