/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_BufferList_h
#define mozilla_BufferList_h
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <type_traits>
#include <utility>

#include "mozilla/AllocPolicy.h"
#include "mozilla/Maybe.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Types.h"
#include "mozilla/Vector.h"
// BufferList represents a sequence of buffers of data. A BufferList can choose
// to own its buffers or not. The class handles writing to the buffers,
// iterating over them, and reading data out. Unlike SegmentedVector, the
// buffers may be of unequal size. Like SegmentedVector, BufferList is a nice
// way to avoid large contiguous allocations (which can trigger OOMs).
29 class InfallibleAllocPolicy
;
33 template <typename AllocPolicy
>
34 class BufferList
: private AllocPolicy
{
35 // Each buffer in a BufferList has a size and a capacity. The first mSize
36 // bytes are initialized and the remaining |mCapacity - mSize| bytes are free.
42 Segment(char* aData
, size_t aSize
, size_t aCapacity
)
43 : mData(aData
), mSize(aSize
), mCapacity(aCapacity
) {}
45 Segment(const Segment
&) = delete;
46 Segment
& operator=(const Segment
&) = delete;
48 Segment(Segment
&&) = default;
49 Segment
& operator=(Segment
&&) = default;
51 char* Start() const { return mData
; }
52 char* End() const { return mData
+ mSize
; }
55 template <typename OtherAllocPolicy
>
56 friend class BufferList
;
59 // For the convenience of callers, all segments are required to be a multiple
60 // of 8 bytes in capacity. Also, every buffer except the last one is required
61 // to be full (i.e., size == capacity). Therefore, a byte at offset N within
62 // the BufferList and stored in memory at an address A will satisfy
63 // (N % Align == A % Align) if Align == 2, 4, or 8.
64 static const size_t kSegmentAlignment
= 8;
66 // Allocate a BufferList. The BufferList will free all its buffers when it is
67 // destroyed. If an infallible allocator is used, an initial buffer of size
68 // aInitialSize and capacity aInitialCapacity is allocated automatically. This
69 // data will be contiguous and can be accessed via |Start()|. If a fallible
70 // alloc policy is used, aInitialSize must be 0, and the fallible |Init()|
71 // method may be called instead. Subsequent buffers will be allocated with
72 // capacity aStandardCapacity.
73 BufferList(size_t aInitialSize
, size_t aInitialCapacity
,
74 size_t aStandardCapacity
, AllocPolicy aAP
= AllocPolicy())
79 mStandardCapacity(aStandardCapacity
) {
80 MOZ_ASSERT(aInitialCapacity
% kSegmentAlignment
== 0);
81 MOZ_ASSERT(aStandardCapacity
% kSegmentAlignment
== 0);
83 if (aInitialCapacity
) {
84 MOZ_ASSERT((aInitialSize
== 0 ||
85 std::is_same_v
<AllocPolicy
, InfallibleAllocPolicy
>),
86 "BufferList may only be constructed with an initial size when "
87 "using an infallible alloc policy");
89 AllocateSegment(aInitialSize
, aInitialCapacity
);
93 BufferList(const BufferList
& aOther
) = delete;
95 BufferList(BufferList
&& aOther
)
96 : mOwning(aOther
.mOwning
),
97 mSegments(std::move(aOther
.mSegments
)),
99 mStandardCapacity(aOther
.mStandardCapacity
) {
100 aOther
.mSegments
.clear();
104 BufferList
& operator=(const BufferList
& aOther
) = delete;
106 BufferList
& operator=(BufferList
&& aOther
) {
109 mOwning
= aOther
.mOwning
;
110 mSegments
= std::move(aOther
.mSegments
);
111 mSize
= aOther
.mSize
;
112 aOther
.mSegments
.clear();
117 ~BufferList() { Clear(); }
119 // Initializes the BufferList with a segment of the given size and capacity.
120 // May only be called once, before any segments have been allocated.
121 bool Init(size_t aInitialSize
, size_t aInitialCapacity
) {
122 MOZ_ASSERT(mSegments
.empty());
123 MOZ_ASSERT(aInitialCapacity
!= 0);
124 MOZ_ASSERT(aInitialCapacity
% kSegmentAlignment
== 0);
126 return AllocateSegment(aInitialSize
, aInitialCapacity
);
129 bool CopyFrom(const BufferList
& aOther
) {
134 // We don't make an exact copy of aOther. Instead, create a single segment
135 // with enough space to hold all data in aOther.
136 if (!Init(aOther
.mSize
, (aOther
.mSize
+ kSegmentAlignment
- 1) &
137 ~(kSegmentAlignment
- 1))) {
142 for (const Segment
& segment
: aOther
.mSegments
) {
143 memcpy(Start() + offset
, segment
.mData
, segment
.mSize
);
144 offset
+= segment
.mSize
;
146 MOZ_ASSERT(offset
== mSize
);
151 // Returns the sum of the sizes of all the buffers.
152 size_t Size() const { return mSize
; }
154 size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf
) {
155 size_t size
= mSegments
.sizeOfExcludingThis(aMallocSizeOf
);
156 for (Segment
& segment
: mSegments
) {
157 size
+= aMallocSizeOf(segment
.Start());
164 for (Segment
& segment
: mSegments
) {
165 this->free_(segment
.mData
, segment
.mCapacity
);
173 // Iterates over bytes in the segments. You can advance it by as many bytes as
177 // (0) mSegment <= bufferList.mSegments.length()
178 // (1) mData <= mDataEnd
179 // (2) If mSegment is not the last segment, mData < mDataEnd
184 friend class BufferList
;
187 explicit IterImpl(const BufferList
& aBuffers
)
188 : mSegment(0), mData(nullptr), mDataEnd(nullptr) {
189 if (!aBuffers
.mSegments
.empty()) {
190 mData
= aBuffers
.mSegments
[0].Start();
191 mDataEnd
= aBuffers
.mSegments
[0].End();
195 // Returns a pointer to the raw data. It is valid to access up to
196 // RemainingInSegment bytes of this buffer.
198 MOZ_RELEASE_ASSERT(!Done());
202 // Returns true if the memory in the range [Data(), Data() + aBytes) is all
203 // part of one contiguous buffer.
204 bool HasRoomFor(size_t aBytes
) const {
205 MOZ_RELEASE_ASSERT(mData
<= mDataEnd
);
206 return size_t(mDataEnd
- mData
) >= aBytes
;
209 // Returns the maximum value aBytes for which HasRoomFor(aBytes) will be
211 size_t RemainingInSegment() const {
212 MOZ_RELEASE_ASSERT(mData
<= mDataEnd
);
213 return mDataEnd
- mData
;
216 bool HasBytesAvailable(const BufferList
& aBuffers
, uint32_t aBytes
) const {
217 if (RemainingInSegment() >= aBytes
) {
220 aBytes
-= RemainingInSegment();
221 for (size_t i
= mSegment
+ 1; i
< aBuffers
.mSegments
.length(); i
++) {
222 if (aBuffers
.mSegments
[i
].mSize
>= aBytes
) {
225 aBytes
-= aBuffers
.mSegments
[i
].mSize
;
231 // Advances the iterator by aBytes bytes. aBytes must be less than
232 // RemainingInSegment(). If advancing by aBytes takes the iterator to the
233 // end of a buffer, it will be moved to the beginning of the next buffer
234 // unless it is the last buffer.
235 void Advance(const BufferList
& aBuffers
, size_t aBytes
) {
236 const Segment
& segment
= aBuffers
.mSegments
[mSegment
];
237 MOZ_RELEASE_ASSERT(segment
.Start() <= mData
);
238 MOZ_RELEASE_ASSERT(mData
<= mDataEnd
);
239 MOZ_RELEASE_ASSERT(mDataEnd
== segment
.End());
241 MOZ_RELEASE_ASSERT(HasRoomFor(aBytes
));
244 if (mData
== mDataEnd
&& mSegment
+ 1 < aBuffers
.mSegments
.length()) {
246 const Segment
& nextSegment
= aBuffers
.mSegments
[mSegment
];
247 mData
= nextSegment
.Start();
248 mDataEnd
= nextSegment
.End();
249 MOZ_RELEASE_ASSERT(mData
< mDataEnd
);
253 // Advance the iterator by aBytes, possibly crossing segments. This function
254 // returns false if it runs out of buffers to advance through. Otherwise it
256 bool AdvanceAcrossSegments(const BufferList
& aBuffers
, size_t aBytes
) {
257 size_t bytes
= aBytes
;
259 size_t toAdvance
= std::min(bytes
, RemainingInSegment());
263 Advance(aBuffers
, toAdvance
);
269 // Returns true when the iterator reaches the end of the BufferList.
270 bool Done() const { return mData
== mDataEnd
; }
273 // Count the bytes we would need to advance in order to reach aTarget.
274 size_t BytesUntil(const BufferList
& aBuffers
,
275 const IterImpl
& aTarget
) const {
278 MOZ_ASSERT(aTarget
.IsIn(aBuffers
));
281 for (uintptr_t segment
= mSegment
; segment
< aTarget
.mSegment
;
283 offset
+= aBuffers
.mSegments
[segment
].End() - data
;
284 data
= aBuffers
.mSegments
[segment
].mData
;
287 MOZ_RELEASE_ASSERT(IsIn(aBuffers
));
288 MOZ_RELEASE_ASSERT(aTarget
.mData
>= data
);
290 offset
+= aTarget
.mData
- data
;
294 bool IsIn(const BufferList
& aBuffers
) const {
295 return mSegment
< aBuffers
.mSegments
.length() &&
296 mData
>= aBuffers
.mSegments
[mSegment
].mData
&&
297 mData
< aBuffers
.mSegments
[mSegment
].End();
301 // Special convenience method that returns Iter().Data().
303 MOZ_RELEASE_ASSERT(!mSegments
.empty());
304 return mSegments
[0].mData
;
306 const char* Start() const { return mSegments
[0].mData
; }
308 IterImpl
Iter() const { return IterImpl(*this); }
310 // Copies aSize bytes from aData into the BufferList. The storage for these
311 // bytes may be split across multiple buffers. Size() is increased by aSize.
312 inline MOZ_MUST_USE
bool WriteBytes(const char* aData
, size_t aSize
);
314 // Allocates a buffer of at most |aMaxBytes| bytes and, if successful, returns
315 // that buffer, and places its size in |aSize|. If unsuccessful, returns null
316 // and leaves |aSize| undefined.
317 inline char* AllocateBytes(size_t aMaxSize
, size_t* aSize
);
319 // Copies possibly non-contiguous byte range starting at aIter into
320 // aData. aIter is advanced by aSize bytes. Returns false if it runs out of
321 // data before aSize.
322 inline bool ReadBytes(IterImpl
& aIter
, char* aData
, size_t aSize
) const;
324 // Return a new BufferList that shares storage with this BufferList. The new
325 // BufferList is read-only. It allows iteration over aSize bytes starting at
326 // aIter. Borrow can fail, in which case *aSuccess will be false upon
327 // return. The borrowed BufferList can use a different AllocPolicy than the
328 // original one. However, it is not responsible for freeing buffers, so the
329 // AllocPolicy is only used for the buffer vector.
330 template <typename BorrowingAllocPolicy
>
331 BufferList
<BorrowingAllocPolicy
> Borrow(
332 IterImpl
& aIter
, size_t aSize
, bool* aSuccess
,
333 BorrowingAllocPolicy aAP
= BorrowingAllocPolicy()) const;
335 // Return a new BufferList and move storage from this BufferList to it. The
336 // new BufferList owns the buffers. Move can fail, in which case *aSuccess
337 // will be false upon return. The new BufferList can use a different
338 // AllocPolicy than the original one. The new OtherAllocPolicy is responsible
339 // for freeing buffers, so the OtherAllocPolicy must use freeing method
340 // compatible to the original one.
341 template <typename OtherAllocPolicy
>
342 BufferList
<OtherAllocPolicy
> MoveFallible(
343 bool* aSuccess
, OtherAllocPolicy aAP
= OtherAllocPolicy());
345 // Return a new BufferList that adopts the byte range starting at Iter so that
346 // range [aIter, aIter + aSize) is transplanted to the returned BufferList.
347 // Contents of the buffer before aIter + aSize is left undefined.
348 // Extract can fail, in which case *aSuccess will be false upon return. The
349 // moved buffers are erased from the original BufferList. In case of extract
350 // fails, the original BufferList is intact. All other iterators except aIter
352 // This method requires aIter and aSize to be 8-byte aligned.
353 BufferList
Extract(IterImpl
& aIter
, size_t aSize
, bool* aSuccess
);
355 // Return the number of bytes from 'start' to 'end', two iterators within
357 size_t RangeLength(const IterImpl
& start
, const IterImpl
& end
) const {
358 MOZ_ASSERT(start
.IsIn(*this) && end
.IsIn(*this));
359 return start
.BytesUntil(*this, end
);
362 // This takes ownership of the data
363 void* WriteBytesZeroCopy(char* aData
, size_t aSize
, size_t aCapacity
) {
364 MOZ_ASSERT(aCapacity
!= 0);
365 MOZ_ASSERT(aSize
<= aCapacity
);
368 if (!mSegments
.append(Segment(aData
, aSize
, aCapacity
))) {
369 this->free_(aData
, aCapacity
);
377 explicit BufferList(AllocPolicy aAP
)
378 : AllocPolicy(aAP
), mOwning(false), mSize(0), mStandardCapacity(0) {}
380 char* AllocateSegment(size_t aSize
, size_t aCapacity
) {
381 MOZ_RELEASE_ASSERT(mOwning
);
382 MOZ_ASSERT(aCapacity
!= 0);
383 MOZ_ASSERT(aSize
<= aCapacity
);
385 char* data
= this->template pod_malloc
<char>(aCapacity
);
389 if (!mSegments
.append(Segment(data
, aSize
, aCapacity
))) {
390 this->free_(data
, aCapacity
);
398 Vector
<Segment
, 1, AllocPolicy
> mSegments
;
400 size_t mStandardCapacity
;
403 template <typename AllocPolicy
>
404 MOZ_MUST_USE
bool BufferList
<AllocPolicy
>::WriteBytes(const char* aData
,
406 MOZ_RELEASE_ASSERT(mOwning
);
407 MOZ_RELEASE_ASSERT(mStandardCapacity
);
410 while (copied
< aSize
) {
412 char* data
= AllocateBytes(aSize
- copied
, &toCopy
);
416 memcpy(data
, aData
+ copied
, toCopy
);
423 template <typename AllocPolicy
>
424 char* BufferList
<AllocPolicy
>::AllocateBytes(size_t aMaxSize
, size_t* aSize
) {
425 MOZ_RELEASE_ASSERT(mOwning
);
426 MOZ_RELEASE_ASSERT(mStandardCapacity
);
428 if (!mSegments
.empty()) {
429 Segment
& lastSegment
= mSegments
.back();
431 size_t capacity
= lastSegment
.mCapacity
- lastSegment
.mSize
;
433 size_t size
= std::min(aMaxSize
, capacity
);
434 char* data
= lastSegment
.mData
+ lastSegment
.mSize
;
436 lastSegment
.mSize
+= size
;
444 size_t size
= std::min(aMaxSize
, mStandardCapacity
);
445 char* data
= AllocateSegment(size
, mStandardCapacity
);
452 template <typename AllocPolicy
>
453 bool BufferList
<AllocPolicy
>::ReadBytes(IterImpl
& aIter
, char* aData
,
454 size_t aSize
) const {
456 size_t remaining
= aSize
;
458 size_t toCopy
= std::min(aIter
.RemainingInSegment(), remaining
);
460 // We've run out of data in the last segment.
463 memcpy(aData
+ copied
, aIter
.Data(), toCopy
);
467 aIter
.Advance(*this, toCopy
);
473 template <typename AllocPolicy
>
474 template <typename BorrowingAllocPolicy
>
475 BufferList
<BorrowingAllocPolicy
> BufferList
<AllocPolicy
>::Borrow(
476 IterImpl
& aIter
, size_t aSize
, bool* aSuccess
,
477 BorrowingAllocPolicy aAP
) const {
478 BufferList
<BorrowingAllocPolicy
> result(aAP
);
482 size_t toAdvance
= std::min(size
, aIter
.RemainingInSegment());
484 if (!toAdvance
|| !result
.mSegments
.append(
485 typename BufferList
<BorrowingAllocPolicy
>::Segment(
486 aIter
.mData
, toAdvance
, toAdvance
))) {
490 aIter
.Advance(*this, toAdvance
);
494 result
.mSize
= aSize
;
499 template <typename AllocPolicy
>
500 template <typename OtherAllocPolicy
>
501 BufferList
<OtherAllocPolicy
> BufferList
<AllocPolicy
>::MoveFallible(
502 bool* aSuccess
, OtherAllocPolicy aAP
) {
503 BufferList
<OtherAllocPolicy
> result(0, 0, mStandardCapacity
, aAP
);
505 IterImpl iter
= Iter();
506 while (!iter
.Done()) {
507 size_t toAdvance
= iter
.RemainingInSegment();
510 !result
.mSegments
.append(typename BufferList
<OtherAllocPolicy
>::Segment(
511 iter
.mData
, toAdvance
, toAdvance
))) {
513 result
.mSegments
.clear();
516 iter
.Advance(*this, toAdvance
);
519 result
.mSize
= mSize
;
526 template <typename AllocPolicy
>
527 BufferList
<AllocPolicy
> BufferList
<AllocPolicy
>::Extract(IterImpl
& aIter
,
530 MOZ_RELEASE_ASSERT(aSize
);
531 MOZ_RELEASE_ASSERT(mOwning
);
532 MOZ_ASSERT(aSize
% kSegmentAlignment
== 0);
533 MOZ_ASSERT(intptr_t(aIter
.mData
) % kSegmentAlignment
== 0);
535 auto failure
= [this, aSuccess
]() {
537 return BufferList(0, 0, mStandardCapacity
);
540 // Number of segments we'll need to copy data from to satisfy the request.
541 size_t segmentsNeeded
= 0;
542 // If this is None then the last segment is a full segment, otherwise we need
543 // to copy this many bytes.
544 Maybe
<size_t> lastSegmentSize
;
546 // Copy of the iterator to walk the BufferList and see how many segments we
548 IterImpl iter
= aIter
;
549 size_t remaining
= aSize
;
550 while (!iter
.Done() && remaining
&&
551 remaining
>= iter
.RemainingInSegment()) {
552 remaining
-= iter
.RemainingInSegment();
553 iter
.Advance(*this, iter
.RemainingInSegment());
559 // We reached the end of the BufferList and there wasn't enough data to
560 // satisfy the request.
563 lastSegmentSize
.emplace(remaining
);
564 // The last block also counts as a segment. This makes the conditionals
565 // on segmentsNeeded work in the rest of the function.
570 BufferList
result(0, 0, mStandardCapacity
);
571 if (!result
.mSegments
.reserve(segmentsNeeded
+ lastSegmentSize
.isSome())) {
575 // Copy the first segment, it's special because we can't just steal the
576 // entire Segment struct from this->mSegments.
577 size_t firstSegmentSize
= std::min(aSize
, aIter
.RemainingInSegment());
578 if (!result
.WriteBytes(aIter
.Data(), firstSegmentSize
)) {
581 aIter
.Advance(*this, firstSegmentSize
);
584 // The entirety of the request wasn't in the first segment, now copy the
586 if (segmentsNeeded
) {
587 char* finalSegment
= nullptr;
588 // Pre-allocate the final segment so that if this fails, we return before
589 // we delete the elements from |this->mSegments|.
590 if (lastSegmentSize
.isSome()) {
591 MOZ_RELEASE_ASSERT(mStandardCapacity
>= *lastSegmentSize
);
592 finalSegment
= this->template pod_malloc
<char>(mStandardCapacity
);
598 size_t copyStart
= aIter
.mSegment
;
599 // Copy segments from this over to the result and remove them from our
600 // storage. Not needed if the only segment we need to copy is the last
602 size_t segmentsToCopy
= segmentsNeeded
- lastSegmentSize
.isSome();
603 for (size_t i
= 0; i
< segmentsToCopy
; ++i
) {
604 result
.mSegments
.infallibleAppend(Segment(
605 mSegments
[aIter
.mSegment
].mData
, mSegments
[aIter
.mSegment
].mSize
,
606 mSegments
[aIter
.mSegment
].mCapacity
));
607 aIter
.Advance(*this, aIter
.RemainingInSegment());
609 // Due to the way IterImpl works, there are two cases here: (1) if we've
610 // consumed the entirety of the BufferList, then the iterator is pointed at
611 // the end of the final segment, (2) otherwise it is pointed at the start
612 // of the next segment. We want to verify that we really consumed all
613 // |segmentsToCopy| segments.
615 (aIter
.mSegment
== copyStart
+ segmentsToCopy
) ||
616 (aIter
.Done() && aIter
.mSegment
== copyStart
+ segmentsToCopy
- 1));
617 mSegments
.erase(mSegments
.begin() + copyStart
,
618 mSegments
.begin() + copyStart
+ segmentsToCopy
);
620 // Reset the iter's position for what we just deleted.
621 aIter
.mSegment
-= segmentsToCopy
;
623 if (lastSegmentSize
.isSome()) {
624 // We called reserve() on result.mSegments so infallibleAppend is safe.
625 result
.mSegments
.infallibleAppend(
626 Segment(finalSegment
, 0, mStandardCapacity
));
627 bool r
= result
.WriteBytes(aIter
.Data(), *lastSegmentSize
);
628 MOZ_RELEASE_ASSERT(r
);
629 aIter
.Advance(*this, *lastSegmentSize
);
634 result
.mSize
= aSize
;
}  // namespace mozilla

#endif /* mozilla_BufferList_h */