1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef ProfileBufferChunk_h
8 #define ProfileBufferChunk_h
#include "mozilla/MemoryReporting.h"
#include "mozilla/ProfileBufferIndex.h"
#include "mozilla/Span.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/UniquePtr.h"

#if defined(MOZ_MEMORY)
#  include "mozmemory.h"
#endif

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <type_traits>

namespace mozilla {
30 // Represents a single chunk of memory, with a link to the next chunk (or null).
32 // A chunk is made of an internal header (which contains a public part) followed
33 // by user-accessible bytes.
35 // +---------------+---------+----------------------------------------------+
36 // | public Header | private | memory containing user blocks |
37 // +---------------+---------+----------------------------------------------+
38 // <---------------BufferBytes()------------------>
39 // <------------------------------ChunkBytes()------------------------------>
41 // The chunk can reserve "blocks", but doesn't know the internal contents of
42 // each block, it only knows where the first one starts, and where the last one
43 // ends (which is where the next one will begin, if not already out of range).
44 // It is up to the user to add structure to each block so that they can be
45 // distinguished when later read.
47 // +---------------+---------+----------------------------------------------+
48 // | public Header | private | [1st block]...[last full block] |
49 // +---------------+---------+----------------------------------------------+
50 // ChunkHeader().mOffsetFirstBlock ^ ^
51 // ChunkHeader().mOffsetPastLastBlock --'
53 // It is possible to attempt to reserve more than the remaining space, in which
54 // case only what is available is returned. The caller is responsible for using
55 // another chunk, reserving a block "tail" in it, and using both parts to
56 // constitute a full block. (This initial tail may be empty in some chunks.)
58 // +---------------+---------+----------------------------------------------+
59 // | public Header | private | tail][1st block]...[last full block][head... |
60 // +---------------+---------+----------------------------------------------+
61 // ChunkHeader().mOffsetFirstBlock ^ ^
62 // ChunkHeader().mOffsetPastLastBlock --'
64 // Each Chunk has an internal state (checked in DEBUG builds) that directs how
65 // to use it during creation, initialization, use, end of life, recycling, and
66 // destruction. See `State` below for details.
68 // - `ReserveInitialBlockAsTail()` must be called before the first `Reserve()`
69 // after construction or recycling, even with a size of 0 (no actual tail),
70 // - `MarkDone()` and `MarkRecycled()` must be called as appropriate.
71 class ProfileBufferChunk
{
74 using Length
= uint32_t;
76 using SpanOfBytes
= Span
<Byte
>;
78 // Hint about the size of the metadata (public and private headers).
79 // `Create()` below takes the minimum *buffer* size, so the minimum total
80 // Chunk size is at least `SizeofChunkMetadata() + aMinBufferBytes`.
81 [[nodiscard
]] static constexpr Length
SizeofChunkMetadata() {
82 return static_cast<Length
>(sizeof(InternalHeader
));
85 // Allocate space for a chunk with a given minimum size, and construct it.
86 // The actual size may be higher, to match the actual space taken in the
88 [[nodiscard
]] static UniquePtr
<ProfileBufferChunk
> Create(
89 Length aMinBufferBytes
) {
90 // We need at least one byte, to cover the always-present `mBuffer` byte.
91 aMinBufferBytes
= std::max(aMinBufferBytes
, Length(1));
92 // Trivial struct with the same alignment as `ProfileBufferChunk`, and size
93 // equal to that alignment, because typically the sizeof of an object is
94 // a multiple of its alignment.
95 struct alignas(alignof(InternalHeader
)) ChunkStruct
{
96 Byte c
[alignof(InternalHeader
)];
98 static_assert(std::is_trivial_v
<ChunkStruct
>,
99 "ChunkStruct must be trivial to avoid any construction");
100 // Allocate an array of that struct, enough to contain the expected
101 // `ProfileBufferChunk` (with its header+buffer).
102 size_t count
= (sizeof(InternalHeader
) + aMinBufferBytes
+
103 (alignof(InternalHeader
) - 1)) /
104 alignof(InternalHeader
);
105 #if defined(MOZ_MEMORY)
106 // Potentially expand the array to use more of the effective allocation.
107 count
= (malloc_good_size(count
* sizeof(ChunkStruct
)) +
108 (sizeof(ChunkStruct
) - 1)) /
111 auto chunkStorage
= MakeUnique
<ChunkStruct
[]>(count
);
112 MOZ_ASSERT(reinterpret_cast<uintptr_t>(chunkStorage
.get()) %
113 alignof(InternalHeader
) ==
115 // After the allocation, compute the actual chunk size (including header).
116 const size_t chunkBytes
= count
* sizeof(ChunkStruct
);
117 MOZ_ASSERT(chunkBytes
>= sizeof(ProfileBufferChunk
),
118 "Not enough space to construct a ProfileBufferChunk");
119 MOZ_ASSERT(chunkBytes
<=
120 static_cast<size_t>(std::numeric_limits
<Length
>::max()));
121 // Compute the size of the user-accessible buffer inside the chunk.
122 const Length bufferBytes
=
123 static_cast<Length
>(chunkBytes
- sizeof(InternalHeader
));
124 MOZ_ASSERT(bufferBytes
>= aMinBufferBytes
,
125 "Not enough space for minimum buffer size");
126 // Construct the header at the beginning of the allocated array, with the
127 // known buffer size.
128 new (chunkStorage
.get()) ProfileBufferChunk(bufferBytes
);
129 // We now have a proper `ProfileBufferChunk` object, create the appropriate
131 UniquePtr
<ProfileBufferChunk
> chunk
{
132 reinterpret_cast<ProfileBufferChunk
*>(chunkStorage
.release())};
134 size_t(reinterpret_cast<const char*>(
135 &chunk
.get()->BufferSpan()[bufferBytes
- 1]) -
136 reinterpret_cast<const char*>(chunk
.get())) == chunkBytes
- 1,
137 "Buffer span spills out of chunk allocation");
142 ~ProfileBufferChunk() {
143 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::InUse
);
144 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::Full
);
145 MOZ_ASSERT(mInternalHeader
.mState
== InternalHeader::State::Created
||
146 mInternalHeader
.mState
== InternalHeader::State::Done
||
147 mInternalHeader
.mState
== InternalHeader::State::Recycled
);
151 // Must be called with the first block tail (may be empty), which will be
152 // skipped if the reader starts with this ProfileBufferChunk.
153 [[nodiscard
]] SpanOfBytes
ReserveInitialBlockAsTail(Length aTailSize
) {
155 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::InUse
);
156 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::Full
);
157 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::Done
);
158 MOZ_ASSERT(mInternalHeader
.mState
== InternalHeader::State::Created
||
159 mInternalHeader
.mState
== InternalHeader::State::Recycled
);
160 mInternalHeader
.mState
= InternalHeader::State::InUse
;
162 mInternalHeader
.mHeader
.mOffsetFirstBlock
= aTailSize
;
163 mInternalHeader
.mHeader
.mOffsetPastLastBlock
= aTailSize
;
164 mInternalHeader
.mHeader
.mStartTimeStamp
= TimeStamp::Now();
165 return SpanOfBytes(&mBuffer
, aTailSize
);
168 struct ReserveReturn
{
170 ProfileBufferBlockIndex mBlockRangeIndex
;
173 // Reserve a block of up to `aBlockSize` bytes, and return a Span to it, and
174 // its starting index. The actual size may be smaller, if the block cannot fit
175 // in the remaining space.
176 [[nodiscard
]] ReserveReturn
ReserveBlock(Length aBlockSize
) {
177 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::Created
);
178 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::Full
);
179 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::Done
);
180 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::Recycled
);
181 MOZ_ASSERT(mInternalHeader
.mState
== InternalHeader::State::InUse
);
182 MOZ_ASSERT(RangeStart() != 0,
183 "Expected valid range start before first Reserve()");
184 const Length blockOffset
= mInternalHeader
.mHeader
.mOffsetPastLastBlock
;
185 Length reservedSize
= aBlockSize
;
186 if (MOZ_UNLIKELY(aBlockSize
>= RemainingBytes())) {
187 reservedSize
= RemainingBytes();
189 mInternalHeader
.mState
= InternalHeader::State::Full
;
192 mInternalHeader
.mHeader
.mOffsetPastLastBlock
+= reservedSize
;
193 mInternalHeader
.mHeader
.mBlockCount
+= 1;
194 return {SpanOfBytes(&mBuffer
+ blockOffset
, reservedSize
),
195 ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
196 mInternalHeader
.mHeader
.mRangeStart
+ blockOffset
)};
199 // When a chunk will not be used to store more blocks (because it is full, or
200 // because the profiler will not add more data), it should be marked "done".
201 // Access to its content is still allowed.
204 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::Created
);
205 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::Done
);
206 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::Recycled
);
207 MOZ_ASSERT(mInternalHeader
.mState
== InternalHeader::State::InUse
||
208 mInternalHeader
.mState
== InternalHeader::State::Full
);
209 mInternalHeader
.mState
= InternalHeader::State::Done
;
211 mInternalHeader
.mHeader
.mDoneTimeStamp
= TimeStamp::Now();
214 // A "Done" chunk may be recycled, to avoid allocating a new one.
215 void MarkRecycled() {
217 // We also allow Created and already-Recycled chunks to be recycled, this
218 // way it's easier to recycle chunks when their state is not easily
220 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::InUse
);
221 MOZ_ASSERT(mInternalHeader
.mState
!= InternalHeader::State::Full
);
222 MOZ_ASSERT(mInternalHeader
.mState
== InternalHeader::State::Created
||
223 mInternalHeader
.mState
== InternalHeader::State::Done
||
224 mInternalHeader
.mState
== InternalHeader::State::Recycled
);
225 mInternalHeader
.mState
= InternalHeader::State::Recycled
;
227 // Reset all header fields, in case this recycled chunk gets read.
228 mInternalHeader
.mHeader
.Reset();
231 // Public header, meant to uniquely identify a chunk, it may be shared with
232 // other processes to coordinate global memory handling.
234 explicit Header(Length aBufferBytes
) : mBufferBytes(aBufferBytes
) {}
236 // Reset all members to their as-new values (apart from the buffer size,
237 // which cannot change), ready for re-use.
239 mOffsetFirstBlock
= 0;
240 mOffsetPastLastBlock
= 0;
241 mStartTimeStamp
= TimeStamp
{};
242 mDoneTimeStamp
= TimeStamp
{};
248 // Note: Part of the ordering of members below is to avoid unnecessary
251 // Members managed by the ProfileBufferChunk.
253 // Offset of the first block (past the initial tail block, which may be 0).
254 Length mOffsetFirstBlock
= 0;
255 // Offset past the last byte of the last reserved block
256 // It may be past mBufferBytes when last block continues in the next
257 // ProfileBufferChunk. It may be before mBufferBytes if ProfileBufferChunk
258 // is marked "Done" before the end is reached.
259 Length mOffsetPastLastBlock
= 0;
260 // Timestamp when the buffer becomes in-use, ready to record data.
261 TimeStamp mStartTimeStamp
;
262 // Timestamp when the buffer is "Done" (which happens when the last block is
263 // written). This will be used to find and discard the oldest
264 // ProfileBufferChunk.
265 TimeStamp mDoneTimeStamp
;
266 // Number of bytes in the buffer, set once at construction time.
267 const Length mBufferBytes
;
268 // Number of reserved blocks (including final one even if partial, but
269 // excluding initial tail).
270 Length mBlockCount
= 0;
272 // Meta-data set by the user.
274 // Index of the first byte of this ProfileBufferChunk, relative to all
275 // Chunks for this process. Index 0 is reserved as nullptr-like index,
276 // mRangeStart should be set to a non-0 value before the first `Reserve()`.
277 ProfileBufferIndex mRangeStart
= 0;
278 // Process writing to this ProfileBufferChunk.
281 // A bit of spare space (necessary here because of the alignment due to
282 // other members), may be later repurposed for extra data.
283 const int mPADDING
= 0;
286 [[nodiscard
]] const Header
& ChunkHeader() const {
287 return mInternalHeader
.mHeader
;
290 [[nodiscard
]] Length
BufferBytes() const {
291 return ChunkHeader().mBufferBytes
;
294 // Total size of the chunk (buffer + header).
295 [[nodiscard
]] Length
ChunkBytes() const {
296 return static_cast<Length
>(sizeof(InternalHeader
)) + BufferBytes();
299 // Size of external resources, in this case all the following chunks.
300 [[nodiscard
]] size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf
) const {
301 const ProfileBufferChunk
* const next
= GetNext();
302 return next
? next
->SizeOfIncludingThis(aMallocSizeOf
) : 0;
305 // Size of this chunk and all following ones.
306 [[nodiscard
]] size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf
) const {
307 // Just in case `aMallocSizeOf` falls back on just `sizeof`, make sure we
308 // account for at least the actual Chunk requested allocation size.
309 return std::max
<size_t>(aMallocSizeOf(this), ChunkBytes()) +
310 SizeOfExcludingThis(aMallocSizeOf
);
313 [[nodiscard
]] Length
RemainingBytes() const {
314 return BufferBytes() - OffsetPastLastBlock();
317 [[nodiscard
]] Length
OffsetFirstBlock() const {
318 return ChunkHeader().mOffsetFirstBlock
;
321 [[nodiscard
]] Length
OffsetPastLastBlock() const {
322 return ChunkHeader().mOffsetPastLastBlock
;
325 [[nodiscard
]] Length
BlockCount() const { return ChunkHeader().mBlockCount
; }
327 [[nodiscard
]] int ProcessId() const { return ChunkHeader().mProcessId
; }
329 void SetProcessId(int aProcessId
) {
330 mInternalHeader
.mHeader
.mProcessId
= aProcessId
;
333 // Global range index at the start of this Chunk.
334 [[nodiscard
]] ProfileBufferIndex
RangeStart() const {
335 return ChunkHeader().mRangeStart
;
338 void SetRangeStart(ProfileBufferIndex aRangeStart
) {
339 mInternalHeader
.mHeader
.mRangeStart
= aRangeStart
;
342 // Get a read-only Span to the buffer. It is up to the caller to decypher the
343 // contents, based on known offsets and the internal block structure.
344 [[nodiscard
]] Span
<const Byte
> BufferSpan() const {
345 return Span
<const Byte
>(&mBuffer
, BufferBytes());
348 [[nodiscard
]] Byte
ByteAt(Length aOffset
) const {
349 MOZ_ASSERT(aOffset
< OffsetPastLastBlock());
350 return *(&mBuffer
+ aOffset
);
353 [[nodiscard
]] ProfileBufferChunk
* GetNext() {
354 return mInternalHeader
.mNext
.get();
356 [[nodiscard
]] const ProfileBufferChunk
* GetNext() const {
357 return mInternalHeader
.mNext
.get();
360 [[nodiscard
]] UniquePtr
<ProfileBufferChunk
> ReleaseNext() {
361 return std::move(mInternalHeader
.mNext
);
364 void InsertNext(UniquePtr
<ProfileBufferChunk
>&& aChunk
) {
368 aChunk
->SetLast(ReleaseNext());
369 mInternalHeader
.mNext
= std::move(aChunk
);
372 // Find the last chunk in this chain (it may be `this`).
373 [[nodiscard
]] ProfileBufferChunk
* Last() {
374 ProfileBufferChunk
* chunk
= this;
376 ProfileBufferChunk
* next
= chunk
->GetNext();
383 [[nodiscard
]] const ProfileBufferChunk
* Last() const {
384 const ProfileBufferChunk
* chunk
= this;
386 const ProfileBufferChunk
* next
= chunk
->GetNext();
394 void SetLast(UniquePtr
<ProfileBufferChunk
>&& aChunk
) {
398 Last()->mInternalHeader
.mNext
= std::move(aChunk
);
401 // Join two possibly-null chunk lists.
402 [[nodiscard
]] static UniquePtr
<ProfileBufferChunk
> Join(
403 UniquePtr
<ProfileBufferChunk
>&& aFirst
,
404 UniquePtr
<ProfileBufferChunk
>&& aLast
) {
406 aFirst
->SetLast(std::move(aLast
));
407 return std::move(aFirst
);
409 return std::move(aLast
);
413 void Dump(std::FILE* aFile
= stdout
) const {
415 "Chunk[%p] chunkSize=%u bufferSize=%u state=%s rangeStart=%u "
416 "firstBlockOffset=%u offsetPastLastBlock=%u blockCount=%u",
417 this, unsigned(ChunkBytes()), unsigned(BufferBytes()),
418 mInternalHeader
.StateString(), unsigned(RangeStart()),
419 unsigned(OffsetFirstBlock()), unsigned(OffsetPastLastBlock()),
420 unsigned(BlockCount()));
421 const auto len
= OffsetPastLastBlock();
422 constexpr unsigned columns
= 16;
423 unsigned char ascii
[columns
+ 1];
424 ascii
[columns
] = '\0';
425 for (Length i
= 0; i
< len
; ++i
) {
426 if (i
% columns
== 0) {
427 fprintf(aFile
, "\n %4u=0x%03x:", unsigned(i
), unsigned(i
));
428 for (unsigned a
= 0; a
< columns
; ++a
) {
432 unsigned char sep
= ' ';
433 if (i
== OffsetFirstBlock()) {
434 if (i
== OffsetPastLastBlock()) {
439 } else if (i
== OffsetPastLastBlock()) {
442 unsigned char c
= *(&mBuffer
+ i
);
443 fprintf(aFile
, "%c%02x", sep
, c
);
446 if (i
+ 1 == OffsetPastLastBlock()) {
447 // Special case when last block ends right at the end.
452 } else if (i
% columns
== columns
- 1) {
456 ascii
[i
% columns
] = (c
>= ' ' && c
<= '~') ? c
: '.';
458 if (i
% columns
== columns
- 1) {
459 fprintf(aFile
, " %s", ascii
);
463 if (len
% columns
< columns
- 1) {
464 for (Length i
= len
% columns
; i
< columns
; ++i
) {
467 fprintf(aFile
, " %s", ascii
);
470 fprintf(aFile
, "\n");
475 // ProfileBufferChunk constructor. Use static `Create()` to allocate and
476 // construct a ProfileBufferChunk.
477 explicit ProfileBufferChunk(Length aBufferBytes
)
478 : mInternalHeader(aBufferBytes
) {}
480 // This internal header starts with the public `Header`, and adds some data
481 // only necessary for local handling.
482 // This encapsulation is also necessary to perform placement-new in
484 struct InternalHeader
{
485 explicit InternalHeader(Length aBufferBytes
) : mHeader(aBufferBytes
) {}
488 UniquePtr
<ProfileBufferChunk
> mNext
;
492 Created
, // Self-set. Just constructed, waiting for initial block tail.
493 InUse
, // Ready to accept blocks.
494 Full
, // Self-set. Blocks reach the end (or further).
495 Done
, // Blocks won't be added anymore.
496 Recycled
// Still full of data, but expecting an initial block tail.
499 State mState
= State::Created
;
500 // Transition table: (X=unexpected)
501 // Method \ State Created InUse Full Done Recycled
502 // ReserveInitialBlockAsTail InUse X X X InUse
503 // Reserve X InUse/Full X X X
504 // MarkDone X Done Done X X
505 // MarkRecycled X X X Recycled X
506 // destructor ok X X ok ok
508 const char* StateString() const {
518 case State::Recycled
:
525 const char* StateString() const { return "(non-DEBUG)"; }
529 InternalHeader mInternalHeader
;
532 // First byte of the buffer. Note that ProfileBufferChunk::Create allocates a
533 // bigger block, such that `mBuffer` is the first of `mBufferBytes` available
535 // The initialization is not strictly needed, because bytes should only be
536 // read after they have been written and `mOffsetPastLastBlock` has been
538 // - Reviewbot complains that it's not initialized.
539 // - It's cheap to initialize one byte.
540 // - In the worst case (reading does happen), zero is not a valid entry size
541 // and should get caught in entry readers.
545 } // namespace mozilla
547 #endif // ProfileBufferChunk_h