// gecko.git: mozglue/baseprofiler/public/ProfileBufferChunk.h
// blob 9ba2483372e3b8ae3e6ade0acb036fa44e6f89c8
// (Extracted at changeset "Bug 1852740: add tests for the `fetchpriority`
// attribute in Link headers. r=necko...")
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ProfileBufferChunk_h
#define ProfileBufferChunk_h

#include "mozilla/MemoryReporting.h"
#include "mozilla/ProfileBufferIndex.h"
#include "mozilla/Span.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/UniquePtr.h"

#if defined(MOZ_MEMORY)
#  include "mozmemory.h"
#endif

#include <algorithm>
#include <limits>
#include <type_traits>

#ifdef DEBUG
#  include <cstdio>
#endif
namespace mozilla {

// Represents a single chunk of memory, with a link to the next chunk (or null).
//
// A chunk is made of an internal header (which contains a public part) followed
// by user-accessible bytes.
//
// +---------------+---------+----------------------------------------------+
// | public Header | private |        memory containing user blocks         |
// +---------------+---------+----------------------------------------------+
//                           <---------------BufferBytes()----------------->
// <------------------------------ChunkBytes()------------------------------>
//
// The chunk can reserve "blocks", but doesn't know the internal contents of
// each block, it only knows where the first one starts, and where the last one
// ends (which is where the next one will begin, if not already out of range).
// It is up to the user to add structure to each block so that they can be
// distinguished when later read.
//
// +---------------+---------+----------------------------------------------+
// | public Header | private | [1st block]...[last full block]              |
// +---------------+---------+----------------------------------------------+
// ChunkHeader().mOffsetFirstBlock ----^                                ^
// ChunkHeader().mOffsetPastLastBlock ---------------------------------'
//
// It is possible to attempt to reserve more than the remaining space, in which
// case only what is available is returned. The caller is responsible for using
// another chunk, reserving a block "tail" in it, and using both parts to
// constitute a full block. (This initial tail may be empty in some chunks.)
//
// +---------------+---------+----------------------------------------------+
// | public Header | private | tail][1st block]...[last full block][head... |
// +---------------+---------+----------------------------------------------+
// ChunkHeader().mOffsetFirstBlock -----^                               ^
// ChunkHeader().mOffsetPastLastBlock ---------------------------------'
//
// Each Chunk has an internal state (checked in DEBUG builds) that directs how
// to use it during creation, initialization, use, end of life, recycling, and
// destruction. See `State` below for details.
// In particular:
// - `ReserveInitialBlockAsTail()` must be called before the first `Reserve()`
//   after construction or recycling, even with a size of 0 (no actual tail),
// - `MarkDone()` and `MarkRecycled()` must be called as appropriate.
71 class ProfileBufferChunk {
72 public:
73 using Byte = uint8_t;
74 using Length = uint32_t;
76 using SpanOfBytes = Span<Byte>;
78 // Hint about the size of the metadata (public and private headers).
79 // `Create()` below takes the minimum *buffer* size, so the minimum total
80 // Chunk size is at least `SizeofChunkMetadata() + aMinBufferBytes`.
81 [[nodiscard]] static constexpr Length SizeofChunkMetadata() {
82 return static_cast<Length>(sizeof(InternalHeader));
85 // Allocate space for a chunk with a given minimum size, and construct it.
86 // The actual size may be higher, to match the actual space taken in the
87 // memory pool.
88 [[nodiscard]] static UniquePtr<ProfileBufferChunk> Create(
89 Length aMinBufferBytes) {
90 // We need at least one byte, to cover the always-present `mBuffer` byte.
91 aMinBufferBytes = std::max(aMinBufferBytes, Length(1));
92 // Trivial struct with the same alignment as `ProfileBufferChunk`, and size
93 // equal to that alignment, because typically the sizeof of an object is
94 // a multiple of its alignment.
95 struct alignas(alignof(InternalHeader)) ChunkStruct {
96 Byte c[alignof(InternalHeader)];
98 static_assert(std::is_trivial_v<ChunkStruct>,
99 "ChunkStruct must be trivial to avoid any construction");
100 // Allocate an array of that struct, enough to contain the expected
101 // `ProfileBufferChunk` (with its header+buffer).
102 size_t count = (sizeof(InternalHeader) + aMinBufferBytes +
103 (alignof(InternalHeader) - 1)) /
104 alignof(InternalHeader);
105 #if defined(MOZ_MEMORY)
106 // Potentially expand the array to use more of the effective allocation.
107 count = (malloc_good_size(count * sizeof(ChunkStruct)) +
108 (sizeof(ChunkStruct) - 1)) /
109 sizeof(ChunkStruct);
110 #endif
111 auto chunkStorage = MakeUnique<ChunkStruct[]>(count);
112 MOZ_ASSERT(reinterpret_cast<uintptr_t>(chunkStorage.get()) %
113 alignof(InternalHeader) ==
115 // After the allocation, compute the actual chunk size (including header).
116 const size_t chunkBytes = count * sizeof(ChunkStruct);
117 MOZ_ASSERT(chunkBytes >= sizeof(ProfileBufferChunk),
118 "Not enough space to construct a ProfileBufferChunk");
119 MOZ_ASSERT(chunkBytes <=
120 static_cast<size_t>(std::numeric_limits<Length>::max()));
121 // Compute the size of the user-accessible buffer inside the chunk.
122 const Length bufferBytes =
123 static_cast<Length>(chunkBytes - sizeof(InternalHeader));
124 MOZ_ASSERT(bufferBytes >= aMinBufferBytes,
125 "Not enough space for minimum buffer size");
126 // Construct the header at the beginning of the allocated array, with the
127 // known buffer size.
128 new (chunkStorage.get()) ProfileBufferChunk(bufferBytes);
129 // We now have a proper `ProfileBufferChunk` object, create the appropriate
130 // UniquePtr for it.
131 UniquePtr<ProfileBufferChunk> chunk{
132 reinterpret_cast<ProfileBufferChunk*>(chunkStorage.release())};
133 MOZ_ASSERT(
134 size_t(reinterpret_cast<const char*>(
135 &chunk.get()->BufferSpan()[bufferBytes - 1]) -
136 reinterpret_cast<const char*>(chunk.get())) == chunkBytes - 1,
137 "Buffer span spills out of chunk allocation");
138 return chunk;
141 #ifdef DEBUG
142 ~ProfileBufferChunk() {
143 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::InUse);
144 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
145 MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::Created ||
146 mInternalHeader.mState == InternalHeader::State::Done ||
147 mInternalHeader.mState == InternalHeader::State::Recycled);
149 #endif
151 // Must be called with the first block tail (may be empty), which will be
152 // skipped if the reader starts with this ProfileBufferChunk.
153 [[nodiscard]] SpanOfBytes ReserveInitialBlockAsTail(Length aTailSize) {
154 #ifdef DEBUG
155 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::InUse);
156 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
157 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Done);
158 MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::Created ||
159 mInternalHeader.mState == InternalHeader::State::Recycled);
160 mInternalHeader.mState = InternalHeader::State::InUse;
161 #endif
162 mInternalHeader.mHeader.mOffsetFirstBlock = aTailSize;
163 mInternalHeader.mHeader.mOffsetPastLastBlock = aTailSize;
164 mInternalHeader.mHeader.mStartTimeStamp = TimeStamp::Now();
165 return SpanOfBytes(&mBuffer, aTailSize);
168 struct ReserveReturn {
169 SpanOfBytes mSpan;
170 ProfileBufferBlockIndex mBlockRangeIndex;
173 // Reserve a block of up to `aBlockSize` bytes, and return a Span to it, and
174 // its starting index. The actual size may be smaller, if the block cannot fit
175 // in the remaining space.
176 [[nodiscard]] ReserveReturn ReserveBlock(Length aBlockSize) {
177 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Created);
178 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
179 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Done);
180 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Recycled);
181 MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::InUse);
182 MOZ_ASSERT(RangeStart() != 0,
183 "Expected valid range start before first Reserve()");
184 const Length blockOffset = mInternalHeader.mHeader.mOffsetPastLastBlock;
185 Length reservedSize = aBlockSize;
186 if (MOZ_UNLIKELY(aBlockSize >= RemainingBytes())) {
187 reservedSize = RemainingBytes();
188 #ifdef DEBUG
189 mInternalHeader.mState = InternalHeader::State::Full;
190 #endif
192 mInternalHeader.mHeader.mOffsetPastLastBlock += reservedSize;
193 mInternalHeader.mHeader.mBlockCount += 1;
194 return {SpanOfBytes(&mBuffer + blockOffset, reservedSize),
195 ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
196 mInternalHeader.mHeader.mRangeStart + blockOffset)};
199 // When a chunk will not be used to store more blocks (because it is full, or
200 // because the profiler will not add more data), it should be marked "done".
201 // Access to its content is still allowed.
202 void MarkDone() {
203 #ifdef DEBUG
204 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Created);
205 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Done);
206 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Recycled);
207 MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::InUse ||
208 mInternalHeader.mState == InternalHeader::State::Full);
209 mInternalHeader.mState = InternalHeader::State::Done;
210 #endif
211 mInternalHeader.mHeader.mDoneTimeStamp = TimeStamp::Now();
214 // A "Done" chunk may be recycled, to avoid allocating a new one.
215 void MarkRecycled() {
216 #ifdef DEBUG
217 // We also allow Created and already-Recycled chunks to be recycled, this
218 // way it's easier to recycle chunks when their state is not easily
219 // trackable.
220 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::InUse);
221 MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
222 MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::Created ||
223 mInternalHeader.mState == InternalHeader::State::Done ||
224 mInternalHeader.mState == InternalHeader::State::Recycled);
225 mInternalHeader.mState = InternalHeader::State::Recycled;
226 #endif
227 // Reset all header fields, in case this recycled chunk gets read.
228 mInternalHeader.mHeader.Reset();
231 // Public header, meant to uniquely identify a chunk, it may be shared with
232 // other processes to coordinate global memory handling.
233 struct Header {
234 explicit Header(Length aBufferBytes) : mBufferBytes(aBufferBytes) {}
236 // Reset all members to their as-new values (apart from the buffer size,
237 // which cannot change), ready for re-use.
238 void Reset() {
239 mOffsetFirstBlock = 0;
240 mOffsetPastLastBlock = 0;
241 mStartTimeStamp = TimeStamp{};
242 mDoneTimeStamp = TimeStamp{};
243 mBlockCount = 0;
244 mRangeStart = 0;
245 mProcessId = 0;
248 // Note: Part of the ordering of members below is to avoid unnecessary
249 // padding.
251 // Members managed by the ProfileBufferChunk.
253 // Offset of the first block (past the initial tail block, which may be 0).
254 Length mOffsetFirstBlock = 0;
255 // Offset past the last byte of the last reserved block
256 // It may be past mBufferBytes when last block continues in the next
257 // ProfileBufferChunk. It may be before mBufferBytes if ProfileBufferChunk
258 // is marked "Done" before the end is reached.
259 Length mOffsetPastLastBlock = 0;
260 // Timestamp when the buffer becomes in-use, ready to record data.
261 TimeStamp mStartTimeStamp;
262 // Timestamp when the buffer is "Done" (which happens when the last block is
263 // written). This will be used to find and discard the oldest
264 // ProfileBufferChunk.
265 TimeStamp mDoneTimeStamp;
266 // Number of bytes in the buffer, set once at construction time.
267 const Length mBufferBytes;
268 // Number of reserved blocks (including final one even if partial, but
269 // excluding initial tail).
270 Length mBlockCount = 0;
272 // Meta-data set by the user.
274 // Index of the first byte of this ProfileBufferChunk, relative to all
275 // Chunks for this process. Index 0 is reserved as nullptr-like index,
276 // mRangeStart should be set to a non-0 value before the first `Reserve()`.
277 ProfileBufferIndex mRangeStart = 0;
278 // Process writing to this ProfileBufferChunk.
279 int mProcessId = 0;
281 // A bit of spare space (necessary here because of the alignment due to
282 // other members), may be later repurposed for extra data.
283 const int mPADDING = 0;
286 [[nodiscard]] const Header& ChunkHeader() const {
287 return mInternalHeader.mHeader;
290 [[nodiscard]] Length BufferBytes() const {
291 return ChunkHeader().mBufferBytes;
294 // Total size of the chunk (buffer + header).
295 [[nodiscard]] Length ChunkBytes() const {
296 return static_cast<Length>(sizeof(InternalHeader)) + BufferBytes();
299 // Size of external resources, in this case all the following chunks.
300 [[nodiscard]] size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
301 const ProfileBufferChunk* const next = GetNext();
302 return next ? next->SizeOfIncludingThis(aMallocSizeOf) : 0;
305 // Size of this chunk and all following ones.
306 [[nodiscard]] size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
307 // Just in case `aMallocSizeOf` falls back on just `sizeof`, make sure we
308 // account for at least the actual Chunk requested allocation size.
309 return std::max<size_t>(aMallocSizeOf(this), ChunkBytes()) +
310 SizeOfExcludingThis(aMallocSizeOf);
313 [[nodiscard]] Length RemainingBytes() const {
314 return BufferBytes() - OffsetPastLastBlock();
317 [[nodiscard]] Length OffsetFirstBlock() const {
318 return ChunkHeader().mOffsetFirstBlock;
321 [[nodiscard]] Length OffsetPastLastBlock() const {
322 return ChunkHeader().mOffsetPastLastBlock;
325 [[nodiscard]] Length BlockCount() const { return ChunkHeader().mBlockCount; }
327 [[nodiscard]] int ProcessId() const { return ChunkHeader().mProcessId; }
329 void SetProcessId(int aProcessId) {
330 mInternalHeader.mHeader.mProcessId = aProcessId;
333 // Global range index at the start of this Chunk.
334 [[nodiscard]] ProfileBufferIndex RangeStart() const {
335 return ChunkHeader().mRangeStart;
338 void SetRangeStart(ProfileBufferIndex aRangeStart) {
339 mInternalHeader.mHeader.mRangeStart = aRangeStart;
342 // Get a read-only Span to the buffer. It is up to the caller to decypher the
343 // contents, based on known offsets and the internal block structure.
344 [[nodiscard]] Span<const Byte> BufferSpan() const {
345 return Span<const Byte>(&mBuffer, BufferBytes());
348 [[nodiscard]] Byte ByteAt(Length aOffset) const {
349 MOZ_ASSERT(aOffset < OffsetPastLastBlock());
350 return *(&mBuffer + aOffset);
353 [[nodiscard]] ProfileBufferChunk* GetNext() {
354 return mInternalHeader.mNext.get();
356 [[nodiscard]] const ProfileBufferChunk* GetNext() const {
357 return mInternalHeader.mNext.get();
360 [[nodiscard]] UniquePtr<ProfileBufferChunk> ReleaseNext() {
361 return std::move(mInternalHeader.mNext);
364 void InsertNext(UniquePtr<ProfileBufferChunk>&& aChunk) {
365 if (!aChunk) {
366 return;
368 aChunk->SetLast(ReleaseNext());
369 mInternalHeader.mNext = std::move(aChunk);
372 // Find the last chunk in this chain (it may be `this`).
373 [[nodiscard]] ProfileBufferChunk* Last() {
374 ProfileBufferChunk* chunk = this;
375 for (;;) {
376 ProfileBufferChunk* next = chunk->GetNext();
377 if (!next) {
378 return chunk;
380 chunk = next;
383 [[nodiscard]] const ProfileBufferChunk* Last() const {
384 const ProfileBufferChunk* chunk = this;
385 for (;;) {
386 const ProfileBufferChunk* next = chunk->GetNext();
387 if (!next) {
388 return chunk;
390 chunk = next;
394 void SetLast(UniquePtr<ProfileBufferChunk>&& aChunk) {
395 if (!aChunk) {
396 return;
398 Last()->mInternalHeader.mNext = std::move(aChunk);
401 // Join two possibly-null chunk lists.
402 [[nodiscard]] static UniquePtr<ProfileBufferChunk> Join(
403 UniquePtr<ProfileBufferChunk>&& aFirst,
404 UniquePtr<ProfileBufferChunk>&& aLast) {
405 if (aFirst) {
406 aFirst->SetLast(std::move(aLast));
407 return std::move(aFirst);
409 return std::move(aLast);
412 #ifdef DEBUG
413 void Dump(std::FILE* aFile = stdout) const {
414 fprintf(aFile,
415 "Chunk[%p] chunkSize=%u bufferSize=%u state=%s rangeStart=%u "
416 "firstBlockOffset=%u offsetPastLastBlock=%u blockCount=%u",
417 this, unsigned(ChunkBytes()), unsigned(BufferBytes()),
418 mInternalHeader.StateString(), unsigned(RangeStart()),
419 unsigned(OffsetFirstBlock()), unsigned(OffsetPastLastBlock()),
420 unsigned(BlockCount()));
421 const auto len = OffsetPastLastBlock();
422 constexpr unsigned columns = 16;
423 unsigned char ascii[columns + 1];
424 ascii[columns] = '\0';
425 for (Length i = 0; i < len; ++i) {
426 if (i % columns == 0) {
427 fprintf(aFile, "\n %4u=0x%03x:", unsigned(i), unsigned(i));
428 for (unsigned a = 0; a < columns; ++a) {
429 ascii[a] = ' ';
432 unsigned char sep = ' ';
433 if (i == OffsetFirstBlock()) {
434 if (i == OffsetPastLastBlock()) {
435 sep = '#';
436 } else {
437 sep = '[';
439 } else if (i == OffsetPastLastBlock()) {
440 sep = ']';
442 unsigned char c = *(&mBuffer + i);
443 fprintf(aFile, "%c%02x", sep, c);
445 if (i == len - 1) {
446 if (i + 1 == OffsetPastLastBlock()) {
447 // Special case when last block ends right at the end.
448 fprintf(aFile, "]");
449 } else {
450 fprintf(aFile, " ");
452 } else if (i % columns == columns - 1) {
453 fprintf(aFile, " ");
456 ascii[i % columns] = (c >= ' ' && c <= '~') ? c : '.';
458 if (i % columns == columns - 1) {
459 fprintf(aFile, " %s", ascii);
463 if (len % columns < columns - 1) {
464 for (Length i = len % columns; i < columns; ++i) {
465 fprintf(aFile, " ");
467 fprintf(aFile, " %s", ascii);
470 fprintf(aFile, "\n");
472 #endif // DEBUG
474 private:
475 // ProfileBufferChunk constructor. Use static `Create()` to allocate and
476 // construct a ProfileBufferChunk.
477 explicit ProfileBufferChunk(Length aBufferBytes)
478 : mInternalHeader(aBufferBytes) {}
480 // This internal header starts with the public `Header`, and adds some data
481 // only necessary for local handling.
482 // This encapsulation is also necessary to perform placement-new in
483 // `Create()`.
484 struct InternalHeader {
485 explicit InternalHeader(Length aBufferBytes) : mHeader(aBufferBytes) {}
487 Header mHeader;
488 UniquePtr<ProfileBufferChunk> mNext;
490 #ifdef DEBUG
491 enum class State {
492 Created, // Self-set. Just constructed, waiting for initial block tail.
493 InUse, // Ready to accept blocks.
494 Full, // Self-set. Blocks reach the end (or further).
495 Done, // Blocks won't be added anymore.
496 Recycled // Still full of data, but expecting an initial block tail.
499 State mState = State::Created;
500 // Transition table: (X=unexpected)
501 // Method \ State Created InUse Full Done Recycled
502 // ReserveInitialBlockAsTail InUse X X X InUse
503 // Reserve X InUse/Full X X X
504 // MarkDone X Done Done X X
505 // MarkRecycled X X X Recycled X
506 // destructor ok X X ok ok
508 const char* StateString() const {
509 switch (mState) {
510 case State::Created:
511 return "Created";
512 case State::InUse:
513 return "InUse";
514 case State::Full:
515 return "Full";
516 case State::Done:
517 return "Done";
518 case State::Recycled:
519 return "Recycled";
520 default:
521 return "?";
524 #else // DEBUG
525 const char* StateString() const { return "(non-DEBUG)"; }
526 #endif
529 InternalHeader mInternalHeader;
531 // KEEP THIS LAST!
532 // First byte of the buffer. Note that ProfileBufferChunk::Create allocates a
533 // bigger block, such that `mBuffer` is the first of `mBufferBytes` available
534 // bytes.
535 // The initialization is not strictly needed, because bytes should only be
536 // read after they have been written and `mOffsetPastLastBlock` has been
537 // updated. However:
538 // - Reviewbot complains that it's not initialized.
539 // - It's cheap to initialize one byte.
540 // - In the worst case (reading does happen), zero is not a valid entry size
541 // and should get caught in entry readers.
542 Byte mBuffer = '\0';
}  // namespace mozilla

#endif  // ProfileBufferChunk_h