[SM91] Update to Spidermonkey 91.1.3 APIs
[0ad.git] libraries/source/spidermonkey/include-win32-release/mozilla/ProfileBufferChunk.h
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ProfileBufferChunk_h
#define ProfileBufferChunk_h

#include "mozilla/MemoryReporting.h"
#include "mozilla/ProfileBufferIndex.h"
#include "mozilla/Span.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/UniquePtr.h"

#if defined(MOZ_MEMORY)
#  include "mozmemory.h"
#endif

#include <algorithm>
#include <limits>
#include <type_traits>

#ifdef DEBUG
#  include <cstdio>
#endif

namespace mozilla {

// Represents a single chunk of memory, with a link to the next chunk (or null).
//
// A chunk is made of an internal header (which contains a public part) followed
// by user-accessible bytes.
//
// +---------------+---------+----------------------------------------------+
// | public Header | private | memory containing user blocks                |
// +---------------+---------+----------------------------------------------+
//                           <---------------BufferBytes()------------------>
// <------------------------------ChunkBytes()------------------------------>
//
// The chunk can reserve "blocks", but doesn't know the internal contents of
// each block; it only knows where the first one starts, and where the last one
// ends (which is where the next one will begin, if not already out of range).
// It is up to the user to add structure to each block so that they can be
// distinguished when later read.
//
// +---------------+---------+----------------------------------------------+
// | public Header | private | [1st block]...[last full block]              |
// +---------------+---------+----------------------------------------------+
// ChunkHeader().mOffsetFirstBlock ^                          ^
//                       ChunkHeader().mOffsetPastLastBlock --'
//
// It is possible to attempt to reserve more than the remaining space, in which
// case only what is available is returned. The caller is responsible for using
// another chunk, reserving a block "tail" in it, and using both parts to
// constitute a full block. (This initial tail may be empty in some chunks.)
//
// +---------------+---------+----------------------------------------------+
// | public Header | private | tail][1st block]...[last full block][head... |
// +---------------+---------+----------------------------------------------+
//  ChunkHeader().mOffsetFirstBlock ^                              ^
//                            ChunkHeader().mOffsetPastLastBlock --'
//
// Each Chunk has an internal state (checked in DEBUG builds) that directs how
// to use it during creation, initialization, use, end of life, recycling, and
// destruction. See `State` below for details.
// In particular:
// - `ReserveInitialBlockAsTail()` must be called before the first `Reserve()`
//   after construction or recycling, even with a size of 0 (no actual tail),
// - `MarkDone()` and `MarkRecycled()` must be called as appropriate.
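//
// For orientation, a minimal lifecycle sketch (illustrative only; the size and
// `entryBytes` count are hypothetical, and real callers live in the profiler's
// chunk-management code):
//
//   UniquePtr<ProfileBufferChunk> chunk = ProfileBufferChunk::Create(65536);
//   chunk->SetRangeStart(1);  // 0 is reserved as a nullptr-like index.
//   ProfileBufferChunk::SpanOfBytes tail =
//       chunk->ReserveInitialBlockAsTail(0);  // Empty tail, but mandatory.
//   ProfileBufferChunk::ReserveReturn rr = chunk->ReserveBlock(entryBytes);
//   // ... write up to rr.mSpan.Length() bytes through rr.mSpan ...
//   chunk->MarkDone();      // No more blocks will be reserved.
//   chunk->MarkRecycled();  // Optionally reuse instead of destroying.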
class ProfileBufferChunk {
 public:
  using Byte = uint8_t;
  using Length = uint32_t;

  using SpanOfBytes = Span<Byte>;

  // Hint about the size of the metadata (public and private headers).
  // `Create()` below takes the minimum *buffer* size, so the minimum total
  // Chunk size is at least `SizeofChunkMetadata() + aMinBufferBytes`.
  [[nodiscard]] static constexpr Length SizeofChunkMetadata() {
    return static_cast<Length>(sizeof(InternalHeader));
  }

  // Allocate space for a chunk with a given minimum size, and construct it.
  // The actual size may be higher, to match the actual space taken in the
  // memory pool.
  [[nodiscard]] static UniquePtr<ProfileBufferChunk> Create(
      Length aMinBufferBytes) {
    // We need at least one byte, to cover the always-present `mBuffer` byte.
    aMinBufferBytes = std::max(aMinBufferBytes, Length(1));
    // Trivial struct with the same alignment as `ProfileBufferChunk`, and size
    // equal to that alignment, because typically the sizeof of an object is
    // a multiple of its alignment.
    struct alignas(alignof(InternalHeader)) ChunkStruct {
      Byte c[alignof(InternalHeader)];
    };
    static_assert(std::is_trivial_v<ChunkStruct>,
                  "ChunkStruct must be trivial to avoid any construction");
    // Allocate an array of that struct, enough to contain the expected
    // `ProfileBufferChunk` (with its header+buffer).
    size_t count = (sizeof(InternalHeader) + aMinBufferBytes +
                    (alignof(InternalHeader) - 1)) /
                   alignof(InternalHeader);
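    // For example (illustrative numbers only, since the real sizes are
    // platform-dependent): with sizeof(InternalHeader) == 40,
    // alignof(InternalHeader) == 8, and aMinBufferBytes == 100, this computes
    // count = (40 + 100 + 7) / 8 == 18 structs, i.e. a 144-byte allocation;
    // the integer division rounds up to a whole number of aligned structs.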
#if defined(MOZ_MEMORY)
    // Potentially expand the array to use more of the effective allocation.
    count = (malloc_good_size(count * sizeof(ChunkStruct)) +
             (sizeof(ChunkStruct) - 1)) /
            sizeof(ChunkStruct);
#endif
    auto chunkStorage = MakeUnique<ChunkStruct[]>(count);
    MOZ_ASSERT(reinterpret_cast<uintptr_t>(chunkStorage.get()) %
                   alignof(InternalHeader) ==
               0);
    // After the allocation, compute the actual chunk size (including header).
    const size_t chunkBytes = count * sizeof(ChunkStruct);
    MOZ_ASSERT(chunkBytes >= sizeof(ProfileBufferChunk),
               "Not enough space to construct a ProfileBufferChunk");
    MOZ_ASSERT(chunkBytes <=
               static_cast<size_t>(std::numeric_limits<Length>::max()));
    // Compute the size of the user-accessible buffer inside the chunk.
    const Length bufferBytes =
        static_cast<Length>(chunkBytes - sizeof(InternalHeader));
    MOZ_ASSERT(bufferBytes >= aMinBufferBytes,
               "Not enough space for minimum buffer size");
    // Construct the header at the beginning of the allocated array, with the
    // known buffer size.
    new (chunkStorage.get()) ProfileBufferChunk(bufferBytes);
    // We now have a proper `ProfileBufferChunk` object, create the appropriate
    // UniquePtr for it.
    UniquePtr<ProfileBufferChunk> chunk{
        reinterpret_cast<ProfileBufferChunk*>(chunkStorage.release())};
    MOZ_ASSERT(
        size_t(reinterpret_cast<const char*>(
                   &chunk.get()->BufferSpan()[bufferBytes - 1]) -
               reinterpret_cast<const char*>(chunk.get())) == chunkBytes - 1,
        "Buffer span spills out of chunk allocation");
    return chunk;
  }

#ifdef DEBUG
  ~ProfileBufferChunk() {
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::InUse);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
    MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::Created ||
               mInternalHeader.mState == InternalHeader::State::Done ||
               mInternalHeader.mState == InternalHeader::State::Recycled);
  }
#endif

  // Must be called with the first block tail (may be empty), which will be
  // skipped if the reader starts with this ProfileBufferChunk.
  [[nodiscard]] SpanOfBytes ReserveInitialBlockAsTail(Length aTailSize) {
#ifdef DEBUG
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::InUse);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Done);
    MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::Created ||
               mInternalHeader.mState == InternalHeader::State::Recycled);
    mInternalHeader.mState = InternalHeader::State::InUse;
#endif
    mInternalHeader.mHeader.mOffsetFirstBlock = aTailSize;
    mInternalHeader.mHeader.mOffsetPastLastBlock = aTailSize;
    return SpanOfBytes(&mBuffer, aTailSize);
  }

  struct ReserveReturn {
    SpanOfBytes mSpan;
    ProfileBufferBlockIndex mBlockRangeIndex;
  };

  // Reserve a block of up to `aBlockSize` bytes, and return a Span to it,
  // along with its starting index. The actual size may be smaller if the block
  // cannot fit in the remaining space.
  [[nodiscard]] ReserveReturn ReserveBlock(Length aBlockSize) {
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Created);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Done);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Recycled);
    MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::InUse);
    MOZ_ASSERT(RangeStart() != 0,
               "Expected valid range start before first Reserve()");
    const Length blockOffset = mInternalHeader.mHeader.mOffsetPastLastBlock;
    Length reservedSize = aBlockSize;
    if (MOZ_UNLIKELY(aBlockSize >= RemainingBytes())) {
      reservedSize = RemainingBytes();
#ifdef DEBUG
      mInternalHeader.mState = InternalHeader::State::Full;
#endif
    }
    mInternalHeader.mHeader.mOffsetPastLastBlock += reservedSize;
    mInternalHeader.mHeader.mBlockCount += 1;
    return {SpanOfBytes(&mBuffer + blockOffset, reservedSize),
            ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
                mInternalHeader.mHeader.mRangeStart + blockOffset)};
  }
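
  // A sketch of how a caller might handle a short reservation (the profiler's
  // chunk-management code does something along these lines; `chunk`,
  // `nextChunk` and `blockSize` are hypothetical here):
  //
  //   ProfileBufferChunk::ReserveReturn rr = chunk->ReserveBlock(blockSize);
  //   if (rr.mSpan.Length() < blockSize) {
  //     // `chunk` is now Full; reserve the remainder as the initial block
  //     // tail of the next chunk, and write the block across both spans.
  //     SpanOfBytes tail = nextChunk->ReserveInitialBlockAsTail(
  //         blockSize - static_cast<Length>(rr.mSpan.Length()));
  //   }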

  // When a chunk will not be used to store more blocks (because it is full, or
  // because the profiler will not add more data), it should be marked "done".
  // Access to its content is still allowed.
  void MarkDone() {
#ifdef DEBUG
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Created);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Done);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Recycled);
    MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::InUse ||
               mInternalHeader.mState == InternalHeader::State::Full);
    mInternalHeader.mState = InternalHeader::State::Done;
#endif
    mInternalHeader.mHeader.mDoneTimeStamp = TimeStamp::NowUnfuzzed();
  }

  // A "Done" chunk may be recycled, to avoid allocating a new one.
  void MarkRecycled() {
#ifdef DEBUG
    // We also allow Created and already-Recycled chunks to be recycled, this
    // way it's easier to recycle chunks when their state is not easily
    // trackable.
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::InUse);
    MOZ_ASSERT(mInternalHeader.mState != InternalHeader::State::Full);
    MOZ_ASSERT(mInternalHeader.mState == InternalHeader::State::Created ||
               mInternalHeader.mState == InternalHeader::State::Done ||
               mInternalHeader.mState == InternalHeader::State::Recycled);
    mInternalHeader.mState = InternalHeader::State::Recycled;
#endif
    // Reset all header fields, in case this recycled chunk gets read.
    mInternalHeader.mHeader.Reset();
  }

  // Public header, meant to uniquely identify a chunk; it may be shared with
  // other processes to coordinate global memory handling.
  struct Header {
    explicit Header(Length aBufferBytes) : mBufferBytes(aBufferBytes) {}

    // Reset all members to their as-new values (apart from the buffer size,
    // which cannot change), ready for re-use.
    void Reset() {
      mOffsetFirstBlock = 0;
      mOffsetPastLastBlock = 0;
      mDoneTimeStamp = TimeStamp{};
      mBlockCount = 0;
      mRangeStart = 0;
      mProcessId = 0;
    }

    // Note: Part of the ordering of members below is to avoid unnecessary
    // padding.

    // Members managed by the ProfileBufferChunk.

    // Offset of the first block (past the initial tail block, which may be 0).
    Length mOffsetFirstBlock = 0;
    // Offset past the last byte of the last reserved block.
    // It may be past mBufferBytes when the last block continues in the next
    // ProfileBufferChunk. It may be before mBufferBytes if the
    // ProfileBufferChunk is marked "Done" before the end is reached.
    Length mOffsetPastLastBlock = 0;
    // Timestamp when the buffer is "Done" (which happens when the last block
    // is written). This will be used to find and discard the oldest
    // ProfileBufferChunk.
    TimeStamp mDoneTimeStamp;
    // Number of bytes in the buffer, set once at construction time.
    const Length mBufferBytes;
    // Number of reserved blocks (including the final one even if partial, but
    // excluding the initial tail).
    Length mBlockCount = 0;

    // Meta-data set by the user.

    // Index of the first byte of this ProfileBufferChunk, relative to all
    // Chunks for this process. Index 0 is reserved as a nullptr-like index;
    // mRangeStart should be set to a non-0 value before the first `Reserve()`.
    ProfileBufferIndex mRangeStart = 0;
    // Process writing to this ProfileBufferChunk.
    int mProcessId = 0;

    // A bit of spare space (necessary here because of the alignment due to
    // other members), may be later repurposed for extra data.
    const int mPADDING = 0;
  };

  [[nodiscard]] const Header& ChunkHeader() const {
    return mInternalHeader.mHeader;
  }

  [[nodiscard]] Length BufferBytes() const {
    return ChunkHeader().mBufferBytes;
  }

  // Total size of the chunk (buffer + header).
  [[nodiscard]] Length ChunkBytes() const {
    return static_cast<Length>(sizeof(InternalHeader)) + BufferBytes();
  }

  // Size of external resources, in this case all the following chunks.
  [[nodiscard]] size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
    const ProfileBufferChunk* const next = GetNext();
    return next ? next->SizeOfIncludingThis(aMallocSizeOf) : 0;
  }

  // Size of this chunk and all following ones.
  [[nodiscard]] size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    // Just in case `aMallocSizeOf` falls back on just `sizeof`, make sure we
    // account for at least the actual Chunk requested allocation size.
    return std::max<size_t>(aMallocSizeOf(this), ChunkBytes()) +
           SizeOfExcludingThis(aMallocSizeOf);
  }
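
  // For example (sketch, with a hypothetical `MallocSizeOfCallback` matching
  // the MallocSizeOf signature from mozilla/MemoryReporting.h), a whole chain
  // can be measured from its first chunk:
  //
  //   size_t bytes = firstChunk->SizeOfIncludingThis(MallocSizeOfCallback);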

  [[nodiscard]] Length RemainingBytes() const {
    return BufferBytes() - OffsetPastLastBlock();
  }

  [[nodiscard]] Length OffsetFirstBlock() const {
    return ChunkHeader().mOffsetFirstBlock;
  }

  [[nodiscard]] Length OffsetPastLastBlock() const {
    return ChunkHeader().mOffsetPastLastBlock;
  }

  [[nodiscard]] Length BlockCount() const { return ChunkHeader().mBlockCount; }

  [[nodiscard]] int ProcessId() const { return ChunkHeader().mProcessId; }

  void SetProcessId(int aProcessId) {
    mInternalHeader.mHeader.mProcessId = aProcessId;
  }

  // Global range index at the start of this Chunk.
  [[nodiscard]] ProfileBufferIndex RangeStart() const {
    return ChunkHeader().mRangeStart;
  }

  void SetRangeStart(ProfileBufferIndex aRangeStart) {
    mInternalHeader.mHeader.mRangeStart = aRangeStart;
  }

  // Get a read-only Span to the buffer. It is up to the caller to decipher the
  // contents, based on known offsets and the internal block structure.
  [[nodiscard]] Span<const Byte> BufferSpan() const {
    return Span<const Byte>(&mBuffer, BufferBytes());
  }

  [[nodiscard]] Byte ByteAt(Length aOffset) const {
    MOZ_ASSERT(aOffset < OffsetPastLastBlock());
    return *(&mBuffer + aOffset);
  }

  [[nodiscard]] ProfileBufferChunk* GetNext() {
    return mInternalHeader.mNext.get();
  }
  [[nodiscard]] const ProfileBufferChunk* GetNext() const {
    return mInternalHeader.mNext.get();
  }

  [[nodiscard]] UniquePtr<ProfileBufferChunk> ReleaseNext() {
    return std::move(mInternalHeader.mNext);
  }

  void InsertNext(UniquePtr<ProfileBufferChunk>&& aChunk) {
    if (!aChunk) {
      return;
    }
    aChunk->SetLast(ReleaseNext());
    mInternalHeader.mNext = std::move(aChunk);
  }

  // Find the last chunk in this chain (it may be `this`).
  [[nodiscard]] ProfileBufferChunk* Last() {
    ProfileBufferChunk* chunk = this;
    for (;;) {
      ProfileBufferChunk* next = chunk->GetNext();
      if (!next) {
        return chunk;
      }
      chunk = next;
    }
  }
  [[nodiscard]] const ProfileBufferChunk* Last() const {
    const ProfileBufferChunk* chunk = this;
    for (;;) {
      const ProfileBufferChunk* next = chunk->GetNext();
      if (!next) {
        return chunk;
      }
      chunk = next;
    }
  }

  void SetLast(UniquePtr<ProfileBufferChunk>&& aChunk) {
    if (!aChunk) {
      return;
    }
    Last()->mInternalHeader.mNext = std::move(aChunk);
  }

  // Join two possibly-null chunk lists.
  [[nodiscard]] static UniquePtr<ProfileBufferChunk> Join(
      UniquePtr<ProfileBufferChunk>&& aFirst,
      UniquePtr<ProfileBufferChunk>&& aLast) {
    if (aFirst) {
      aFirst->SetLast(std::move(aLast));
      return std::move(aFirst);
    }
    return std::move(aLast);
  }
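
  // For example (sketch, with hypothetical caller-owned lists `doneChunks`
  // and `currentChunks`): `Join` accepts null on either side, so lists can be
  // concatenated without pre-checking:
  //
  //   UniquePtr<ProfileBufferChunk> all = ProfileBufferChunk::Join(
  //       std::move(doneChunks), std::move(currentChunks));
  //
  // If both inputs are null, `all` is null too.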

#ifdef DEBUG
  void Dump(std::FILE* aFile = stdout) const {
    fprintf(aFile,
            "Chunk[%p] chunkSize=%u bufferSize=%u state=%s rangeStart=%u "
            "firstBlockOffset=%u offsetPastLastBlock=%u blockCount=%u",
            this, unsigned(ChunkBytes()), unsigned(BufferBytes()),
            mInternalHeader.StateString(), unsigned(RangeStart()),
            unsigned(OffsetFirstBlock()), unsigned(OffsetPastLastBlock()),
            unsigned(BlockCount()));
    const auto len = OffsetPastLastBlock();
    constexpr unsigned columns = 16;
    unsigned char ascii[columns + 1];
    ascii[columns] = '\0';
    for (Length i = 0; i < len; ++i) {
      if (i % columns == 0) {
        fprintf(aFile, "\n %4u=0x%03x:", unsigned(i), unsigned(i));
        for (unsigned a = 0; a < columns; ++a) {
          ascii[a] = ' ';
        }
      }

      unsigned char sep = ' ';
      if (i == OffsetFirstBlock()) {
        if (i == OffsetPastLastBlock()) {
          sep = '#';
        } else {
          sep = '[';
        }
      } else if (i == OffsetPastLastBlock()) {
        sep = ']';
      }
      unsigned char c = *(&mBuffer + i);
      fprintf(aFile, "%c%02x", sep, c);

      if (i == len - 1) {
        if (i + 1 == OffsetPastLastBlock()) {
          // Special case when the last block ends right at the end.
          fprintf(aFile, "]");
        } else {
          fprintf(aFile, " ");
        }
      } else if (i % columns == columns - 1) {
        fprintf(aFile, " ");
      }

      ascii[i % columns] = (c >= ' ' && c <= '~') ? c : '.';

      if (i % columns == columns - 1) {
        fprintf(aFile, " %s", ascii);
      }
    }

    if (len % columns < columns - 1) {
      // Pad the incomplete last row (each byte takes 3 characters, "%c%02x")
      // so that the trailing ASCII column stays aligned.
      for (Length i = len % columns; i < columns; ++i) {
        fprintf(aFile, "   ");
      }
      fprintf(aFile, " %s", ascii);
    }

    fprintf(aFile, "\n");
  }
#endif  // DEBUG

 private:
  // ProfileBufferChunk constructor. Use static `Create()` to allocate and
  // construct a ProfileBufferChunk.
  explicit ProfileBufferChunk(Length aBufferBytes)
      : mInternalHeader(aBufferBytes) {}

  // This internal header starts with the public `Header`, and adds some data
  // only necessary for local handling.
  // This encapsulation is also necessary to perform placement-new in
  // `Create()`.
  struct InternalHeader {
    explicit InternalHeader(Length aBufferBytes) : mHeader(aBufferBytes) {}

    Header mHeader;
    UniquePtr<ProfileBufferChunk> mNext;

#ifdef DEBUG
    enum class State {
      Created,  // Self-set. Just constructed, waiting for initial block tail.
      InUse,    // Ready to accept blocks.
      Full,     // Self-set. Blocks reach the end (or further).
      Done,     // Blocks won't be added anymore.
      Recycled  // Still full of data, but expecting an initial block tail.
    };

    State mState = State::Created;
    // Transition table: (X=unexpected)
    // Method \ State             Created  InUse       Full  Done      Recycled
    // ReserveInitialBlockAsTail  InUse    X           X     X         InUse
    // Reserve                    X        InUse/Full  X     X         X
    // MarkDone                   X        Done        Done  X         X
    // MarkRecycled               X        X           X     Recycled  X
    // destructor                 ok       X           X     ok        ok

    const char* StateString() const {
      switch (mState) {
        case State::Created:
          return "Created";
        case State::InUse:
          return "InUse";
        case State::Full:
          return "Full";
        case State::Done:
          return "Done";
        case State::Recycled:
          return "Recycled";
        default:
          return "?";
      }
    }
#else  // DEBUG
    const char* StateString() const { return "(non-DEBUG)"; }
#endif
  };

  InternalHeader mInternalHeader;

  // KEEP THIS LAST!
  // First byte of the buffer. Note that ProfileBufferChunk::Create allocates a
  // bigger block, such that `mBuffer` is the first of `mBufferBytes` available
  // bytes.
  // The initialization is not strictly needed, because bytes should only be
  // read after they have been written and `mOffsetPastLastBlock` has been
  // updated. However:
  // - Reviewbot complains that it's not initialized.
  // - It's cheap to initialize one byte.
  // - In the worst case (reading does happen), zero is not a valid entry size
  //   and should get caught in entry readers.
  Byte mBuffer = '\0';
};

}  // namespace mozilla

#endif  // ProfileBufferChunk_h