/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ProfileBufferChunkManagerWithLocalLimit_h
#define ProfileBufferChunkManagerWithLocalLimit_h

#include "BaseProfiler.h"
#include "mozilla/BaseProfilerDetail.h"
#include "mozilla/ProfileBufferChunkManager.h"
#include "mozilla/ProfileBufferControlledChunkManager.h"
#include "mozilla/mozalloc.h"

#include <functional>
#include <utility>

namespace mozilla {

// Manages the Chunks for this process in a thread-safe manner, with a maximum
// size per process.
//
// "Unreleased" chunks are not owned here, only "released" chunks can be
// destroyed or recycled when reaching the memory limit, so it is theoretically
// possible to break that limit, if:
// - The user of this class doesn't release their chunks, AND/OR
// - The limit is too small (e.g., smaller than 2 or 3 chunks, which should be
//   the usual number of unreleased chunks in flight).
// In this case, it just means that we will use more memory than allowed,
// potentially risking OOMs. Hopefully this shouldn't happen in real code,
// assuming that the user is doing the right thing and releasing chunks ASAP,
// and that the memory limit is reasonably large.
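//
// Illustrative usage sketch (not part of this header; only the constructor,
// `GetChunk()`, and `ReleaseChunk()` below are real, everything else is an
// assumption):
//
//   ProfileBufferChunkManagerWithLocalLimit chunkManager(
//       /* aMaxTotalBytes */ 1024u * 1024u,
//       /* aChunkMinBufferBytes */ 64u * 1024u);
//   UniquePtr<ProfileBufferChunk> chunk = chunkManager.GetChunk();
//   if (chunk) {
//     // ... fill `chunk` with data and give it a "Done" timestamp ...
//     chunkManager.ReleaseChunk(std::move(chunk));
//   }
//   // Once released + unreleased bytes approach aMaxTotalBytes, GetChunk()
//   // destroys or recycles the oldest released chunks to stay under the
//   // limit.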
class ProfileBufferChunkManagerWithLocalLimit final
    : public ProfileBufferChunkManager,
      public ProfileBufferControlledChunkManager {
 public:
  using Length = ProfileBufferChunk::Length;

  // MaxTotalBytes: Maximum number of bytes allocated in all local Chunks.
  // ChunkMinBufferBytes: Minimum number of user-available bytes in each Chunk.
  // Note that Chunks use a bit more memory for their header.
  explicit ProfileBufferChunkManagerWithLocalLimit(size_t aMaxTotalBytes,
                                                   Length aChunkMinBufferBytes)
      : mMaxTotalBytes(aMaxTotalBytes),
        mChunkMinBufferBytes(aChunkMinBufferBytes) {}

  ~ProfileBufferChunkManagerWithLocalLimit() {
    if (mUpdateCallback) {
      // Signal the end of this callback.
      std::move(mUpdateCallback)(Update(nullptr));
    }
  }

  [[nodiscard]] size_t MaxTotalSize() const final {
    // `mMaxTotalBytes` is `const` so there is no need to lock the mutex.
    return mMaxTotalBytes;
  }

  [[nodiscard]] size_t TotalSize() const { return mTotalBytes; }

  [[nodiscard]] UniquePtr<ProfileBufferChunk> GetChunk() final {
    AUTO_PROFILER_STATS(Local_GetChunk);

    ChunkAndUpdate chunkAndUpdate = [&]() {
      baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
      return GetChunk(lock);
    }();

    baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
    if (mUpdateCallback && !chunkAndUpdate.second.IsNotUpdate()) {
      mUpdateCallback(std::move(chunkAndUpdate.second));
    }

    return std::move(chunkAndUpdate.first);
  }

  void RequestChunk(std::function<void(UniquePtr<ProfileBufferChunk>)>&&
                        aChunkReceiver) final {
    AUTO_PROFILER_STATS(Local_RequestChunk);
    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
    if (mChunkReceiver) {
      // We already have a chunk receiver, meaning a request is pending.
      return;
    }
    // Store the chunk receiver. This indicates that a request is pending, and
    // it will be handled in the next `FulfillChunkRequests()` call.
    mChunkReceiver = std::move(aChunkReceiver);
  }

  void FulfillChunkRequests() final {
    AUTO_PROFILER_STATS(Local_FulfillChunkRequests);
    std::function<void(UniquePtr<ProfileBufferChunk>)> chunkReceiver;
    ChunkAndUpdate chunkAndUpdate = [&]() -> ChunkAndUpdate {
      baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
      if (!mChunkReceiver) {
        // No receiver means no pending request, we're done.
        return {};
      }
      // Otherwise there is a request, extract the receiver to call below.
      std::swap(chunkReceiver, mChunkReceiver);
      MOZ_ASSERT(!mChunkReceiver, "mChunkReceiver should have been emptied");
      // And allocate the requested chunk. This may fail, it's fine, we're
      // letting the receiver know about it.
      AUTO_PROFILER_STATS(Local_FulfillChunkRequests_GetChunk);
      return GetChunk(lock);
    }();

    if (chunkReceiver) {
      {
        baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
        if (mUpdateCallback && !chunkAndUpdate.second.IsNotUpdate()) {
          mUpdateCallback(std::move(chunkAndUpdate.second));
        }
      }

      // Invoke callback outside of lock, so that it can use other chunk manager
      // functions if needed.
      // Note that this means there could be a race, where another request
      // happens now and even gets fulfilled before this one is! It should be
      // rare, and shouldn't be a problem anyway, the user will still get their
      // requested chunks, new/recycled chunks look the same so their order
      // doesn't matter.
      std::move(chunkReceiver)(std::move(chunkAndUpdate.first));
    }
  }

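  // Illustrative request/fulfill pattern (a sketch, not a definitive usage;
  // the lambda body is an assumption): a caller that must not allocate on its
  // own thread can pre-request a chunk, and another thread later services it.
  //
  //   chunkManager.RequestChunk([](UniquePtr<ProfileBufferChunk> aChunk) {
  //     // Invoked from FulfillChunkRequests(); aChunk may be null if the
  //     // allocation failed.
  //   });
  //   // ... later, from whichever thread periodically calls:
  //   chunkManager.FulfillChunkRequests();
  //
  // Only one request can be pending at any time; see `mChunkReceiver` below.
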
  void ReleaseChunk(UniquePtr<ProfileBufferChunk> aChunk) final {
    if (!aChunk) {
      return;
    }

    MOZ_RELEASE_ASSERT(!aChunk->GetNext(), "ReleaseChunk only accepts 1 chunk");
    MOZ_RELEASE_ASSERT(!aChunk->ChunkHeader().mDoneTimeStamp.IsNull(),
                       "Released chunk should have a 'Done' timestamp");

    Update update = [&]() {
      baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
      MOZ_ASSERT(mUser, "Not registered yet");
      // Keep a pointer to the first newly-released chunk, so we can use it to
      // prepare an update (after `aChunk` is moved-from).
      const ProfileBufferChunk* const newlyReleasedChunk = aChunk.get();
      // Transfer the chunk size from the unreleased bucket to the released one.
      mUnreleasedBufferBytes -= aChunk->BufferBytes();
      mReleasedBufferBytes += aChunk->BufferBytes();
      if (!mReleasedChunks) {
        // No other released chunks at the moment, we're starting the list.
        MOZ_ASSERT(mReleasedBufferBytes == aChunk->BufferBytes());
        mReleasedChunks = std::move(aChunk);
      } else {
        // Insert aChunk in mReleasedChunks to keep done-timestamp order.
        const TimeStamp& releasedChunkDoneTimeStamp =
            aChunk->ChunkHeader().mDoneTimeStamp;
        if (releasedChunkDoneTimeStamp <
            mReleasedChunks->ChunkHeader().mDoneTimeStamp) {
          // aChunk is the oldest -> Insert at the beginning.
          aChunk->SetLast(std::move(mReleasedChunks));
          mReleasedChunks = std::move(aChunk);
        } else {
          // Go through the already-released chunk list, and insert aChunk
          // before the first younger released chunk, or at the end.
          ProfileBufferChunk* chunk = mReleasedChunks.get();
          for (;;) {
            ProfileBufferChunk* const nextChunk = chunk->GetNext();
            if (!nextChunk || releasedChunkDoneTimeStamp <
                                  nextChunk->ChunkHeader().mDoneTimeStamp) {
              // Either we're at the last released chunk, or the next released
              // chunk is younger -> Insert right after this released chunk.
              chunk->InsertNext(std::move(aChunk));
              break;
            }
            chunk = nextChunk;
          }
        }
      }

      return Update(mUnreleasedBufferBytes, mReleasedBufferBytes,
                    mReleasedChunks.get(), newlyReleasedChunk);
    }();

    baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
    if (mUpdateCallback && !update.IsNotUpdate()) {
      mUpdateCallback(std::move(update));
    }
  }

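  // Ordering note (illustrative timestamps): if chunks are released out of
  // order with "Done" timestamps T3, T1, then T2, the insertion above keeps
  // `mReleasedChunks` sorted oldest-first as T1 -> T2 -> T3, so
  // `DiscardOldestReleasedChunk()` always destroys the oldest data first.
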
  void SetChunkDestroyedCallback(
      std::function<void(const ProfileBufferChunk&)>&& aChunkDestroyedCallback)
      final {
    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
    MOZ_ASSERT(mUser, "Not registered yet");
    mChunkDestroyedCallback = std::move(aChunkDestroyedCallback);
  }

  [[nodiscard]] UniquePtr<ProfileBufferChunk> GetExtantReleasedChunks() final {
    UniquePtr<ProfileBufferChunk> chunks;
    size_t unreleasedBufferBytes = [&]() {
      baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
      MOZ_ASSERT(mUser, "Not registered yet");
      mReleasedBufferBytes = 0;
      chunks = std::move(mReleasedChunks);
      return mUnreleasedBufferBytes;
    }();

    baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
    if (mUpdateCallback) {
      mUpdateCallback(Update(unreleasedBufferBytes, 0, nullptr, nullptr));
    }

    return chunks;
  }

  void ForgetUnreleasedChunks() final {
    Update update = [&]() {
      baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
      MOZ_ASSERT(mUser, "Not registered yet");
      mUnreleasedBufferBytes = 0;
      return Update(0, mReleasedBufferBytes, mReleasedChunks.get(), nullptr);
    }();
    baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
    if (mUpdateCallback) {
      mUpdateCallback(std::move(update));
    }
  }

  [[nodiscard]] size_t SizeOfExcludingThis(
      MallocSizeOf aMallocSizeOf) const final {
    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
    return SizeOfExcludingThis(aMallocSizeOf, lock);
  }

  [[nodiscard]] size_t SizeOfIncludingThis(
      MallocSizeOf aMallocSizeOf) const final {
    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
    MOZ_ASSERT(mUser, "Not registered yet");
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf, lock);
  }

  void SetUpdateCallback(UpdateCallback&& aUpdateCallback) final {
    {
      baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
      if (mUpdateCallback) {
        // Signal the end of the previous callback.
        std::move(mUpdateCallback)(Update(nullptr));
        mUpdateCallback = nullptr;
      }
    }

    if (aUpdateCallback) {
      Update initialUpdate = [&]() {
        baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
        return Update(mUnreleasedBufferBytes, mReleasedBufferBytes,
                      mReleasedChunks.get(), nullptr);
      }();

      baseprofiler::detail::BaseProfilerAutoLock lock(mUpdateCallbackMutex);
      MOZ_ASSERT(!mUpdateCallback, "Only one update callback allowed");
      mUpdateCallback = std::move(aUpdateCallback);
      mUpdateCallback(std::move(initialUpdate));
    }
  }

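  // Illustrative callback registration (a sketch; the lambda body and the
  // exact contents of `Update` are assumptions, `Update` itself is defined in
  // ProfileBufferControlledChunkManager). The protocol visible above is: one
  // initial full update on registration, further updates as chunks change,
  // and a final `Update(nullptr)` when the callback is replaced or destroyed.
  //
  //   chunkManager.SetUpdateCallback([](Update&& aUpdate) {
  //     // Forward `aUpdate` to whatever tracks global chunk memory; after a
  //     // final update, this callback will not be invoked again.
  //   });
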
  void DestroyChunksAtOrBefore(TimeStamp aDoneTimeStamp) final {
    MOZ_ASSERT(!aDoneTimeStamp.IsNull());
    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
    for (;;) {
      if (!mReleasedChunks) {
        // We don't own any released chunks (anymore), we're done.
        break;
      }
      if (mReleasedChunks->ChunkHeader().mDoneTimeStamp > aDoneTimeStamp) {
        // The current chunk is strictly after the given timestamp, we're done.
        break;
      }
      // We've found a chunk at or before the timestamp, discard it.
      DiscardOldestReleasedChunk(lock);
    }
  }

 protected:
  const ProfileBufferChunk* PeekExtantReleasedChunksAndLock() final {
    mMutex.Lock();
    MOZ_ASSERT(mUser, "Not registered yet");
    return mReleasedChunks.get();
  }
  void UnlockAfterPeekExtantReleasedChunks() final { mMutex.Unlock(); }

 private:
  size_t MaybeRecycleChunkAndGetDeallocatedSize(
      UniquePtr<ProfileBufferChunk>&& chunk,
      const baseprofiler::detail::BaseProfilerAutoLock& aLock) {
    // Try to recycle big-enough chunks. (All chunks should have the same size,
    // but it's a cheap test and may allow future adjustments based on actual
    // data.)
    if (chunk->BufferBytes() >= mChunkMinBufferBytes) {
      // We keep up to two recycled chunks at any time.
      if (!mRecycledChunks) {
        mRecycledChunks = std::move(chunk);
        return 0;
      } else if (!mRecycledChunks->GetNext()) {
        mRecycledChunks->InsertNext(std::move(chunk));
        return 0;
      }
    }
    return moz_malloc_usable_size(chunk.get());
  }

  UniquePtr<ProfileBufferChunk> TakeRecycledChunk(
      const baseprofiler::detail::BaseProfilerAutoLock& aLock) {
    UniquePtr<ProfileBufferChunk> recycled;
    if (mRecycledChunks) {
      recycled = std::exchange(mRecycledChunks, mRecycledChunks->ReleaseNext());
      recycled->MarkRecycled();
    }
    return recycled;
  }

  void DiscardOldestReleasedChunk(
      const baseprofiler::detail::BaseProfilerAutoLock& aLock) {
    MOZ_ASSERT(!!mReleasedChunks);
    UniquePtr<ProfileBufferChunk> oldest =
        std::exchange(mReleasedChunks, mReleasedChunks->ReleaseNext());
    mReleasedBufferBytes -= oldest->BufferBytes();
    if (mChunkDestroyedCallback) {
      // Inform the user that we're going to destroy this chunk.
      mChunkDestroyedCallback(*oldest);
    }

    mTotalBytes -=
        MaybeRecycleChunkAndGetDeallocatedSize(std::move(oldest), aLock);
  }

  using ChunkAndUpdate = std::pair<UniquePtr<ProfileBufferChunk>, Update>;
  [[nodiscard]] ChunkAndUpdate GetChunk(
      const baseprofiler::detail::BaseProfilerAutoLock& aLock) {
    MOZ_ASSERT(mUser, "Not registered yet");
    // After this function, the total memory consumption will be the sum of:
    // - Bytes from released (i.e., full) chunks,
    // - Bytes from unreleased (still in use) chunks,
    // - Bytes from the chunk we want to create/recycle. (Note that we don't
    //   count the extra bytes of chunk header, and of extra allocation ability,
    //   for the new chunk, as it's assumed to be negligible compared to the
    //   total memory limit.)
    // If this total is higher than the local limit, we'll want to destroy
    // the oldest released chunks until we're under the limit; if any, we may
    // recycle one of them to avoid a deallocation followed by an allocation.
    while (mReleasedBufferBytes + mUnreleasedBufferBytes +
                   mChunkMinBufferBytes >=
               mMaxTotalBytes &&
           !!mReleasedChunks) {
      // We have reached the local limit, discard the oldest released chunk.
      DiscardOldestReleasedChunk(aLock);
    }

    // Extract the recycled chunk, if any.
    ChunkAndUpdate chunkAndUpdate{TakeRecycledChunk(aLock), Update()};
    UniquePtr<ProfileBufferChunk>& chunk = chunkAndUpdate.first;

    if (!chunk) {
      // No recycled chunk -> Create a chunk now. (This could still fail.)
      chunk = ProfileBufferChunk::Create(mChunkMinBufferBytes);
      mTotalBytes += moz_malloc_usable_size(chunk.get());
    }

    if (chunk) {
      // We do have a chunk (recycled or new), record its size as "unreleased".
      mUnreleasedBufferBytes += chunk->BufferBytes();

      chunkAndUpdate.second =
          Update(mUnreleasedBufferBytes, mReleasedBufferBytes,
                 mReleasedChunks.get(), nullptr);
    }

    return chunkAndUpdate;
  }

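  // Worked example for the limit check above (illustrative numbers only):
  // with mMaxTotalBytes = 1 MiB and mChunkMinBufferBytes = 64 KiB, if released
  // plus unreleased bytes already total 960 KiB, then 960 KiB + 64 KiB >=
  // 1 MiB, so the oldest released chunks are discarded (and possibly recycled)
  // until the prospective total drops below the limit, or no released chunks
  // remain.
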
  [[nodiscard]] size_t SizeOfExcludingThis(
      MallocSizeOf aMallocSizeOf,
      const baseprofiler::detail::BaseProfilerAutoLock&) const {
    MOZ_ASSERT(mUser, "Not registered yet");
    size_t size = 0;
    if (mReleasedChunks) {
      size += mReleasedChunks->SizeOfIncludingThis(aMallocSizeOf);
    }
    if (mRecycledChunks) {
      size += mRecycledChunks->SizeOfIncludingThis(aMallocSizeOf);
    }
    // Note: Missing size of std::function external resources (if any).
    return size;
  }

  // Maximum number of bytes that should be used by all unreleased and released
  // chunks. Note that only released chunks can be destroyed here, so it is the
  // responsibility of the user to properly release their chunks when possible.
  const size_t mMaxTotalBytes;

  // Minimum number of bytes that new chunks should be able to store.
  // Used when calling `ProfileBufferChunk::Create()`.
  const Length mChunkMinBufferBytes;

  // Mutex guarding the following members.
  mutable baseprofiler::detail::BaseProfilerMutex mMutex;

  // Number of bytes currently held in chunks that have been given away (through
  // `GetChunk` or `RequestChunk`) and not released yet.
  size_t mUnreleasedBufferBytes = 0;

  // Number of bytes currently held in chunks that have been released and stored
  // in `mReleasedChunks` below.
  size_t mReleasedBufferBytes = 0;

  // Total allocated size (used to subtract it from memory counters).
  size_t mTotalBytes = 0;

  // List of all released chunks. The oldest one should be at the start of the
  // list, and may be destroyed or recycled when the memory limit is reached.
  UniquePtr<ProfileBufferChunk> mReleasedChunks;

  // This may hold chunks that were released then slated for destruction, they
  // will be reused next time an allocation would have been needed.
  UniquePtr<ProfileBufferChunk> mRecycledChunks;

  // Optional callback used to notify the user when a chunk is about to be
  // destroyed or recycled. (The data content is always destroyed, but the chunk
  // container may be reused.)
  std::function<void(const ProfileBufferChunk&)> mChunkDestroyedCallback;

  // Callback set from `RequestChunk()`, until it is serviced in
  // `FulfillChunkRequests()`. There can only be one request in flight.
  std::function<void(UniquePtr<ProfileBufferChunk>)> mChunkReceiver;

  // Separate mutex guarding mUpdateCallback, so that it may be invoked outside
  // of the main buffer `mMutex`.
  mutable baseprofiler::detail::BaseProfilerMutex mUpdateCallbackMutex;

  // Callback reporting chunk memory updates to an external controller; set by
  // `SetUpdateCallback()` above, guarded by `mUpdateCallbackMutex`.
  UpdateCallback mUpdateCallback;
};

}  // namespace mozilla

#endif  // ProfileBufferChunkManagerWithLocalLimit_h