/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/StaticPrefs_page_load.h"
#include "mozilla/StaticPrefs_javascript.h"
#include "mozilla/Unused.h"
#include "mozilla/ipc/IdleSchedulerParent.h"
#include "mozilla/AppShutdown.h"
#include "mozilla/Telemetry.h"
#include "nsSystemInfo.h"
#include "nsThreadUtils.h"
#include "nsITimer.h"
#include "nsIThread.h"

namespace mozilla::ipc {
base::SharedMemory* IdleSchedulerParent::sActiveChildCounter = nullptr;
std::bitset<NS_IDLE_SCHEDULER_COUNTER_ARRAY_LENGHT>
    IdleSchedulerParent::sInUseChildCounters;
LinkedList<IdleSchedulerParent> IdleSchedulerParent::sIdleAndGCRequests;
int32_t IdleSchedulerParent::sMaxConcurrentIdleTasksInChildProcesses = 1;
uint32_t IdleSchedulerParent::sMaxConcurrentGCs = 1;
uint32_t IdleSchedulerParent::sActiveGCs = 0;
uint32_t IdleSchedulerParent::sChildProcessesRunningPrioritizedOperation = 0;
uint32_t IdleSchedulerParent::sChildProcessesAlive = 0;
nsITimer* IdleSchedulerParent::sStarvationPreventer = nullptr;

uint32_t IdleSchedulerParent::sNumCPUs = 0;
uint32_t IdleSchedulerParent::sPrefConcurrentGCsMax = 0;
uint32_t IdleSchedulerParent::sPrefConcurrentGCsCPUDivisor = 0;

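// One IdleSchedulerParent actor lives in the parent process for each child
// process using the idle scheduler. The constructor registers the child and,
// the first time around, kicks off an asynchronous query for the logical CPU
// count, which feeds into CalculateNumIdleTasks().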
IdleSchedulerParent::IdleSchedulerParent() {
  MOZ_ASSERT(!AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMShutdownThreads));

  sChildProcessesAlive++;

  uint32_t max_gcs_pref =
      StaticPrefs::javascript_options_concurrent_multiprocess_gcs_max();
  uint32_t cpu_divisor_pref =
      StaticPrefs::javascript_options_concurrent_multiprocess_gcs_cpu_divisor();
  if (!max_gcs_pref) {
    max_gcs_pref = UINT32_MAX;
  }
  if (!cpu_divisor_pref) {
    cpu_divisor_pref = 4;
  }

  if (!sNumCPUs) {
    // While waiting for the real logical core count, behave as if there were
    // just one core.
    sNumCPUs = 1;

    // nsISystemInfo can be initialized only on the main thread.
    nsCOMPtr<nsIThread> thread = do_GetCurrentThread();
    nsCOMPtr<nsIRunnable> runnable =
        NS_NewRunnableFunction("cpucount getter", [thread]() {
          ProcessInfo processInfo = {};
          if (NS_SUCCEEDED(CollectProcessInfo(processInfo))) {
            uint32_t num_cpus = processInfo.cpuCount;
            // We have a new CPU count; update the number of idle tasks.
            if (MOZ_LIKELY(!AppShutdown::IsInOrBeyond(
                    ShutdownPhase::XPCOMShutdownThreads))) {
              nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction(
                  "IdleSchedulerParent::CalculateNumIdleTasks", [num_cpus]() {
                    // We're setting this within this lambda because it's run on
                    // the correct thread and avoids a race.
                    sNumCPUs = num_cpus;

                    // This reads the sPrefConcurrentGCsMax and
                    // sPrefConcurrentGCsCPUDivisor values set below; it will
                    // run after the code that sets those.
                    CalculateNumIdleTasks();
                  });

              thread->Dispatch(runnable, NS_DISPATCH_NORMAL);
            }
          }
        });
    NS_DispatchBackgroundTask(runnable.forget(), NS_DISPATCH_EVENT_MAY_BLOCK);
  }

  if (sPrefConcurrentGCsMax != max_gcs_pref ||
      sPrefConcurrentGCsCPUDivisor != cpu_divisor_pref) {
    // We execute this if these preferences have changed. We also want to make
    // sure it executes for the first IdleSchedulerParent, which it does because
    // sPrefConcurrentGCsMax and sPrefConcurrentGCsCPUDivisor are initially
    // zero.
    sPrefConcurrentGCsMax = max_gcs_pref;
    sPrefConcurrentGCsCPUDivisor = cpu_divisor_pref;

    CalculateNumIdleTasks();
  }
}

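// Derive the concurrency limits from the logical CPU count and the prefs,
// publish the idle-task limit to the shared memory so child processes can read
// it, and re-run scheduling in case the limits changed.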
void IdleSchedulerParent::CalculateNumIdleTasks() {
  MOZ_ASSERT(sNumCPUs);
  MOZ_ASSERT(sPrefConcurrentGCsMax);
  MOZ_ASSERT(sPrefConcurrentGCsCPUDivisor);

  // On one and two processor (or hardware thread) systems this will
  // allow one concurrent idle task.
  sMaxConcurrentIdleTasksInChildProcesses = int32_t(std::max(sNumCPUs, 1u));
  sMaxConcurrentGCs =
      std::min(std::max(sNumCPUs / sPrefConcurrentGCsCPUDivisor, 1u),
               sPrefConcurrentGCsMax);

  if (sActiveChildCounter && sActiveChildCounter->memory()) {
    static_cast<Atomic<int32_t>*>(
        sActiveChildCounter->memory())[NS_IDLE_SCHEDULER_INDEX_OF_CPU_COUNTER] =
        static_cast<int32_t>(sMaxConcurrentIdleTasksInChildProcesses);
  }
  IdleSchedulerParent::Schedule(nullptr);
}

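// Clean up the bookkeeping for a child that has gone away (possibly by
// crashing): release its counter slot, give back any GC token, resolve a
// pending GC request with false, leave the queue, and drop the shared memory
// once the last child is gone.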
IdleSchedulerParent::~IdleSchedulerParent() {
  // We can't know if an active process just crashed, so we just always assume
  // that is the case.
  if (mChildId) {
    sInUseChildCounters[mChildId] = false;
    if (sActiveChildCounter && sActiveChildCounter->memory() &&
        static_cast<Atomic<int32_t>*>(
            sActiveChildCounter->memory())[mChildId]) {
      --static_cast<Atomic<int32_t>*>(
          sActiveChildCounter
              ->memory())[NS_IDLE_SCHEDULER_INDEX_OF_ACTIVITY_COUNTER];
      static_cast<Atomic<int32_t>*>(sActiveChildCounter->memory())[mChildId] =
          0;
    }
  }

  if (mRunningPrioritizedOperation) {
    --sChildProcessesRunningPrioritizedOperation;
  }

  if (mDoingGC) {
    // Give back our GC token.
    sActiveGCs--;
  }

  if (mRequestingGC) {
    mRequestingGC.value()(false);
    mRequestingGC = Nothing();
  }

  // Remove from the scheduler's queue.
  if (isInList()) {
    remove();
  }

  MOZ_ASSERT(sChildProcessesAlive > 0);
  sChildProcessesAlive--;
  if (sChildProcessesAlive == 0) {
    MOZ_ASSERT(sIdleAndGCRequests.isEmpty());
    delete sActiveChildCounter;
    sActiveChildCounter = nullptr;

    if (sStarvationPreventer) {
      sStarvationPreventer->Cancel();
      NS_RELEASE(sStarvationPreventer);
    }
  }

  Schedule(nullptr);
}

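// Set up the cross-process activity counter: lazily create and map the shared
// memory, pick a free counter slot for this child, and resolve with the shared
// memory handle and the chosen slot index.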
IPCResult IdleSchedulerParent::RecvInitForIdleUse(
    InitForIdleUseResolver&& aResolve) {
  // This must already be non-zero; if it were zero, the cleanup code for the
  // shared memory (initialised below) would never run. The invariant is that
  // if the shared memory is initialised, then this is non-zero.
  MOZ_ASSERT(sChildProcessesAlive > 0);

  MOZ_ASSERT(IsNotDoingIdleTask());

  // Create a shared memory object which is shared across all the relevant
  // processes.
  if (!sActiveChildCounter) {
    sActiveChildCounter = new base::SharedMemory();
    size_t shmemSize = NS_IDLE_SCHEDULER_COUNTER_ARRAY_LENGHT * sizeof(int32_t);
    if (sActiveChildCounter->Create(shmemSize) &&
        sActiveChildCounter->Map(shmemSize)) {
      memset(sActiveChildCounter->memory(), 0, shmemSize);
      sInUseChildCounters[NS_IDLE_SCHEDULER_INDEX_OF_ACTIVITY_COUNTER] = true;
      sInUseChildCounters[NS_IDLE_SCHEDULER_INDEX_OF_CPU_COUNTER] = true;
      static_cast<Atomic<int32_t>*>(
          sActiveChildCounter
              ->memory())[NS_IDLE_SCHEDULER_INDEX_OF_CPU_COUNTER] =
          static_cast<int32_t>(sMaxConcurrentIdleTasksInChildProcesses);
    } else {
      delete sActiveChildCounter;
      sActiveChildCounter = nullptr;
    }
  }
  Maybe<SharedMemoryHandle> activeCounter;
  if (SharedMemoryHandle handle =
          sActiveChildCounter ? sActiveChildCounter->CloneHandle() : nullptr) {
    activeCounter.emplace(std::move(handle));
  }

  uint32_t unusedId = 0;
  for (uint32_t i = 0; i < NS_IDLE_SCHEDULER_COUNTER_ARRAY_LENGHT; ++i) {
    if (!sInUseChildCounters[i]) {
      sInUseChildCounters[i] = true;
      unusedId = i;
      break;
    }
  }

  // If there wasn't an empty item, we'll fall back to 0.
  mChildId = unusedId;

  aResolve(std::tuple<mozilla::Maybe<SharedMemoryHandle>&&, const uint32_t&>(
      std::move(activeCounter), mChildId));
  return IPC_OK();
}

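// A child process asks for an idle-time budget. Remember the request, queue
// this actor, and try to schedule it right away.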
IPCResult IdleSchedulerParent::RecvRequestIdleTime(uint64_t aId,
                                                   TimeDuration aBudget) {
  MOZ_ASSERT(aBudget);
  MOZ_ASSERT(IsNotDoingIdleTask());

  mCurrentRequestId = aId;
  mRequestedIdleBudget = aBudget;

  if (!isInList()) {
    sIdleAndGCRequests.insertBack(this);
  }

  Schedule(this);
  return IPC_OK();
}

IPCResult IdleSchedulerParent::RecvIdleTimeUsed(uint64_t aId) {
  // The client can either signal that they've used the idle time or that
  // they're canceling the request. We cannot use a separate cancel message
  // because it could arrive after the parent has granted the request.
  MOZ_ASSERT(IsWaitingForIdle() || IsDoingIdleTask());

  // The parent process will always know the ID of the current request (since
  // the IPC channel is reliable). The IDs are provided so that the client can
  // check them (it's possible for the client to race ahead of the server).
  MOZ_ASSERT(mCurrentRequestId == aId);

  if (IsWaitingForIdle() && !mRequestingGC) {
    remove();
  }
  mRequestedIdleBudget = TimeDuration();
  Schedule(nullptr);
  return IPC_OK();
}

IPCResult IdleSchedulerParent::RecvSchedule() {
  Schedule(nullptr);
  return IPC_OK();
}

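// The child has started a prioritized operation. While any child is running
// one, HasSpareCycles() halves the number of concurrent idle tasks allowed.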
IPCResult IdleSchedulerParent::RecvRunningPrioritizedOperation() {
  ++mRunningPrioritizedOperation;
  if (mRunningPrioritizedOperation == 1) {
    ++sChildProcessesRunningPrioritizedOperation;
  }
  return IPC_OK();
}

IPCResult IdleSchedulerParent::RecvPrioritizedOperationDone() {
  MOZ_ASSERT(mRunningPrioritizedOperation);

  --mRunningPrioritizedOperation;
  if (mRunningPrioritizedOperation == 0) {
    --sChildProcessesRunningPrioritizedOperation;
    Schedule(nullptr);
  }
  return IPC_OK();
}

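// The child asks for permission to run a GC. Store the resolver and queue this
// actor; Schedule() will resolve the request once a GC token is available.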
IPCResult IdleSchedulerParent::RecvRequestGC(RequestGCResolver&& aResolver) {
  MOZ_ASSERT(!mDoingGC);
  MOZ_ASSERT(!mRequestingGC);

  mRequestingGC = Some(aResolver);
  if (!isInList()) {
    sIdleAndGCRequests.insertBack(this);
  }

  Schedule(nullptr);
  return IPC_OK();
}

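// The child reports that it has started a GC (it may do so without waiting for
// permission). Take a GC token and resolve any still-pending request.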
IPCResult IdleSchedulerParent::RecvStartedGC() {
  if (mDoingGC) {
    return IPC_OK();
  }

  mDoingGC = true;
  sActiveGCs++;

  if (mRequestingGC) {
    // We have to respond to the request before dropping it, even though the
    // content process is already doing the GC.
    mRequestingGC.value()(true);
    mRequestingGC = Nothing();
    if (!IsWaitingForIdle()) {
      remove();
    }
  }

  return IPC_OK();
}

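// The child's GC finished; return its GC token and reschedule.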
IPCResult IdleSchedulerParent::RecvDoneGC() {
  MOZ_ASSERT(mDoingGC);
  sActiveGCs--;
  mDoingGC = false;
  Schedule(nullptr);
  return IPC_OK();
}

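// The number of child processes currently marked active in the shared counter,
// or 0 if the shared memory hasn't been set up yet.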
int32_t IdleSchedulerParent::ActiveCount() {
  if (sActiveChildCounter) {
    return (static_cast<Atomic<int32_t>*>(
        sActiveChildCounter
            ->memory())[NS_IDLE_SCHEDULER_INDEX_OF_ACTIVITY_COUNTER]);
  }
  return 0;
}

bool IdleSchedulerParent::HasSpareCycles(int32_t aActiveCount) {
  // We can run a new task if we have a spare core. If we're running a
  // prioritised operation, we halve the number of regular spare cores.

  // sMaxConcurrentIdleTasksInChildProcesses will always be >0, so on 1 and 2
  // core systems this will allow one idle task (0 if running a prioritized
  // operation).
  MOZ_ASSERT(sMaxConcurrentIdleTasksInChildProcesses > 0);
  return sChildProcessesRunningPrioritizedOperation
             ? sMaxConcurrentIdleTasksInChildProcesses / 2 > aActiveCount
             : sMaxConcurrentIdleTasksInChildProcesses > aActiveCount;
}

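// True if another concurrent GC is allowed to start.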
bool IdleSchedulerParent::HasSpareGCCycles() {
  return sMaxConcurrentGCs > sActiveGCs;
}

void IdleSchedulerParent::SendIdleTime() {
  // We would assert IsWaitingForIdle(), except that after potentially removing
  // the task from its list it returns false. Instead check
  // mRequestedIdleBudget.
  MOZ_ASSERT(mRequestedIdleBudget);
  Unused << SendIdleTime(mCurrentRequestId, mRequestedIdleBudget);
}

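// Grant the pending GC request: resolve it with true and take a GC token on
// the child's behalf.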
void IdleSchedulerParent::SendMayGC() {
  MOZ_ASSERT(mRequestingGC);
  mRequestingGC.value()(true);
  mRequestingGC = Nothing();
  mDoingGC = true;
  sActiveGCs++;
}

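// Hand out idle-time budgets and GC permissions to queued requesters, within
// the limits computed by CalculateNumIdleTasks(). aRequester is non-null when
// scheduling is triggered on behalf of a specific child's idle request.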
void IdleSchedulerParent::Schedule(IdleSchedulerParent* aRequester) {
  // Tasks won't update the active count until after they receive their message
  // and start to run, so make a copy of it here and increment it for every task
  // we schedule. It will become an estimate of how many tasks will be active
  // shortly.
  int32_t activeCount = ActiveCount();

  if (aRequester && aRequester->mRunningPrioritizedOperation) {
    // Prioritised operations are requested only for idle time requests, so this
    // must be an idle time request.
    MOZ_ASSERT(aRequester->IsWaitingForIdle());

    // If the requester is prioritized, just let it run itself.
    if (aRequester->isInList() && !aRequester->mRequestingGC) {
      aRequester->remove();
    }
    aRequester->SendIdleTime();
    activeCount++;
  }

  RefPtr<IdleSchedulerParent> idleRequester = sIdleAndGCRequests.getFirst();

  bool has_spare_cycles = HasSpareCycles(activeCount);
  bool has_spare_gc_cycles = HasSpareGCCycles();

  while (idleRequester && (has_spare_cycles || has_spare_gc_cycles)) {
    // Get the next element before potentially removing the current one from the
    // list.
    RefPtr<IdleSchedulerParent> next = idleRequester->getNext();

    if (has_spare_cycles && idleRequester->IsWaitingForIdle()) {
      // We can run an idle task.
      activeCount++;
      if (!idleRequester->mRequestingGC) {
        idleRequester->remove();
      }
      idleRequester->SendIdleTime();
      has_spare_cycles = HasSpareCycles(activeCount);
    }

    if (has_spare_gc_cycles && idleRequester->mRequestingGC) {
      if (!idleRequester->IsWaitingForIdle()) {
        idleRequester->remove();
      }
      idleRequester->SendMayGC();
      has_spare_gc_cycles = HasSpareGCCycles();
    }

    idleRequester = next;
  }

  if (!sIdleAndGCRequests.isEmpty() && HasSpareCycles(activeCount)) {
    EnsureStarvationTimer();
  }
}

void IdleSchedulerParent::EnsureStarvationTimer() {
  // Even though idle runnables aren't really guaranteed to ever get run (which
  // is why most of them have the timer fallback), try not to let any child
  // process's idle handling starve forever in case other processes are busy.
  if (!sStarvationPreventer) {
    // Reuse StaticPrefs::page_load_deprioritization_period(), since that
    // is used on the child side when deciding the minimum idle period.
    NS_NewTimerWithFuncCallback(
        &sStarvationPreventer, StarvationCallback, nullptr,
        StaticPrefs::page_load_deprioritization_period(),
        nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY, "StarvationCallback");
  }
}

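// Fired by sStarvationPreventer: force-schedule the first queued idle request
// by briefly treating its process as running a prioritized operation, then
// release the timer.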
void IdleSchedulerParent::StarvationCallback(nsITimer* aTimer, void* aData) {
  RefPtr<IdleSchedulerParent> idleRequester = sIdleAndGCRequests.getFirst();
  while (idleRequester) {
    if (idleRequester->IsWaitingForIdle()) {
      // Treat the first process waiting for idle time as running a prioritized
      // operation so that it gets run.
      ++idleRequester->mRunningPrioritizedOperation;
      ++sChildProcessesRunningPrioritizedOperation;
      Schedule(idleRequester);
      --idleRequester->mRunningPrioritizedOperation;
      --sChildProcessesRunningPrioritizedOperation;
      break;
    }

    idleRequester = idleRequester->getNext();
  }
  NS_RELEASE(sStarvationPreventer);
}

}  // namespace mozilla::ipc