Bug 1874684 - Part 4: Prefer const references instead of copying Instant values....
[gecko.git] / xpcom / threads / TaskController.cpp
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "TaskController.h"
#include "nsIIdleRunnable.h"
#include "nsIRunnable.h"
#include "nsThreadUtils.h"
#include <algorithm>
#include "GeckoProfiler.h"
#include "mozilla/BackgroundHangMonitor.h"
#include "mozilla/EventQueue.h"
#include "mozilla/Hal.h"
#include "mozilla/InputTaskManager.h"
#include "mozilla/VsyncTaskManager.h"
#include "mozilla/IOInterposer.h"
#include "mozilla/Perfetto.h"
#include "mozilla/StaticPtr.h"
#include "mozilla/SchedulerGroup.h"
#include "mozilla/ScopeExit.h"
#include "nsIThreadInternal.h"
#include "nsThread.h"
#include "prenv.h"
#include "prsystem.h"

namespace mozilla {

StaticAutoPtr<TaskController> TaskController::sSingleton;

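// Index of the current thread within the thread pool. It is thread_local and
// initialized to size_t(-1), so it only holds a meaningful value on pool
// threads, where ThreadFuncPoolThread assigns it before entering
// RunPoolThread.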
thread_local size_t mThreadPoolIndex = -1;
std::atomic<uint64_t> Task::sCurrentTaskSeqNo = 0;

const int32_t kMinimumPoolThreadCount = 2;
const int32_t kMaximumPoolThreadCount = 8;

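// Returns the number of threads to create in the pool: the value of the
// MOZ_TASKCONTROLLER_THREADCOUNT environment variable if it is set, otherwise
// a core-count estimate clamped to [kMinimumPoolThreadCount,
// kMaximumPoolThreadCount].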
/* static */
int32_t TaskController::GetPoolThreadCount() {
  if (PR_GetEnv("MOZ_TASKCONTROLLER_THREADCOUNT")) {
    return strtol(PR_GetEnv("MOZ_TASKCONTROLLER_THREADCOUNT"), nullptr, 0);
  }

  int32_t numCores = 0;
#if defined(XP_MACOSX) && defined(__aarch64__)
  if (const auto& cpuInfo = hal::GetHeterogeneousCpuInfo()) {
    // -1 because of the main thread.
    numCores = cpuInfo->mBigCpus.Count() + cpuInfo->mMediumCpus.Count() - 1;
  } else
#endif
  {
    numCores = std::max<int32_t>(1, PR_GetNumberOfProcessors());
  }

  return std::clamp<int32_t>(numCores, kMinimumPoolThreadCount,
                             kMaximumPoolThreadCount);
}

#if defined(MOZ_COLLECTING_RUNNABLE_TELEMETRY)

struct TaskMarker : BaseMarkerType<TaskMarker> {
  static constexpr const char* Name = "Task";
  static constexpr const char* Description =
      "Marker representing a task being executed in TaskController.";

  using MS = MarkerSchema;
  static constexpr MS::PayloadField PayloadFields[] = {
      {"name", MS::InputType::CString, "Task Name", MS::Format::String,
       MS::PayloadFlags::Searchable},
      {"priority", MS::InputType::Uint32, "Priority level",
       MS::Format::Integer},
      {"priorityName", MS::InputType::CString, "Priority Name"}};

  static constexpr MS::Location Locations[] = {MS::Location::MarkerChart,
                                               MS::Location::MarkerTable};
  static constexpr const char* ChartLabel = "{marker.data.name}";
  static constexpr const char* TableLabel =
      "{marker.name} - {marker.data.name} - priority: "
      "{marker.data.priorityName} ({marker.data.priority})";

  static constexpr MS::ETWMarkerGroup Group = MS::ETWMarkerGroup::Scheduling;

  static void TranslateMarkerInputToSchema(void* aContext,
                                           const nsCString& aName,
                                           uint32_t aPriority) {
    ETW::OutputMarkerSchema(aContext, TaskMarker{}, aName, aPriority,
                            ProfilerStringView(""));
  }

  static void StreamJSONMarkerData(baseprofiler::SpliceableJSONWriter& aWriter,
                                   const nsCString& aName, uint32_t aPriority) {
    aWriter.StringProperty("name", aName);
    aWriter.IntProperty("priority", aPriority);
#  define EVENT_PRIORITY(NAME, VALUE)                \
    if (aPriority == (VALUE)) {                      \
      aWriter.StringProperty("priorityName", #NAME); \
    } else
    EVENT_QUEUE_PRIORITY_LIST(EVENT_PRIORITY)
#  undef EVENT_PRIORITY
    {
      aWriter.StringProperty("priorityName", "Invalid Value");
    }
  }
};

class MOZ_RAII AutoProfileTask {
 public:
  explicit AutoProfileTask(nsACString& aName, uint64_t aPriority)
      : mName(aName), mPriority(aPriority) {
    if (profiler_is_collecting_markers()) {
      mStartTime = TimeStamp::Now();
    }
  }

  ~AutoProfileTask() {
    if (!profiler_thread_is_being_profiled_for_markers()) {
      return;
    }

    AUTO_PROFILER_LABEL("AutoProfileTask", PROFILER);
    AUTO_PROFILER_STATS(AUTO_PROFILE_TASK);
    profiler_add_marker("Runnable", ::mozilla::baseprofiler::category::OTHER,
                        mStartTime.IsNull()
                            ? MarkerTiming::IntervalEnd()
                            : MarkerTiming::IntervalUntilNowFrom(mStartTime),
                        TaskMarker{}, mName, mPriority);
  }

 private:
  TimeStamp mStartTime;
  nsAutoCString mName;
  uint32_t mPriority;
};

#  define AUTO_PROFILE_FOLLOWING_TASK(task)                                  \
    nsAutoCString name;                                                      \
    (task)->GetName(name);                                                   \
    PERFETTO_TRACE_EVENT("task", perfetto::DynamicString{name.get()});       \
    AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING_NONSENSITIVE("Task", OTHER, name); \
    mozilla::AutoProfileTask PROFILER_RAII(name, (task)->GetPriority());
#else
#  define AUTO_PROFILE_FOLLOWING_TASK(task)
#endif

bool TaskManager::
    UpdateCachesForCurrentIterationAndReportPriorityModifierChanged(
        const MutexAutoLock& aProofOfLock, IterationType aIterationType) {
  mCurrentSuspended = IsSuspended(aProofOfLock);

  if (aIterationType == IterationType::EVENT_LOOP_TURN && !mCurrentSuspended) {
    int32_t oldModifier = mCurrentPriorityModifier;
    mCurrentPriorityModifier =
        GetPriorityModifierForEventLoopTurn(aProofOfLock);

    if (mCurrentPriorityModifier != oldModifier) {
      return true;
    }
  }
  return false;
}

#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
class MOZ_RAII AutoSetMainThreadRunnableName {
 public:
  explicit AutoSetMainThreadRunnableName(const nsCString& aName) {
    MOZ_ASSERT(NS_IsMainThread());
    // We want to record our current runnable's name in a static so
    // that BHR can record it.
    mRestoreRunnableName = nsThread::sMainThreadRunnableName;

    // Copy the name into sMainThreadRunnableName's buffer, and append a
    // terminating null.
    uint32_t length = std::min((uint32_t)nsThread::kRunnableNameBufSize - 1,
                               (uint32_t)aName.Length());
    memcpy(nsThread::sMainThreadRunnableName.begin(), aName.BeginReading(),
           length);
    nsThread::sMainThreadRunnableName[length] = '\0';
  }

  ~AutoSetMainThreadRunnableName() {
    nsThread::sMainThreadRunnableName = mRestoreRunnableName;
  }

 private:
  Array<char, nsThread::kRunnableNameBufSize> mRestoreRunnableName;
};
#endif

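// Walks this task's dependency graph and returns its highest-priority
// uncompleted (transitive) dependency, or nullptr if every dependency has
// completed. Completed dependencies encountered along the way are pruned so
// they are neither kept alive nor revisited.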
Task* Task::GetHighestPriorityDependency() {
  Task* currentTask = this;

  while (!currentTask->mDependencies.empty()) {
    auto iter = currentTask->mDependencies.begin();

    while (iter != currentTask->mDependencies.end()) {
      if ((*iter)->mCompleted) {
        auto oldIter = iter;
        iter++;
        // Completed tasks are removed here to prevent needlessly keeping them
        // alive or iterating over them in the future.
        currentTask->mDependencies.erase(oldIter);
        continue;
      }

      currentTask = iter->get();
      break;
    }
  }

  return currentTask == this ? nullptr : currentTask;
}

void TaskController::Initialize() {
  MOZ_ASSERT(!sSingleton);
  sSingleton = new TaskController();
}

void ThreadFuncPoolThread(void* aIndex) {
  mThreadPoolIndex = *reinterpret_cast<int32_t*>(aIndex);
  delete reinterpret_cast<int32_t*>(aIndex);
  TaskController::Get()->RunPoolThread();
}

TaskController::TaskController()
    : mGraphMutex("TaskController::mGraphMutex"),
      mThreadPoolCV(mGraphMutex, "TaskController::mThreadPoolCV"),
      mMainThreadCV(mGraphMutex, "TaskController::mMainThreadCV"),
      mRunOutOfMTTasksCounter(0) {
  InputTaskManager::Init();
  VsyncTaskManager::Init();
  mMTProcessingRunnable = NS_NewRunnableFunction(
      "TaskController::ExecutePendingMTTasks()",
      []() { TaskController::Get()->ProcessPendingMTTask(); });
  mMTBlockingProcessingRunnable = NS_NewRunnableFunction(
      "TaskController::ExecutePendingMTTasks()",
      []() { TaskController::Get()->ProcessPendingMTTask(true); });
}

// We want our default stack size limit to be approximately 2MB, to be safe for
// JS helper tasks that can use a lot of stack, but expect most threads to use
// much less. On Linux, however, requesting a stack of 2MB or larger risks the
// kernel allocating an entire 2MB huge page for it on first access, which we do
// not want. To avoid this possibility, we subtract 2 standard VM page sizes
// from our default.
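// That is 2048 * 1024 - 2 * 4096 = 2,088,960 bytes, i.e. two 4 KiB pages
// short of 2 MiB.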
constexpr PRUint32 sBaseStackSize = 2048 * 1024 - 2 * 4096;

// TSan enforces a minimum stack size that's just slightly larger than our
// default helper stack size. It does this to store blobs of TSan-specific data
// on each thread's stack. Unfortunately, that means that even though we'll
// actually receive a larger stack than we requested, the effective usable space
// of that stack is significantly less than what we expect. To offset TSan
// stealing our stack space from underneath us, double the default.
//
// Similarly, ASan requires more stack space due to red-zones.
#if defined(MOZ_TSAN) || defined(MOZ_ASAN)
constexpr PRUint32 sStackSize = 2 * sBaseStackSize;
#else
constexpr PRUint32 sStackSize = sBaseStackSize;
#endif

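// The pool is created lazily: AddTask calls this (under
// mPoolInitializationMutex) the first time an OffMainThreadOnly task is
// added.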
void TaskController::InitializeThreadPool() {
  mPoolInitializationMutex.AssertCurrentThreadOwns();
  MOZ_ASSERT(!mThreadPoolInitialized);
  mThreadPoolInitialized = true;

  int32_t poolSize = GetPoolThreadCount();
  for (int32_t i = 0; i < poolSize; i++) {
    int32_t* index = new int32_t(i);
    mPoolThreads.push_back(
        {PR_CreateThread(PR_USER_THREAD, ThreadFuncPoolThread, index,
                         PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                         PR_JOINABLE_THREAD, sStackSize),
         nullptr});
  }
}

/* static */
size_t TaskController::GetThreadStackSize() { return sStackSize; }

void TaskController::SetPerformanceCounterState(
    PerformanceCounterState* aPerformanceCounterState) {
  mPerformanceCounterState = aPerformanceCounterState;
}

/* static */
void TaskController::Shutdown() {
  InputTaskManager::Cleanup();
  VsyncTaskManager::Cleanup();
  if (sSingleton) {
    sSingleton->ShutdownThreadPoolInternal();
    sSingleton = nullptr;
  }
  MOZ_ASSERT(!sSingleton);
}

void TaskController::ShutdownThreadPoolInternal() {
  {
    // Prevent race condition on mShuttingDown and wait.
    MutexAutoLock lock(mGraphMutex);
    mShuttingDown = true;
    mThreadPoolCV.NotifyAll();
  }
  for (PoolThread& thread : mPoolThreads) {
    PR_JoinThread(thread.mThread);
  }
}

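// Main loop for the pool threads. Each thread repeatedly picks the highest
// priority runnable task from mThreadableTasks (following its dependency
// chain), runs it with mGraphMutex released, and then waits on mThreadPoolCV
// until more work arrives or mShuttingDown is set.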
void TaskController::RunPoolThread() {
  IOInterposer::RegisterCurrentThread();

  // This is used to hold on to a task to make sure it is released outside the
  // lock. This is required since it's perfectly feasible for task destructors
  // to post events themselves.
  RefPtr<Task> lastTask;

  nsAutoCString threadName;
  threadName.AppendLiteral("TaskController #");
  threadName.AppendInt(static_cast<int64_t>(mThreadPoolIndex));
  AUTO_PROFILER_REGISTER_THREAD(threadName.BeginReading());

  MutexAutoLock lock(mGraphMutex);
  while (true) {
    bool ranTask = false;

    if (!mThreadableTasks.empty()) {
      for (auto iter = mThreadableTasks.begin(); iter != mThreadableTasks.end();
           ++iter) {
        // Search for the highest priority dependency of the highest priority
        // task.

        // We work with rawptrs to avoid needless refcounting. All our tasks
        // are always kept alive by the graph. If one is removed from the graph
        // it is kept alive by mPoolThreads[mThreadPoolIndex].mCurrentTask.
        Task* task = iter->get();

        MOZ_ASSERT(!task->mTaskManager);

        mPoolThreads[mThreadPoolIndex].mEffectiveTaskPriority =
            task->GetPriority();

        Task* nextTask;
        while ((nextTask = task->GetHighestPriorityDependency())) {
          task = nextTask;
        }

        if (task->GetKind() == Task::Kind::MainThreadOnly ||
            task->mInProgress) {
          continue;
        }

        mPoolThreads[mThreadPoolIndex].mCurrentTask = task;
        mThreadableTasks.erase(task->mIterator);
        task->mIterator = mThreadableTasks.end();
        task->mInProgress = true;

        if (!mThreadableTasks.empty()) {
          // Ensure at least one additional thread is woken up if there are
          // more threadable tasks to process. Notifying all threads at once
          // isn't actually better for performance since they all need the
          // GraphMutex to proceed anyway.
          mThreadPoolCV.Notify();
        }

        bool taskCompleted = false;
        {
          MutexAutoUnlock unlock(mGraphMutex);
          lastTask = nullptr;
          AUTO_PROFILE_FOLLOWING_TASK(task);
          taskCompleted = task->Run() == Task::TaskResult::Complete;
          ranTask = true;
        }

        task->mInProgress = false;

        if (!taskCompleted) {
          // Presumably this task was interrupted, leave its dependencies
          // unresolved and reinsert into the queue.
          auto insertion = mThreadableTasks.insert(
              mPoolThreads[mThreadPoolIndex].mCurrentTask);
          MOZ_ASSERT(insertion.second);
          task->mIterator = insertion.first;
        } else {
          task->mCompleted = true;
#ifdef DEBUG
          task->mIsInGraph = false;
#endif
          task->mDependencies.clear();
          // This may have unblocked a main thread task. We could do this only
          // if there was a main thread task before this one in the dependency
          // chain.
          mMayHaveMainThreadTask = true;
          // Since this could have multiple dependencies that are restricted
          // to the main thread, let's make sure that's awake.
          EnsureMainThreadTasksScheduled();

          MaybeInterruptTask(GetHighestPriorityMTTask());
        }

        // Store last task for release next time we release the lock or enter
        // wait state.
        lastTask = mPoolThreads[mThreadPoolIndex].mCurrentTask.forget();
        break;
      }
    }

    // Ensure the last task is released before we enter the wait state.
    if (lastTask) {
      MutexAutoUnlock unlock(mGraphMutex);
      lastTask = nullptr;

      // Run another loop iteration, while we were unlocked there was an
      // opportunity for another task to be posted or shutdown to be initiated.
      continue;
    }

    if (!ranTask) {
      if (mShuttingDown) {
        IOInterposer::UnregisterCurrentThread();
        MOZ_ASSERT(mThreadableTasks.empty());
        return;
      }

      AUTO_PROFILER_LABEL("TaskController::RunPoolThread", IDLE);
      mThreadPoolCV.Wait();
    }
  }
}

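// Adds a task to the appropriate priority-ordered set (mMainThreadTasks or
// mThreadableTasks) and, via MaybeInterruptTask, may interrupt a
// lower-priority task that is currently running.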
void TaskController::AddTask(already_AddRefed<Task>&& aTask) {
  RefPtr<Task> task(aTask);

  if (task->GetKind() == Task::Kind::OffMainThreadOnly) {
    MutexAutoLock lock(mPoolInitializationMutex);
    if (!mThreadPoolInitialized) {
      InitializeThreadPool();
    }
  }

  MutexAutoLock lock(mGraphMutex);

  if (TaskManager* manager = task->GetManager()) {
    if (manager->mTaskCount == 0) {
      mTaskManagers.insert(manager);
    }
    manager->DidQueueTask();

    // Set this here since if this manager's priority modifier doesn't change
    // we will not reprioritize when iterating over the queue.
    task->mPriorityModifier = manager->mCurrentPriorityModifier;
  }

  if (profiler_is_active_and_unpaused()) {
    task->mInsertionTime = TimeStamp::Now();
  }

#ifdef DEBUG
  task->mIsInGraph = true;

  for (const RefPtr<Task>& otherTask : task->mDependencies) {
    MOZ_ASSERT(!otherTask->mTaskManager ||
               otherTask->mTaskManager == task->mTaskManager);
  }
#endif

  LogTask::LogDispatch(task);

  std::pair<std::set<RefPtr<Task>, Task::PriorityCompare>::iterator, bool>
      insertion;
  switch (task->GetKind()) {
    case Task::Kind::MainThreadOnly:
      if (task->GetPriority() >=
              static_cast<uint32_t>(EventQueuePriority::Normal) &&
          !mMainThreadTasks.empty()) {
        insertion = std::pair(
            mMainThreadTasks.insert(--mMainThreadTasks.end(), std::move(task)),
            true);
      } else {
        insertion = mMainThreadTasks.insert(std::move(task));
      }
      break;
    case Task::Kind::OffMainThreadOnly:
      insertion = mThreadableTasks.insert(std::move(task));
      break;
  }
  (*insertion.first)->mIterator = insertion.first;
  MOZ_ASSERT(insertion.second);

  MaybeInterruptTask(*insertion.first);
}

void TaskController::WaitForTaskOrMessage() {
  MutexAutoLock lock(mGraphMutex);
  while (!mMayHaveMainThreadTask) {
    AUTO_PROFILER_LABEL("TaskController::WaitForTaskOrMessage", IDLE);
    mMainThreadCV.Wait();
  }
}

void TaskController::ExecuteNextTaskOnlyMainThread() {
  MOZ_ASSERT(NS_IsMainThread());
  MutexAutoLock lock(mGraphMutex);
  ExecuteNextTaskOnlyMainThreadInternal(lock);
}

void TaskController::ProcessPendingMTTask(bool aMayWait) {
  MOZ_ASSERT(NS_IsMainThread());
  MutexAutoLock lock(mGraphMutex);

  for (;;) {
    // We only ever process one event here. However we may sometimes
    // not actually process a real event because of suspended tasks.
    // This loop allows us to wait until we've processed something
    // in that scenario.

    mMTTaskRunnableProcessedTask = ExecuteNextTaskOnlyMainThreadInternal(lock);

    if (mMTTaskRunnableProcessedTask || !aMayWait) {
      break;
    }

#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
    // Unlock before calling into the BackgroundHangMonitor API as it uses
    // the timer API.
    {
      MutexAutoUnlock unlock(mGraphMutex);
      BackgroundHangMonitor().NotifyWait();
    }
#endif

    {
      // ProcessNextEvent will also have attempted to wait, however we may have
      // given it a Runnable when all the tasks in our task graph were suspended
      // but we weren't able to cheaply determine that.
      AUTO_PROFILER_LABEL("TaskController::ProcessPendingMTTask", IDLE);
      mMainThreadCV.Wait();
    }

#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
    {
      MutexAutoUnlock unlock(mGraphMutex);
      BackgroundHangMonitor().NotifyActivity();
    }
#endif
  }

  if (mMayHaveMainThreadTask) {
    EnsureMainThreadTasksScheduled();
  }
}

void TaskController::ReprioritizeTask(Task* aTask, uint32_t aPriority) {
  MutexAutoLock lock(mGraphMutex);
  std::set<RefPtr<Task>, Task::PriorityCompare>* queue = &mMainThreadTasks;
  if (aTask->GetKind() == Task::Kind::OffMainThreadOnly) {
    queue = &mThreadableTasks;
  }

  MOZ_ASSERT(aTask->mIterator != queue->end());
  queue->erase(aTask->mIterator);

  aTask->mPriority = aPriority;

  auto insertion = queue->insert(aTask);
  MOZ_ASSERT(insertion.second);
  aTask->mIterator = insertion.first;

  MaybeInterruptTask(aTask);
}

// Code supporting runnable compatibility.
// Task that wraps a runnable.
class RunnableTask : public Task {
 public:
  RunnableTask(already_AddRefed<nsIRunnable>&& aRunnable, int32_t aPriority,
               Kind aKind)
      : Task(aKind, aPriority), mRunnable(aRunnable) {}

  virtual TaskResult Run() override {
    mRunnable->Run();
    mRunnable = nullptr;
    return TaskResult::Complete;
  }

  void SetIdleDeadline(TimeStamp aDeadline) override {
    nsCOMPtr<nsIIdleRunnable> idleRunnable = do_QueryInterface(mRunnable);
    if (idleRunnable) {
      idleRunnable->SetDeadline(aDeadline);
    }
  }

  virtual bool GetName(nsACString& aName) override {
#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
    if (nsCOMPtr<nsINamed> named = do_QueryInterface(mRunnable)) {
      MOZ_ALWAYS_TRUE(NS_SUCCEEDED(named->GetName(aName)));
    } else {
      aName.AssignLiteral("non-nsINamed runnable");
    }
    if (aName.IsEmpty()) {
      aName.AssignLiteral("anonymous runnable");
    }
    return true;
#else
    return false;
#endif
  }

 private:
  RefPtr<nsIRunnable> mRunnable;
};

void TaskController::DispatchRunnable(already_AddRefed<nsIRunnable>&& aRunnable,
                                      uint32_t aPriority,
                                      TaskManager* aManager) {
  RefPtr<RunnableTask> task = new RunnableTask(std::move(aRunnable), aPriority,
                                               Task::Kind::MainThreadOnly);

  task->SetManager(aManager);
  TaskController::Get()->AddTask(task.forget());
}

nsIRunnable* TaskController::GetRunnableForMTTask(bool aReallyWait) {
  MutexAutoLock lock(mGraphMutex);

  while (mMainThreadTasks.empty()) {
    if (!aReallyWait) {
      return nullptr;
    }

    AUTO_PROFILER_LABEL("TaskController::GetRunnableForMTTask::Wait", IDLE);
    mMainThreadCV.Wait();
  }

  return aReallyWait ? mMTBlockingProcessingRunnable : mMTProcessingRunnable;
}

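// Reports whether any non-suspended main thread task is pending. The first
// pass ignores idle tasks; the second pass (only taken when an idle task
// manager exists) also considers them, using a peeked idle deadline.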
bool TaskController::HasMainThreadPendingTasks() {
  MOZ_ASSERT(NS_IsMainThread());
  auto resetIdleState = MakeScopeExit([&idleManager = mIdleTaskManager] {
    if (idleManager) {
      idleManager->State().ClearCachedIdleDeadline();
    }
  });

  for (bool considerIdle : {false, true}) {
    if (considerIdle && !mIdleTaskManager) {
      continue;
    }

    MutexAutoLock lock(mGraphMutex);

    if (considerIdle) {
      mIdleTaskManager->State().ForgetPendingTaskGuarantee();
      // Temporarily unlock so we can peek our idle deadline.
      // XXX We could do this _before_ we take the lock if the API would let us.
      // We do want to do this before looking at mMainThreadTasks, in case
      // someone adds one while we're unlocked.
      {
        MutexAutoUnlock unlock(mGraphMutex);
        mIdleTaskManager->State().CachePeekedIdleDeadline(unlock);
      }
    }

    // Return early if there are no tasks at all.
    if (mMainThreadTasks.empty()) {
      return false;
    }

    // We can cheaply count how many tasks are suspended.
    uint64_t totalSuspended = 0;
    for (TaskManager* manager : mTaskManagers) {
      DebugOnly<bool> modifierChanged =
          manager
              ->UpdateCachesForCurrentIterationAndReportPriorityModifierChanged(
                  lock, TaskManager::IterationType::NOT_EVENT_LOOP_TURN);
      MOZ_ASSERT(!modifierChanged);

      // The idle manager should be suspended unless we're doing the idle pass.
      MOZ_ASSERT(manager != mIdleTaskManager || manager->mCurrentSuspended ||
                     considerIdle,
                 "Why are idle tasks not suspended here?");

      if (manager->mCurrentSuspended) {
        // XXX - If managers manage off-main-thread tasks this breaks! This
        // scenario is explicitly not supported.

        // This is only incremented inside the lock -or- decremented on the main
        // thread so this is safe.
        totalSuspended += manager->mTaskCount;
      }
    }

    // This would break down if we have a non-suspended task depending on a
    // suspended task. This is why for the moment we do not allow tasks
    // to be dependent on tasks managed by another taskmanager.
    if (mMainThreadTasks.size() > totalSuspended) {
      // If mIdleTaskManager->mTaskCount is 0, we never updated the suspended
      // state of mIdleTaskManager above, hence shouldn't even check it here.
      // But in that case idle tasks are not contributing to our suspended task
      // count anyway.
      if (mIdleTaskManager && mIdleTaskManager->mTaskCount &&
          !mIdleTaskManager->mCurrentSuspended) {
        MOZ_ASSERT(considerIdle, "Why is mIdleTaskManager not suspended?");
        // Check whether the idle tasks were really needed to make our "we have
        // an unsuspended task" decision. If they were, we need to force-enable
        // idle tasks until we run our next task.
        if (mMainThreadTasks.size() - mIdleTaskManager->mTaskCount <=
            totalSuspended) {
          mIdleTaskManager->State().EnforcePendingTaskGuarantee();
        }
      }
      return true;
    }
  }
  return false;
}

uint64_t TaskController::PendingMainthreadTaskCountIncludingSuspended() {
  MutexAutoLock lock(mGraphMutex);
  return mMainThreadTasks.size();
}

bool TaskController::ExecuteNextTaskOnlyMainThreadInternal(
    const MutexAutoLock& aProofOfLock) {
  MOZ_ASSERT(NS_IsMainThread());
  mGraphMutex.AssertCurrentThreadOwns();
  // Block to make it easier to jump to our cleanup.
  bool taskRan = false;
  do {
    taskRan = DoExecuteNextTaskOnlyMainThreadInternal(aProofOfLock);
    if (taskRan) {
      if (mIdleTaskManager && mIdleTaskManager->mTaskCount &&
          mIdleTaskManager->IsSuspended(aProofOfLock)) {
        uint32_t activeTasks = mMainThreadTasks.size();
        for (TaskManager* manager : mTaskManagers) {
          if (manager->IsSuspended(aProofOfLock)) {
            activeTasks -= manager->mTaskCount;
          } else {
            break;
          }
        }

        if (!activeTasks) {
          // We have only idle (and maybe other suspended) tasks left, so need
          // to update the idle state. We need to temporarily release the lock
          // while we do that.
          MutexAutoUnlock unlock(mGraphMutex);
          mIdleTaskManager->State().RequestIdleDeadlineIfNeeded(unlock);
        }
      }
      break;
    }

    if (!mIdleTaskManager) {
      break;
    }

    if (mIdleTaskManager->mTaskCount) {
      // We have idle tasks that we may not have gotten above because
      // our idle state is not up to date. We need to update the idle state
      // and try again. We need to temporarily release the lock while we do
      // that.
      MutexAutoUnlock unlock(mGraphMutex);
      mIdleTaskManager->State().UpdateCachedIdleDeadline(unlock);
    } else {
      MutexAutoUnlock unlock(mGraphMutex);
      mIdleTaskManager->State().RanOutOfTasks(unlock);
    }

    // When we unlocked, someone may have queued a new task on us. So try to
    // see whether we can run things again.
    taskRan = DoExecuteNextTaskOnlyMainThreadInternal(aProofOfLock);
  } while (false);

  if (mIdleTaskManager) {
    // The pending task guarantee is not needed anymore, since we just tried
    // running a task.
    mIdleTaskManager->State().ForgetPendingTaskGuarantee();

    if (mMainThreadTasks.empty()) {
      ++mRunOutOfMTTasksCounter;

      // XXX the IdlePeriodState API demands we have a MutexAutoUnlock for it.
      // Otherwise we could perhaps just do this after we exit the locked block,
      // by pushing the lock down into this method. Though it's not clear that
      // we could check mMainThreadTasks.size() once we unlock, and whether we
      // could maybe substitute mMayHaveMainThreadTask for that check.
      MutexAutoUnlock unlock(mGraphMutex);
      mIdleTaskManager->State().RanOutOfTasks(unlock);
    }
  }
  return taskRan;
}

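// Attempts to run a single main thread task: skips tasks whose manager is
// suspended, resolves each candidate to its final runnable dependency, runs
// it with mGraphMutex released, and returns true if a task actually ran.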
bool TaskController::DoExecuteNextTaskOnlyMainThreadInternal(
    const MutexAutoLock& aProofOfLock) {
  mGraphMutex.AssertCurrentThreadOwns();

  nsCOMPtr<nsIThread> mainIThread;
  NS_GetMainThread(getter_AddRefs(mainIThread));

  nsThread* mainThread = static_cast<nsThread*>(mainIThread.get());
  if (mainThread) {
    mainThread->SetRunningEventDelay(TimeDuration(), TimeStamp());
  }

  uint32_t totalSuspended = 0;
  for (TaskManager* manager : mTaskManagers) {
    bool modifierChanged =
        manager
            ->UpdateCachesForCurrentIterationAndReportPriorityModifierChanged(
                aProofOfLock, TaskManager::IterationType::EVENT_LOOP_TURN);
    if (modifierChanged) {
      ProcessUpdatedPriorityModifier(manager);
    }
    if (manager->mCurrentSuspended) {
      totalSuspended += manager->mTaskCount;
    }
  }

  MOZ_ASSERT(mMainThreadTasks.size() >= totalSuspended);

  // This would break down if we have a non-suspended task depending on a
  // suspended task. This is why for the moment we do not allow tasks
  // to be dependent on tasks managed by another taskmanager.
  if (mMainThreadTasks.size() > totalSuspended) {
    for (auto iter = mMainThreadTasks.begin(); iter != mMainThreadTasks.end();
         iter++) {
      Task* task = iter->get();

      if (task->mTaskManager && task->mTaskManager->mCurrentSuspended) {
        // Even though we may want to run some dependencies of this task, we
        // will run them at their own priority level and not the priority
        // level of their dependents.
        continue;
      }

      task = GetFinalDependency(task);

      if (task->GetKind() == Task::Kind::OffMainThreadOnly ||
          task->mInProgress ||
          (task->mTaskManager && task->mTaskManager->mCurrentSuspended)) {
        continue;
      }

      mCurrentTasksMT.push(task);
      mMainThreadTasks.erase(task->mIterator);
      task->mIterator = mMainThreadTasks.end();
      task->mInProgress = true;
      TaskManager* manager = task->GetManager();
      bool result = false;

      {
        MutexAutoUnlock unlock(mGraphMutex);
        if (manager) {
          manager->WillRunTask();
          if (manager != mIdleTaskManager) {
            // Notify the idle period state that we're running a non-idle task.
            // This needs to happen while our mutex is not locked!
            mIdleTaskManager->State().FlagNotIdle();
          } else {
            TimeStamp idleDeadline =
                mIdleTaskManager->State().GetCachedIdleDeadline();
            MOZ_ASSERT(
                idleDeadline,
                "How can we not have a deadline if our manager is enabled?");
            task->SetIdleDeadline(idleDeadline);
          }
        }
        if (mIdleTaskManager) {
          // We found a task to run; we can clear the idle deadline on our idle
          // task manager. This _must_ be done before we actually run the task,
          // because running the task could reenter via spinning the event loop
          // and we want to make sure there's no cached idle deadline at that
          // point. But we have to make sure we do it after our SetIdleDeadline
          // call above, in the case when the task is actually an idle task.
          mIdleTaskManager->State().ClearCachedIdleDeadline();
        }

        TimeStamp now = TimeStamp::Now();

        if (mainThread) {
          if (task->GetPriority() < uint32_t(EventQueuePriority::InputHigh) ||
              task->mInsertionTime.IsNull()) {
            mainThread->SetRunningEventDelay(TimeDuration(), now);
          } else {
            mainThread->SetRunningEventDelay(now - task->mInsertionTime, now);
          }
        }

        nsAutoCString name;
#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
        task->GetName(name);
#endif

        PerformanceCounterState::Snapshot snapshot =
            mPerformanceCounterState->RunnableWillRun(
                now, manager == mIdleTaskManager);

        {
          LogTask::Run log(task);
#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
          AutoSetMainThreadRunnableName nameGuard(name);
#endif
          AUTO_PROFILE_FOLLOWING_TASK(task);
          result = task->Run() == Task::TaskResult::Complete;
        }

        // Task itself should keep manager alive.
        if (manager) {
          manager->DidRunTask();
        }

        mPerformanceCounterState->RunnableDidRun(name, std::move(snapshot));
      }

      // Task itself should keep manager alive.
      if (manager && result && manager->mTaskCount == 0) {
        mTaskManagers.erase(manager);
      }

      task->mInProgress = false;

      if (!result) {
        // Presumably this task was interrupted, leave its dependencies
        // unresolved and reinsert into the queue.
        auto insertion =
            mMainThreadTasks.insert(std::move(mCurrentTasksMT.top()));
        MOZ_ASSERT(insertion.second);
        task->mIterator = insertion.first;
        if (manager) {
          manager->WillRunTask();
        }
      } else {
        task->mCompleted = true;
#ifdef DEBUG
        task->mIsInGraph = false;
#endif
        // Clear dependencies to release references.
        task->mDependencies.clear();
      }
      if (!mThreadableTasks.empty()) {
        // We're going to wake up a single thread in our pool. This thread
        // is responsible for waking up additional threads in the situation
        // where more than one task became available.
        mThreadPoolCV.Notify();
      }

      mCurrentTasksMT.pop();
      return true;
    }
  }

  mMayHaveMainThreadTask = false;
  if (mIdleTaskManager) {
    // We did not find a task to run. We still need to clear the cached idle
    // deadline on our idle state, because that deadline was only relevant to
    // the execution of this function. Had we found a task, we would have
    // cleared the deadline before running that task.
    mIdleTaskManager->State().ClearCachedIdleDeadline();
  }
  return false;
}

Task* TaskController::GetFinalDependency(Task* aTask) {
  Task* nextTask;

  while ((nextTask = aTask->GetHighestPriorityDependency())) {
    aTask = nextTask;
  }

  return aTask;
}

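// Called with mGraphMutex held when aTask becomes schedulable (for example,
// when it has just been added or reprioritized). Wakes the appropriate thread
// and, when aTask outranks what is currently running there, requests an
// interrupt of that lower-priority task.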
void TaskController::MaybeInterruptTask(Task* aTask) {
  mGraphMutex.AssertCurrentThreadOwns();

  if (!aTask) {
    return;
  }

  // This optimization prevents many slow lookups in long chains of similar
  // priority.
  if (!aTask->mDependencies.empty()) {
    Task* firstDependency = aTask->mDependencies.begin()->get();
    if (aTask->GetPriority() <= firstDependency->GetPriority() &&
        !firstDependency->mCompleted &&
        aTask->GetKind() == firstDependency->GetKind()) {
      // One of this task's dependencies has the same or a higher priority and
      // is not yet completed, so there is never any need to interrupt.
      return;
    }
  }

  Task* finalDependency = GetFinalDependency(aTask);

  if (finalDependency->mInProgress) {
    // No need to wake anything, we can't schedule this task right now anyway.
    return;
  }

  if (aTask->GetKind() == Task::Kind::MainThreadOnly) {
    mMayHaveMainThreadTask = true;

    EnsureMainThreadTasksScheduled();

    if (mCurrentTasksMT.empty()) {
      return;
    }

    // We could go through the steps above here and interrupt an off main
    // thread task in case it has a lower priority.
    if (finalDependency->GetKind() == Task::Kind::OffMainThreadOnly) {
      return;
    }

    if (mCurrentTasksMT.top()->GetPriority() < aTask->GetPriority()) {
      mCurrentTasksMT.top()->RequestInterrupt(aTask->GetPriority());
    }
  } else {
    Task* lowestPriorityTask = nullptr;
    for (PoolThread& thread : mPoolThreads) {
      if (!thread.mCurrentTask) {
        mThreadPoolCV.Notify();
        // There's a free thread, no need to interrupt anything.
        return;
      }

      if (!lowestPriorityTask) {
        lowestPriorityTask = thread.mCurrentTask.get();
        continue;
      }

      // This should possibly select the lowest priority task which was started
      // the latest. But for now we ignore that optimization.
      // This also doesn't guarantee a task is interruptable, so that's an
      // avenue for improvements as well.
      if (lowestPriorityTask->GetPriority() > thread.mEffectiveTaskPriority) {
        lowestPriorityTask = thread.mCurrentTask.get();
      }
    }

    if (lowestPriorityTask->GetPriority() < aTask->GetPriority()) {
      lowestPriorityTask->RequestInterrupt(aTask->GetPriority());
    }
  }

  // We choose not to interrupt main thread tasks for tasks which may be
  // executed off the main thread.
}

Task* TaskController::GetHighestPriorityMTTask() {
  mGraphMutex.AssertCurrentThreadOwns();

  if (!mMainThreadTasks.empty()) {
    return mMainThreadTasks.begin()->get();
  }
  return nullptr;
}

void TaskController::EnsureMainThreadTasksScheduled() {
  if (mObserver) {
    mObserver->OnDispatchedEvent();
  }
  if (mExternalCondVar) {
    mExternalCondVar->Notify();
  }
  mMainThreadCV.Notify();
}

void TaskController::ProcessUpdatedPriorityModifier(TaskManager* aManager) {
  mGraphMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(NS_IsMainThread());

  int32_t modifier = aManager->mCurrentPriorityModifier;

  std::vector<RefPtr<Task>> storedTasks;
  // Find all relevant tasks.
  for (auto iter = mMainThreadTasks.begin(); iter != mMainThreadTasks.end();) {
    if ((*iter)->mTaskManager == aManager) {
      storedTasks.push_back(*iter);
      iter = mMainThreadTasks.erase(iter);
    } else {
      iter++;
    }
  }

  // Reinsert found tasks with their new priorities.
  for (RefPtr<Task>& ref : storedTasks) {
    // Kept alive at first by the vector and then by mMainThreadTasks.
    Task* task = ref;
    task->mPriorityModifier = modifier;
    auto insertion = mMainThreadTasks.insert(std::move(ref));
    MOZ_ASSERT(insertion.second);
    task->mIterator = insertion.first;
  }
}

}  // namespace mozilla