/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/ThreadEventQueue.h"
#include "mozilla/EventQueue.h"

#include "LeakRefPtr.h"
#include "nsComponentManagerUtils.h"
#include "nsITargetShutdownTask.h"
#include "nsIThreadInternal.h"
#include "nsThreadUtils.h"
#include "nsThread.h"
#include "ThreadEventTarget.h"
#include "mozilla/ProfilerLabels.h"
#include "mozilla/TaskController.h"
#include "mozilla/StaticPrefs_threads.h"

using namespace mozilla;
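
// NestedSink is the ThreadTargetSink used for queues created by
// PushEventQueue. It forwards dispatched events back to the owning
// ThreadEventQueue, which routes them into the nested EventQueue until
// Disconnect clears the queue pointer.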
class ThreadEventQueue::NestedSink : public ThreadTargetSink {
 public:
  NestedSink(EventQueue* aQueue, ThreadEventQueue* aOwner)
      : mQueue(aQueue), mOwner(aOwner) {}

  bool PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
                EventQueuePriority aPriority) final {
    return mOwner->PutEventInternal(std::move(aEvent), aPriority, this);
  }

  void Disconnect(const MutexAutoLock& aProofOfLock) final { mQueue = nullptr; }

  nsresult RegisterShutdownTask(nsITargetShutdownTask* aTask) final {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  nsresult UnregisterShutdownTask(nsITargetShutdownTask* aTask) final {
    return NS_ERROR_NOT_IMPLEMENTED;
  }

  size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) {
    if (mQueue) {
      return mQueue->SizeOfIncludingThis(aMallocSizeOf);
    }
    return 0;
  }

 private:
  friend class ThreadEventQueue;

  // This is a non-owning reference. It must live at least until Disconnect is
  // called to clear it out.
  EventQueue* mQueue;
  RefPtr<ThreadEventQueue> mOwner;
};
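
// On the main thread the constructor also hands mEventsAvailable to
// TaskController, so TaskController can signal event availability for this
// queue.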
ThreadEventQueue::ThreadEventQueue(UniquePtr<EventQueue> aQueue,
                                   bool aIsMainThread)
    : mBaseQueue(std::move(aQueue)),
      mLock("ThreadEventQueue"),
      mEventsAvailable(mLock, "EventsAvail"),
      mIsMainThread(aIsMainThread) {
  if (aIsMainThread) {
    TaskController::Get()->SetConditionVariable(&mEventsAvailable);
  }
}

ThreadEventQueue::~ThreadEventQueue() { MOZ_ASSERT(mNestedQueues.IsEmpty()); }

bool ThreadEventQueue::PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
                                EventQueuePriority aPriority) {
  return PutEventInternal(std::move(aEvent), aPriority, nullptr);
}
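
// Shared dispatch path for the base queue (aSink == nullptr) and for nested
// queues created by PushEventQueue (aSink != nullptr). Returns false if the
// queue has been shut down or the nested sink has been disconnected.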
bool ThreadEventQueue::PutEventInternal(already_AddRefed<nsIRunnable>&& aEvent,
                                        EventQueuePriority aPriority,
                                        NestedSink* aSink) {
  // We want to leak the reference when we fail to dispatch it, so that
  // we won't release the event in a wrong thread.
  LeakRefPtr<nsIRunnable> event(std::move(aEvent));
  nsCOMPtr<nsIThreadObserver> obs;

  {
    // Check if the runnable wants to override the passed-in priority.
    // Do this outside the lock, so runnables implemented in JS can QI
    // (and possibly GC) outside of the lock.
    if (mIsMainThread) {
      auto* e = event.get();  // can't do_QueryInterface on LeakRefPtr.
      if (nsCOMPtr<nsIRunnablePriority> runnablePrio = do_QueryInterface(e)) {
        uint32_t prio = nsIRunnablePriority::PRIORITY_NORMAL;
        runnablePrio->GetPriority(&prio);
        if (prio == nsIRunnablePriority::PRIORITY_CONTROL) {
          aPriority = EventQueuePriority::Control;
        } else if (prio == nsIRunnablePriority::PRIORITY_RENDER_BLOCKING) {
          aPriority = EventQueuePriority::RenderBlocking;
        } else if (prio == nsIRunnablePriority::PRIORITY_VSYNC) {
          aPriority = EventQueuePriority::Vsync;
        } else if (prio == nsIRunnablePriority::PRIORITY_INPUT_HIGH) {
          aPriority = EventQueuePriority::InputHigh;
        } else if (prio == nsIRunnablePriority::PRIORITY_MEDIUMHIGH) {
          aPriority = EventQueuePriority::MediumHigh;
        } else if (prio == nsIRunnablePriority::PRIORITY_DEFERRED_TIMERS) {
          aPriority = EventQueuePriority::DeferredTimers;
        } else if (prio == nsIRunnablePriority::PRIORITY_IDLE) {
          aPriority = EventQueuePriority::Idle;
        } else if (prio == nsIRunnablePriority::PRIORITY_LOW) {
          aPriority = EventQueuePriority::Low;
        }
      }

      if (aPriority == EventQueuePriority::Control &&
          !StaticPrefs::threads_control_event_queue_enabled()) {
        aPriority = EventQueuePriority::MediumHigh;
      }
    }

    MutexAutoLock lock(mLock);

    if (mEventsAreDoomed) {
      return false;
    }

    if (aSink) {
      if (!aSink->mQueue) {
        return false;
      }

      aSink->mQueue->PutEvent(event.take(), aPriority, lock);
    } else {
      mBaseQueue->PutEvent(event.take(), aPriority, lock);
    }

    mEventsAvailable.Notify();

    // Make sure to grab the observer before dropping the lock, otherwise the
    // event that we just placed into the queue could run and eventually delete
    // this nsThread before the calling thread is scheduled again. We would then
    // crash while trying to access a dead nsThread.
    obs = mObserver;
  }

  if (obs) {
    obs->OnDispatchedEvent();
  }

  return true;
}
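
// Returns the next event, always draining the topmost nested queue first.
// If aMayWait is true, blocks on mEventsAvailable until an event arrives;
// otherwise returns null when no event is ready.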
already_AddRefed<nsIRunnable> ThreadEventQueue::GetEvent(
    bool aMayWait, mozilla::TimeDuration* aLastEventDelay) {
  nsCOMPtr<nsIRunnable> event;
  {
    // Scope for lock. When we are about to return, we will exit this
    // scope so we can do some work after releasing the lock but
    // before returning.
    MutexAutoLock lock(mLock);

    for (;;) {
      const bool noNestedQueue = mNestedQueues.IsEmpty();
      if (noNestedQueue) {
        event = mBaseQueue->GetEvent(lock, aLastEventDelay);
      } else {
        // We always get events from the topmost queue when there are nested
        // queues.
        event =
            mNestedQueues.LastElement().mQueue->GetEvent(lock, aLastEventDelay);
      }

      if (event) {
        break;
      }

      // No runnable available. Sleep waiting for one if we're supposed to.
      // Otherwise just go ahead and return null.
      if (!aMayWait) {
        break;
      }

      AUTO_PROFILER_LABEL("ThreadEventQueue::GetEvent::Wait", IDLE);
      mEventsAvailable.Wait();
    }
  }

  return event.forget();
}
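
// Reports whether the queue that GetEvent would service next (the topmost
// nested queue, or the base queue) has an event ready to run.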
bool ThreadEventQueue::HasPendingEvent() {
  MutexAutoLock lock(mLock);

  // We always get events from the topmost queue when there are nested queues.
  if (mNestedQueues.IsEmpty()) {
    return mBaseQueue->HasReadyEvent(lock);
  } else {
    return mNestedQueues.LastElement().mQueue->HasReadyEvent(lock);
  }
}
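
// Marks the queue as doomed (rejecting further dispatch) if no nested queues
// are active and the base queue is empty; returns true if shutdown proceeded.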
bool ThreadEventQueue::ShutdownIfNoPendingEvents() {
  MutexAutoLock lock(mLock);
  if (mNestedQueues.IsEmpty() && mBaseQueue->IsEmpty(lock)) {
    mEventsAreDoomed = true;
    return true;
  }
  return false;
}
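
// Creates a nested EventQueue and returns an event target that dispatches
// into it. While the nested queue is topmost, GetEvent only returns events
// dispatched through that target; PopEventQueue tears it down again.
//
// A minimal caller-side sketch (hypothetical names: aQueue, someRunnable; the
// event-loop processing in between is a placeholder):
//
//   nsCOMPtr<nsISerialEventTarget> nested = aQueue->PushEventQueue();
//   nested->Dispatch(someRunnable.forget(), NS_DISPATCH_NORMAL);
//   // Only runnables dispatched to `nested` come out of GetEvent here.
//   aQueue->PopEventQueue(nested);  // leftovers move to the previous queue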
already_AddRefed<nsISerialEventTarget> ThreadEventQueue::PushEventQueue() {
  auto queue = MakeUnique<EventQueue>();
  RefPtr<NestedSink> sink = new NestedSink(queue.get(), this);
  RefPtr<ThreadEventTarget> eventTarget =
      new ThreadEventTarget(sink, NS_IsMainThread(), false);

  MutexAutoLock lock(mLock);

  mNestedQueues.AppendElement(NestedQueueItem(std::move(queue), eventTarget));
  return eventTarget.forget();
}
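
// Tears down the topmost nested queue: disconnects its event target and moves
// any remaining events into the queue below it (or the base queue).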
void ThreadEventQueue::PopEventQueue(nsIEventTarget* aTarget) {
  MutexAutoLock lock(mLock);

  MOZ_ASSERT(!mNestedQueues.IsEmpty());

  NestedQueueItem& item = mNestedQueues.LastElement();

  MOZ_ASSERT(aTarget == item.mEventTarget);

  // Disconnect the event target that will be popped.
  item.mEventTarget->Disconnect(lock);

  EventQueue* prevQueue =
      mNestedQueues.Length() == 1
          ? mBaseQueue.get()
          : mNestedQueues[mNestedQueues.Length() - 2].mQueue.get();

  // Move events from the old queue to the new one.
  nsCOMPtr<nsIRunnable> event;
  TimeDuration delay;
  while ((event = item.mQueue->GetEvent(lock, &delay))) {
    // Preserve the event delay so far.
    prevQueue->PutEvent(event.forget(), EventQueuePriority::Normal, lock,
                        &delay);
  }

  mNestedQueues.RemoveLastElement();
}
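
// Memory reporting: sizes the base queue, the nested queue array, and each
// nested event target, on top of the base class measurement.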
size_t ThreadEventQueue::SizeOfExcludingThis(
    mozilla::MallocSizeOf aMallocSizeOf) {
  size_t n = 0;

  {
    MutexAutoLock lock(mLock);
    n += mBaseQueue->SizeOfIncludingThis(aMallocSizeOf);
    n += mNestedQueues.ShallowSizeOfExcludingThis(aMallocSizeOf);
    for (auto& queue : mNestedQueues) {
      n += queue.mEventTarget->SizeOfIncludingThis(aMallocSizeOf);
    }
  }

  return SynchronizedEventQueue::SizeOfExcludingThis(aMallocSizeOf) + n;
}

already_AddRefed<nsIThreadObserver> ThreadEventQueue::GetObserver() {
  MutexAutoLock lock(mLock);
  return do_AddRef(mObserver);
}

already_AddRefed<nsIThreadObserver> ThreadEventQueue::GetObserverOnThread()
    MOZ_NO_THREAD_SAFETY_ANALYSIS {
  // only written on this thread
  return do_AddRef(mObserver);
}

void ThreadEventQueue::SetObserver(nsIThreadObserver* aObserver) {
  // Always called from the thread - single writer.
  nsCOMPtr<nsIThreadObserver> observer = aObserver;
  {
    MutexAutoLock lock(mLock);
    mObserver.swap(observer);
  }
  if (NS_IsMainThread()) {
    TaskController::Get()->SetThreadObserver(aObserver);
  }
}
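
// Shutdown tasks registered here run from RunShutdownTasks; registration
// fails once the queue is doomed or the tasks have already run.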
nsresult ThreadEventQueue::RegisterShutdownTask(nsITargetShutdownTask* aTask) {
  NS_ENSURE_ARG(aTask);
  MutexAutoLock lock(mLock);
  if (mEventsAreDoomed || mShutdownTasksRun) {
    return NS_ERROR_UNEXPECTED;
  }
  MOZ_ASSERT(!mShutdownTasks.Contains(aTask));
  mShutdownTasks.AppendElement(aTask);
  return NS_OK;
}

nsresult ThreadEventQueue::UnregisterShutdownTask(
    nsITargetShutdownTask* aTask) {
  NS_ENSURE_ARG(aTask);
  MutexAutoLock lock(mLock);
  if (mEventsAreDoomed || mShutdownTasksRun) {
    return NS_ERROR_UNEXPECTED;
  }
  return mShutdownTasks.RemoveElement(aTask) ? NS_OK : NS_ERROR_UNEXPECTED;
}
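
// Runs and clears all registered shutdown tasks outside the lock, and blocks
// any further registration by setting mShutdownTasksRun.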
void ThreadEventQueue::RunShutdownTasks() {
  nsTArray<nsCOMPtr<nsITargetShutdownTask>> shutdownTasks;
  {
    MutexAutoLock lock(mLock);
    shutdownTasks = std::move(mShutdownTasks);
    mShutdownTasks.Clear();
    mShutdownTasksRun = true;
  }
  for (auto& task : shutdownTasks) {
    task->TargetShutdown();
  }
}

ThreadEventQueue::NestedQueueItem::NestedQueueItem(
    UniquePtr<EventQueue> aQueue, ThreadEventTarget* aEventTarget)
    : mQueue(std::move(aQueue)), mEventTarget(aEventTarget) {}