/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ThrottledEventQueue.h"

#include "mozilla/Atomics.h"
#include "mozilla/ClearOnShutdown.h"
#include "mozilla/CondVar.h"
#include "mozilla/EventQueue.h"
#include "mozilla/Mutex.h"
#include "mozilla/Unused.h"
#include "nsThreadUtils.h"

namespace mozilla {

namespace {}  // anonymous namespace
// The ThrottledEventQueue is designed with inner and outer objects:
//
//      XPCOM code         base event target
//           |                    |
//           v                    v
//       +-------+            +--------+
//       | Outer |        +-->|executor|
//       +-------+        |   +--------+
//           |            |        |
//           |        +-------+    |
//           +------->| Inner |<---+
//                    +-------+
//
// Client code references the outer nsIEventTarget which in turn references
// an inner object, which actually holds the queue of runnables.
//
// Whenever the queue is non-empty (and not paused), it keeps an "executor"
// runnable dispatched to the base event target. Each time the executor is run,
// it draws the next event from Inner's queue and runs it. If that queue has
// more events, the executor is dispatched to the base again.
//
// The executor holds a strong reference to the Inner object. This means that
// if the outer object is dereferenced and destroyed, the Inner object will
// remain live for as long as the executor exists - that is, until the Inner's
// queue is empty.
//
// A Paused ThrottledEventQueue does not enqueue an executor when new events
// are added. Any executor previously queued on the base event target draws no
// events from a Paused ThrottledEventQueue, and returns without re-enqueueing
// itself. Since there is no executor keeping the Inner object alive until its
// queue is empty, dropping a Paused ThrottledEventQueue may drop the Inner
// while it still owns events. This is the correct behavior: if there are no
// references to it, it will never be Resumed, and thus it will never dispatch
// events again.
//
// Resuming a ThrottledEventQueue must dispatch an executor, so calls to Resume
// are fallible for the same reasons as calls to Dispatch.
//
// The xpcom shutdown process drains the main thread's event queue several
// times, so if a ThrottledEventQueue is being driven by the main thread, it
// should get emptied out by the time we reach the "eventq shutdown" phase.
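//
// A minimal usage sketch (illustrative only, not part of this file; the
// queue name "Example" and the lambda body are made up for the example):
//
//   nsCOMPtr<nsISerialEventTarget> base = GetMainThreadSerialEventTarget();
//   RefPtr<ThrottledEventQueue> queue = ThrottledEventQueue::Create(
//       base, "Example", nsIRunnablePriority::PRIORITY_NORMAL);
//   MOZ_ALWAYS_SUCCEEDS(queue->Dispatch(
//       NS_NewRunnableFunction("Example::Task", [] { /* do some work */ }),
//       NS_DISPATCH_NORMAL));
//
//   // Pausing stops the executor from draining the queue; resuming
//   // re-dispatches an executor, which can fail like any Dispatch.
//   MOZ_ALWAYS_SUCCEEDS(queue->SetIsPaused(true));
//   MOZ_ALWAYS_SUCCEEDS(queue->SetIsPaused(false));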
class ThrottledEventQueue::Inner final : public nsISupports {
  // The runnable which is dispatched to the underlying base target. Since
  // we only execute one event at a time we just re-use a single instance
  // of this class while there are events left in the queue.
  class Executor final : public Runnable, public nsIRunnablePriority {
    // The Inner whose runnables we execute. mInner->mExecutor points
    // to this executor, forming a reference loop.
    RefPtr<Inner> mInner;

    ~Executor() = default;

   public:
    explicit Executor(Inner* aInner)
        : Runnable("ThrottledEventQueue::Inner::Executor"), mInner(aInner) {}

    NS_DECL_ISUPPORTS_INHERITED

    NS_IMETHODIMP
    Run() override {
      mInner->ExecuteRunnable();
      return NS_OK;
    }

    NS_IMETHODIMP
    GetPriority(uint32_t* aPriority) override {
      *aPriority = mInner->mPriority;
      return NS_OK;
    }

#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
    NS_IMETHODIMP
    GetName(nsACString& aName) override { return mInner->CurrentName(aName); }
#endif
  };
  mutable Mutex mMutex;
  mutable CondVar mIdleCondVar MOZ_GUARDED_BY(mMutex);

  // As-of-yet unexecuted runnables queued on this ThrottledEventQueue.
  //
  // Used from any thread; protected by mMutex. Signals mIdleCondVar when
  // emptied.
  EventQueueSized<64> mEventQueue MOZ_GUARDED_BY(mMutex);

  // The event target we dispatch our events (actually, just our Executor) to.
  //
  // Written only during construction. Readable by any thread without locking.
  const nsCOMPtr<nsISerialEventTarget> mBaseTarget;

  // The Executor that we dispatch to mBaseTarget to draw runnables from our
  // queue. mExecutor->mInner points to this Inner, forming a reference loop.
  //
  // Used from any thread; protected by mMutex.
  nsCOMPtr<nsIRunnable> mExecutor MOZ_GUARDED_BY(mMutex);

  const char* const mName;

  const uint32_t mPriority;

  // True if this queue is currently paused.
  // Used from any thread; protected by mMutex.
  bool mIsPaused MOZ_GUARDED_BY(mMutex);

  explicit Inner(nsISerialEventTarget* aBaseTarget, const char* aName,
                 uint32_t aPriority)
      : mMutex("ThrottledEventQueue"),
        mIdleCondVar(mMutex, "ThrottledEventQueue:Idle"),
        mBaseTarget(aBaseTarget),
        mName(aName),
        mPriority(aPriority),
        mIsPaused(false) {
    MOZ_ASSERT(mName, "Must pass a valid name!");
  }
  ~Inner() {
#ifdef DEBUG
    MutexAutoLock lock(mMutex);

    // As long as an executor exists, it had better keep us alive, since it's
    // going to call ExecuteRunnable on us.
    MOZ_ASSERT(!mExecutor);

    // If we have any events in our queue, there should be an executor queued
    // for them, and that should have kept us alive. The exception is that, if
    // we're paused, we don't enqueue an executor.
    MOZ_ASSERT(mEventQueue.IsEmpty(lock) || IsPaused(lock));

    // Some runnables are only safe to drop on the main thread, so if our queue
    // isn't empty, we'd better be on the main thread.
    MOZ_ASSERT_IF(!mEventQueue.IsEmpty(lock), NS_IsMainThread());
#endif
  }
  // Make sure an executor has been queued on our base target. If we already
  // have one, do nothing; otherwise, create and dispatch it.
  nsresult EnsureExecutor(MutexAutoLock& lock) MOZ_REQUIRES(mMutex) {
    if (mExecutor) return NS_OK;

    // Note, this creates a ref cycle keeping the inner alive
    // until the queue is drained.
    mExecutor = new Executor(this);
    nsresult rv = mBaseTarget->Dispatch(mExecutor, NS_DISPATCH_NORMAL);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      mExecutor = nullptr;
      return rv;
    }

    return NS_OK;
  }
  nsresult CurrentName(nsACString& aName) {
    nsCOMPtr<nsIRunnable> event;

#ifdef DEBUG
    bool currentThread = false;
    mBaseTarget->IsOnCurrentThread(&currentThread);
    MOZ_ASSERT(currentThread);
#endif

    {
      MutexAutoLock lock(mMutex);
      event = mEventQueue.PeekEvent(lock);
      // It is possible that mEventQueue wasn't empty when the executor was
      // dispatched to the base target, but someone processed events from
      // mEventQueue before the executor ran; that is why mEventQueue can be
      // empty here.
      if (!event) {
        aName.AssignLiteral("no runnables left in the ThrottledEventQueue");
        return NS_OK;
      }
    }

    if (nsCOMPtr<nsINamed> named = do_QueryInterface(event)) {
      nsresult rv = named->GetName(aName);
      return rv;
    }

    aName.AssignASCII(mName);
    return NS_OK;
  }
  void ExecuteRunnable() {
    // Any thread
    nsCOMPtr<nsIRunnable> event;

#ifdef DEBUG
    bool currentThread = false;
    mBaseTarget->IsOnCurrentThread(&currentThread);
    MOZ_ASSERT(currentThread);
#endif

    {
      MutexAutoLock lock(mMutex);

      // Normally, a paused queue doesn't dispatch any executor, but we might
      // have been paused after the executor was already in flight. There's no
      // way to yank the executor out of the base event target, so we just
      // check for a paused queue here and return without running anything.
      // We'll create a new executor when we're resumed.
      if (IsPaused(lock)) {
        // Note, this breaks a ref cycle.
        mExecutor = nullptr;
        return;
      }

      // We only dispatch an executor runnable when we know there is something
      // in the queue, so this should never fail.
      event = mEventQueue.GetEvent(lock);
      MOZ_ASSERT(event);

      // If there are more events in the queue, then dispatch the next
      // executor. We do this now, before running the event, because
      // the event might spin the event loop and we don't want to stall
      // the queue.
      if (mEventQueue.HasReadyEvent(lock)) {
        // Dispatch the next base target runnable to attempt to execute
        // the next throttled event. We must do this before executing
        // the event in case the event spins the event loop.
        MOZ_ALWAYS_SUCCEEDS(
            mBaseTarget->Dispatch(mExecutor, NS_DISPATCH_NORMAL));
      }

      // Otherwise the queue is empty and we can stop dispatching the
      // executor.
      else {
        // Break the Executor::mInner / Inner::mExecutor reference loop.
        mExecutor = nullptr;
        mIdleCondVar.NotifyAll();
      }
    }

    // Execute the event now that we have unlocked.
    LogRunnable::Run log(event);
    Unused << event->Run();

    // To cover the event's destructor code in the LogRunnable log
    event = nullptr;
  }
 public:
  static already_AddRefed<Inner> Create(nsISerialEventTarget* aBaseTarget,
                                        const char* aName, uint32_t aPriority) {
    MOZ_ASSERT(NS_IsMainThread());
    // FIXME: This assertion only worked when `sCurrentShutdownPhase` was not
    // being updated.
    // MOZ_ASSERT(ClearOnShutdown_Internal::sCurrentShutdownPhase ==
    //            ShutdownPhase::NotInShutdown);

    RefPtr<Inner> ref = new Inner(aBaseTarget, aName, aPriority);
    return ref.forget();
  }

  bool IsEmpty() const {
    // Any thread
    return Length() == 0;
  }

  uint32_t Length() const {
    // Any thread
    MutexAutoLock lock(mMutex);
    return mEventQueue.Count(lock);
  }

  already_AddRefed<nsIRunnable> GetEvent() {
    MutexAutoLock lock(mMutex);
    return mEventQueue.GetEvent(lock);
  }

  void AwaitIdle() const {
    // Any thread, except the main thread or our base target. Blocking the
    // main thread is forbidden. Blocking the base target is guaranteed to
    // produce a deadlock.
    MOZ_ASSERT(!NS_IsMainThread());
#ifdef DEBUG
    bool onBaseTarget = false;
    Unused << mBaseTarget->IsOnCurrentThread(&onBaseTarget);
    MOZ_ASSERT(!onBaseTarget);
#endif

    MutexAutoLock lock(mMutex);
    while (mExecutor || IsPaused(lock)) {
      mIdleCondVar.Wait();
    }
  }

  bool IsPaused() const {
    MutexAutoLock lock(mMutex);
    return IsPaused(lock);
  }

  bool IsPaused(const MutexAutoLock& aProofOfLock) const MOZ_REQUIRES(mMutex) {
    return mIsPaused;
  }

  nsresult SetIsPaused(bool aIsPaused) {
    MutexAutoLock lock(mMutex);

    // If we will be unpaused, and we have events in our queue, make sure we
    // have an executor queued on the base event target to run them. Do this
    // before we actually change mIsPaused, since this is fallible.
    if (!aIsPaused && !mEventQueue.IsEmpty(lock)) {
      nsresult rv = EnsureExecutor(lock);
      if (NS_FAILED(rv)) {
        return rv;
      }
    }

    mIsPaused = aIsPaused;
    return NS_OK;
  }
  nsresult DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) {
    // Any thread
    nsCOMPtr<nsIRunnable> r = aEvent;
    return Dispatch(r.forget(), aFlags);
  }

  nsresult Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags) {
    MOZ_ASSERT(aFlags == NS_DISPATCH_NORMAL || aFlags == NS_DISPATCH_AT_END);

    // Any thread
    MutexAutoLock lock(mMutex);

    if (!IsPaused(lock)) {
      // Make sure we have an executor in flight to process events. This is
      // fallible, so do it first. Our lock will prevent the executor from
      // accessing the event queue before we add the event below.
      nsresult rv = EnsureExecutor(lock);
      if (NS_FAILED(rv)) return rv;
    }

    // Only add the event to the underlying queue if we are able to
    // dispatch to our base target.
    nsCOMPtr<nsIRunnable> event(aEvent);
    LogRunnable::LogDispatch(event);
    mEventQueue.PutEvent(event.forget(), EventQueuePriority::Normal, lock);
    return NS_OK;
  }

  nsresult DelayedDispatch(already_AddRefed<nsIRunnable> aEvent,
                           uint32_t aDelay) {
    // The base target may implement this, but we don't. Always fail
    // to provide consistent behavior.
    return NS_ERROR_NOT_IMPLEMENTED;
  }

  nsresult RegisterShutdownTask(nsITargetShutdownTask* aTask) {
    return mBaseTarget->RegisterShutdownTask(aTask);
  }

  nsresult UnregisterShutdownTask(nsITargetShutdownTask* aTask) {
    return mBaseTarget->UnregisterShutdownTask(aTask);
  }

  bool IsOnCurrentThread() { return mBaseTarget->IsOnCurrentThread(); }

  NS_DECL_THREADSAFE_ISUPPORTS
};
NS_IMPL_ISUPPORTS(ThrottledEventQueue::Inner, nsISupports);

NS_IMPL_ISUPPORTS_INHERITED(ThrottledEventQueue::Inner::Executor, Runnable,
                            nsIRunnablePriority)

NS_IMPL_ISUPPORTS(ThrottledEventQueue, ThrottledEventQueue, nsIEventTarget,
                  nsISerialEventTarget);

ThrottledEventQueue::ThrottledEventQueue(already_AddRefed<Inner> aInner)
    : mInner(aInner) {
  MOZ_ASSERT(mInner);
}

already_AddRefed<ThrottledEventQueue> ThrottledEventQueue::Create(
    nsISerialEventTarget* aBaseTarget, const char* aName, uint32_t aPriority) {
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aBaseTarget);

  RefPtr<Inner> inner = Inner::Create(aBaseTarget, aName, aPriority);

  RefPtr<ThrottledEventQueue> ref = new ThrottledEventQueue(inner.forget());
  return ref.forget();
}
bool ThrottledEventQueue::IsEmpty() const { return mInner->IsEmpty(); }

uint32_t ThrottledEventQueue::Length() const { return mInner->Length(); }

// Get the next runnable from the queue
already_AddRefed<nsIRunnable> ThrottledEventQueue::GetEvent() {
  return mInner->GetEvent();
}

void ThrottledEventQueue::AwaitIdle() const { return mInner->AwaitIdle(); }

nsresult ThrottledEventQueue::SetIsPaused(bool aIsPaused) {
  return mInner->SetIsPaused(aIsPaused);
}

bool ThrottledEventQueue::IsPaused() const { return mInner->IsPaused(); }

NS_IMETHODIMP
ThrottledEventQueue::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) {
  return mInner->DispatchFromScript(aEvent, aFlags);
}

NS_IMETHODIMP
ThrottledEventQueue::Dispatch(already_AddRefed<nsIRunnable> aEvent,
                              uint32_t aFlags) {
  return mInner->Dispatch(std::move(aEvent), aFlags);
}

NS_IMETHODIMP
ThrottledEventQueue::DelayedDispatch(already_AddRefed<nsIRunnable> aEvent,
                                     uint32_t aFlags) {
  return mInner->DelayedDispatch(std::move(aEvent), aFlags);
}

NS_IMETHODIMP
ThrottledEventQueue::RegisterShutdownTask(nsITargetShutdownTask* aTask) {
  return mInner->RegisterShutdownTask(aTask);
}

NS_IMETHODIMP
ThrottledEventQueue::UnregisterShutdownTask(nsITargetShutdownTask* aTask) {
  return mInner->UnregisterShutdownTask(aTask);
}

NS_IMETHODIMP
ThrottledEventQueue::IsOnCurrentThread(bool* aResult) {
  *aResult = mInner->IsOnCurrentThread();
  return NS_OK;
}

NS_IMETHODIMP_(bool)
ThrottledEventQueue::IsOnCurrentThreadInfallible() {
  return mInner->IsOnCurrentThread();
}

}  // namespace mozilla