Bug 1867190 - Initialise the PHC allocate delay later r=glandium
[gecko.git] / xpcom / threads / nsThreadUtils.cpp
blob 68d36605b388027458b556420634a3d4dd04baa6
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsThreadUtils.h"

#include "chrome/common/ipc_message.h"  // for IPC::Message
#include "LeakRefPtr.h"
#include "mozilla/Attributes.h"
#include "mozilla/Likely.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/TimeStamp.h"
#include "nsComponentManagerUtils.h"
#include "nsExceptionHandler.h"
#include "nsIEventTarget.h"
#include "nsITimer.h"
#include "nsString.h"
#include "nsThreadSyncDispatch.h"
#include "nsTimerImpl.h"
#include "prsystem.h"

#include "nsThreadManager.h"
#include "nsThreadPool.h"
#include "TaskController.h"

#ifdef XP_WIN
#  include <windows.h>
#elif defined(XP_MACOSX)
#  include <sys/resource.h>
#endif

#if defined(ANDROID)
#  include <sys/prctl.h>
#endif

static mozilla::LazyLogModule sEventDispatchAndRunLog("events");
#ifdef LOG1
#  undef LOG1
#endif
#define LOG1(args) \
  MOZ_LOG(sEventDispatchAndRunLog, mozilla::LogLevel::Error, args)
#define LOG1_ENABLED() \
  MOZ_LOG_TEST(sEventDispatchAndRunLog, mozilla::LogLevel::Error)

using namespace mozilla;
#ifndef XPCOM_GLUE_AVOID_NSPR

NS_IMPL_ISUPPORTS(IdlePeriod, nsIIdlePeriod)

NS_IMETHODIMP
IdlePeriod::GetIdlePeriodHint(TimeStamp* aIdleDeadline) {
  *aIdleDeadline = TimeStamp();
  return NS_OK;
}

// NS_IMPL_NAMED_* relies on the mName field, which is not present on
// release or beta. Instead, fall back to using "Runnable" for all
// runnables.
#  ifndef MOZ_COLLECTING_RUNNABLE_TELEMETRY
NS_IMPL_ISUPPORTS(Runnable, nsIRunnable)
#  else
NS_IMPL_NAMED_ADDREF(Runnable, mName)
NS_IMPL_NAMED_RELEASE(Runnable, mName)
NS_IMPL_QUERY_INTERFACE(Runnable, nsIRunnable, nsINamed)
#  endif

NS_IMETHODIMP
Runnable::Run() {
  // Do nothing
  return NS_OK;
}

#  ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
NS_IMETHODIMP
Runnable::GetName(nsACString& aName) {
  if (mName) {
    aName.AssignASCII(mName);
  } else {
    aName.Truncate();
  }
  return NS_OK;
}
#  endif
NS_IMPL_ISUPPORTS_INHERITED(DiscardableRunnable, Runnable,
                            nsIDiscardableRunnable)

NS_IMPL_ISUPPORTS_INHERITED(CancelableRunnable, DiscardableRunnable,
                            nsICancelableRunnable)

void CancelableRunnable::OnDiscard() {
  // Tasks that implement Cancel() can be safely cleaned up if it turns out
  // that the task will not run.
  (void)NS_WARN_IF(NS_FAILED(Cancel()));
}

NS_IMPL_ISUPPORTS_INHERITED(IdleRunnable, DiscardableRunnable, nsIIdleRunnable)

NS_IMPL_ISUPPORTS_INHERITED(CancelableIdleRunnable, CancelableRunnable,
                            nsIIdleRunnable)

NS_IMPL_ISUPPORTS_INHERITED(PrioritizableRunnable, Runnable,
                            nsIRunnablePriority)

PrioritizableRunnable::PrioritizableRunnable(
    already_AddRefed<nsIRunnable>&& aRunnable, uint32_t aPriority)
    // The real runnable name is managed by overriding the GetName function.
    : Runnable("PrioritizableRunnable"),
      mRunnable(std::move(aRunnable)),
      mPriority(aPriority) {
#  if DEBUG
  nsCOMPtr<nsIRunnablePriority> runnablePrio = do_QueryInterface(mRunnable);
  MOZ_ASSERT(!runnablePrio);
#  endif
}
#  ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
NS_IMETHODIMP
PrioritizableRunnable::GetName(nsACString& aName) {
  // Try to get a name from the underlying runnable.
  nsCOMPtr<nsINamed> named = do_QueryInterface(mRunnable);
  if (named) {
    named->GetName(aName);
  }
  return NS_OK;
}
#  endif

NS_IMETHODIMP
PrioritizableRunnable::Run() {
  MOZ_RELEASE_ASSERT(NS_IsMainThread());
  return mRunnable->Run();
}

NS_IMETHODIMP
PrioritizableRunnable::GetPriority(uint32_t* aPriority) {
  *aPriority = mPriority;
  return NS_OK;
}

already_AddRefed<nsIRunnable> mozilla::CreateRenderBlockingRunnable(
    already_AddRefed<nsIRunnable>&& aRunnable) {
  nsCOMPtr<nsIRunnable> runnable = new PrioritizableRunnable(
      std::move(aRunnable), nsIRunnablePriority::PRIORITY_RENDER_BLOCKING);
  return runnable.forget();
}

NS_IMPL_ISUPPORTS_INHERITED(PrioritizableCancelableRunnable, CancelableRunnable,
                            nsIRunnablePriority)

NS_IMETHODIMP
PrioritizableCancelableRunnable::GetPriority(uint32_t* aPriority) {
  *aPriority = mPriority;
  return NS_OK;
}

#endif  // XPCOM_GLUE_AVOID_NSPR

//-----------------------------------------------------------------------------

nsresult NS_NewNamedThread(const nsACString& aName, nsIThread** aResult,
                           nsIRunnable* aInitialEvent,
                           nsIThreadManager::ThreadCreationOptions aOptions) {
  nsCOMPtr<nsIRunnable> event = aInitialEvent;
  return NS_NewNamedThread(aName, aResult, event.forget(), aOptions);
}

nsresult NS_NewNamedThread(const nsACString& aName, nsIThread** aResult,
                           already_AddRefed<nsIRunnable> aInitialEvent,
                           nsIThreadManager::ThreadCreationOptions aOptions) {
  nsCOMPtr<nsIRunnable> event = std::move(aInitialEvent);
  nsCOMPtr<nsIThread> thread;
  nsresult rv = nsThreadManager::get().nsThreadManager::NewNamedThread(
      aName, aOptions, getter_AddRefs(thread));
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  if (event) {
    rv = thread->Dispatch(event.forget(), NS_DISPATCH_IGNORE_BLOCK_DISPATCH);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      return rv;
    }
  }

  *aResult = nullptr;
  thread.swap(*aResult);
  return NS_OK;
}
nsresult NS_GetCurrentThread(nsIThread** aResult) {
  return nsThreadManager::get().nsThreadManager::GetCurrentThread(aResult);
}

nsresult NS_GetMainThread(nsIThread** aResult) {
  return nsThreadManager::get().nsThreadManager::GetMainThread(aResult);
}

nsresult NS_DispatchToCurrentThread(already_AddRefed<nsIRunnable>&& aEvent) {
  nsresult rv;
  nsCOMPtr<nsIRunnable> event(aEvent);
  // XXX: Consider using GetCurrentSerialEventTarget() to support TaskQueues.
  nsISerialEventTarget* thread = NS_GetCurrentThread();
  if (!thread) {
    return NS_ERROR_UNEXPECTED;
  }
  // To keep the runnable from leaking if the dispatch method fails, grab a
  // raw reference up front and release it on failure.
  nsIRunnable* temp = event.get();
  rv = thread->Dispatch(event.forget(), NS_DISPATCH_NORMAL);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    // Dispatch() leaked the reference to the event, but due to the caller's
    // assumptions, we shouldn't leak here. And given we are on the same
    // thread as the dispatch target, it's mostly safe to do it here.
    NS_RELEASE(temp);
  }
  return rv;
}
// It is common to call NS_DispatchToCurrentThread with a newly
// allocated runnable with a refcount of zero. To keep us from leaking
// the runnable if the dispatch method fails, we take a death grip.
nsresult NS_DispatchToCurrentThread(nsIRunnable* aEvent) {
  nsCOMPtr<nsIRunnable> event(aEvent);
  return NS_DispatchToCurrentThread(event.forget());
}
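
// Illustrative sketch (not part of the original file): the pattern described
// above, where a freshly allocated runnable with a refcount of zero is handed
// straight to this overload, which takes the death grip. ExampleRunnable is a
// hypothetical Runnable subclass used only for illustration:
//
//   NS_DispatchToCurrentThread(new ExampleRunnable());  // refcount starts at 0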
nsresult NS_DispatchToMainThread(already_AddRefed<nsIRunnable>&& aEvent,
                                 uint32_t aDispatchFlags) {
  LeakRefPtr<nsIRunnable> event(std::move(aEvent));
  nsCOMPtr<nsIThread> thread;
  nsresult rv = NS_GetMainThread(getter_AddRefs(thread));
  if (NS_WARN_IF(NS_FAILED(rv))) {
    NS_ASSERTION(false,
                 "Failed NS_DispatchToMainThread() in shutdown; leaking");
    // NOTE: if you stop leaking here, adjust Promise::MaybeReportRejected(),
    // which assumes a leak here, or split into leaks and no-leaks versions
    return rv;
  }
  return thread->Dispatch(event.take(), aDispatchFlags);
}

// In the case of failure with a newly allocated runnable with a
// refcount of zero, we intentionally leak the runnable, because it is
// likely that the runnable is being dispatched to the main thread
// because it owns main thread only objects, so it is not safe to
// release them here.
nsresult NS_DispatchToMainThread(nsIRunnable* aEvent, uint32_t aDispatchFlags) {
  nsCOMPtr<nsIRunnable> event(aEvent);
  return NS_DispatchToMainThread(event.forget(), aDispatchFlags);
}

nsresult NS_DelayedDispatchToCurrentThread(
    already_AddRefed<nsIRunnable>&& aEvent, uint32_t aDelayMs) {
  nsCOMPtr<nsIRunnable> event(aEvent);

  // XXX: Consider using GetCurrentSerialEventTarget() to support TaskQueues.
  nsISerialEventTarget* thread = NS_GetCurrentThread();
  if (!thread) {
    return NS_ERROR_UNEXPECTED;
  }

  return thread->DelayedDispatch(event.forget(), aDelayMs);
}
nsresult NS_DispatchToThreadQueue(already_AddRefed<nsIRunnable>&& aEvent,
                                  nsIThread* aThread,
                                  EventQueuePriority aQueue) {
  nsresult rv;
  nsCOMPtr<nsIRunnable> event(aEvent);
  NS_ENSURE_TRUE(event, NS_ERROR_INVALID_ARG);
  if (!aThread) {
    return NS_ERROR_UNEXPECTED;
  }
  // To keep the runnable from leaking if the dispatch method fails, grab a
  // raw reference up front and release it on failure.
  nsIRunnable* temp = event.get();
  rv = aThread->DispatchToQueue(event.forget(), aQueue);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    // Dispatch() leaked the reference to the event, but due to the caller's
    // assumptions, we shouldn't leak here. And given we are on the same
    // thread as the dispatch target, it's mostly safe to do it here.
    NS_RELEASE(temp);
  }

  return rv;
}
nsresult NS_DispatchToCurrentThreadQueue(already_AddRefed<nsIRunnable>&& aEvent,
                                         EventQueuePriority aQueue) {
  return NS_DispatchToThreadQueue(std::move(aEvent), NS_GetCurrentThread(),
                                  aQueue);
}

extern nsresult NS_DispatchToMainThreadQueue(
    already_AddRefed<nsIRunnable>&& aEvent, EventQueuePriority aQueue) {
  nsCOMPtr<nsIThread> mainThread;
  nsresult rv = NS_GetMainThread(getter_AddRefs(mainThread));
  if (NS_SUCCEEDED(rv)) {
    return NS_DispatchToThreadQueue(std::move(aEvent), mainThread, aQueue);
  }
  return rv;
}

class IdleRunnableWrapper final : public Runnable,
                                  public nsIDiscardableRunnable,
                                  public nsIIdleRunnable {
 public:
  explicit IdleRunnableWrapper(already_AddRefed<nsIRunnable>&& aEvent)
      : Runnable("IdleRunnableWrapper"),
        mRunnable(std::move(aEvent)),
        mDiscardable(do_QueryInterface(mRunnable)) {}

  NS_DECL_ISUPPORTS_INHERITED

  NS_IMETHOD Run() override {
    if (!mRunnable) {
      return NS_OK;
    }
    CancelTimer();
    // Don't clear mDiscardable because that would cause QueryInterface to
    // change behavior during the lifetime of an instance.
    nsCOMPtr<nsIRunnable> runnable = std::move(mRunnable);
    return runnable->Run();
  }

  // nsIDiscardableRunnable
  void OnDiscard() override {
    if (!mRunnable) {
      // Run() was already called from TimedOut().
      return;
    }
    mDiscardable->OnDiscard();
    mRunnable = nullptr;
  }

  static void TimedOut(nsITimer* aTimer, void* aClosure) {
    RefPtr<IdleRunnableWrapper> runnable =
        static_cast<IdleRunnableWrapper*>(aClosure);
    LogRunnable::Run log(runnable);
    runnable->Run();
    runnable = nullptr;
  }

  void SetTimer(uint32_t aDelay, nsIEventTarget* aTarget) override {
    MOZ_ASSERT(aTarget);
    MOZ_ASSERT(!mTimer);
    NS_NewTimerWithFuncCallback(getter_AddRefs(mTimer), TimedOut, this, aDelay,
                                nsITimer::TYPE_ONE_SHOT,
                                "IdleRunnableWrapper::SetTimer", aTarget);
  }

#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
  NS_IMETHOD GetName(nsACString& aName) override {
    aName.AssignLiteral("IdleRunnableWrapper");
    if (nsCOMPtr<nsINamed> named = do_QueryInterface(mRunnable)) {
      nsAutoCString name;
      named->GetName(name);
      if (!name.IsEmpty()) {
        aName.AppendLiteral(" for ");
        aName.Append(name);
      }
    }
    return NS_OK;
  }
#endif

 private:
  ~IdleRunnableWrapper() { CancelTimer(); }

  void CancelTimer() {
    if (mTimer) {
      mTimer->Cancel();
    }
  }

  nsCOMPtr<nsITimer> mTimer;
  nsCOMPtr<nsIRunnable> mRunnable;
  nsCOMPtr<nsIDiscardableRunnable> mDiscardable;
};

NS_IMPL_ADDREF_INHERITED(IdleRunnableWrapper, Runnable)
NS_IMPL_RELEASE_INHERITED(IdleRunnableWrapper, Runnable)

NS_INTERFACE_MAP_BEGIN(IdleRunnableWrapper)
  NS_INTERFACE_MAP_ENTRY(nsIIdleRunnable)
  NS_INTERFACE_MAP_ENTRY_CONDITIONAL(nsIDiscardableRunnable, mDiscardable)
NS_INTERFACE_MAP_END_INHERITING(Runnable)
extern nsresult NS_DispatchToThreadQueue(already_AddRefed<nsIRunnable>&& aEvent,
                                         uint32_t aTimeout, nsIThread* aThread,
                                         EventQueuePriority aQueue) {
  nsCOMPtr<nsIRunnable> event(std::move(aEvent));
  NS_ENSURE_TRUE(event, NS_ERROR_INVALID_ARG);
  MOZ_ASSERT(aQueue == EventQueuePriority::Idle ||
             aQueue == EventQueuePriority::DeferredTimers);
  if (!aThread) {
    return NS_ERROR_UNEXPECTED;
  }

  nsCOMPtr<nsIIdleRunnable> idleEvent = do_QueryInterface(event);

  if (!idleEvent) {
    idleEvent = new IdleRunnableWrapper(event.forget());
    event = do_QueryInterface(idleEvent);
    MOZ_DIAGNOSTIC_ASSERT(event);
  }
  idleEvent->SetTimer(aTimeout, aThread);

  nsresult rv = NS_DispatchToThreadQueue(event.forget(), aThread, aQueue);
  if (NS_SUCCEEDED(rv)) {
    // This is intended to pair with the "DISP" log made from inside
    // NS_DispatchToThreadQueue for `event`. There is no possibility of
    // injecting another "DISP" for a different event on this thread in between.
    LOG1(("TIMEOUT %u", aTimeout));
  }

  return rv;
}

extern nsresult NS_DispatchToCurrentThreadQueue(
    already_AddRefed<nsIRunnable>&& aEvent, uint32_t aTimeout,
    EventQueuePriority aQueue) {
  return NS_DispatchToThreadQueue(std::move(aEvent), aTimeout,
                                  NS_GetCurrentThread(), aQueue);
}
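
// Illustrative sketch (not part of the original file): dispatching work to the
// current thread's idle queue with a timeout after which the wrapper's timer
// forces it to run. The runnable name, lambda body, and 2000 ms value are
// hypothetical:
//
//   NS_DispatchToCurrentThreadQueue(
//       NS_NewRunnableFunction("ExampleIdleTask", [] { /* idle-time work */ }),
//       2000, EventQueuePriority::Idle);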
#ifndef XPCOM_GLUE_AVOID_NSPR
nsresult NS_ProcessPendingEvents(nsIThread* aThread, PRIntervalTime aTimeout) {
  nsresult rv = NS_OK;

  if (!aThread) {
    aThread = NS_GetCurrentThread();
    if (NS_WARN_IF(!aThread)) {
      return NS_ERROR_UNEXPECTED;
    }
  }

  PRIntervalTime start = PR_IntervalNow();
  for (;;) {
    bool processedEvent;
    rv = aThread->ProcessNextEvent(false, &processedEvent);
    if (NS_FAILED(rv) || !processedEvent) {
      break;
    }
    if (PR_IntervalNow() - start > aTimeout) {
      break;
    }
  }
  return rv;
}
#endif  // XPCOM_GLUE_AVOID_NSPR

inline bool hasPendingEvents(nsIThread* aThread) {
  bool val;
  return NS_SUCCEEDED(aThread->HasPendingEvents(&val)) && val;
}

bool NS_HasPendingEvents(nsIThread* aThread) {
  if (!aThread) {
    aThread = NS_GetCurrentThread();
    if (NS_WARN_IF(!aThread)) {
      return false;
    }
  }
  return hasPendingEvents(aThread);
}

bool NS_ProcessNextEvent(nsIThread* aThread, bool aMayWait) {
  if (!aThread) {
    aThread = NS_GetCurrentThread();
    if (NS_WARN_IF(!aThread)) {
      return false;
    }
  }
  bool val;
  return NS_SUCCEEDED(aThread->ProcessNextEvent(aMayWait, &val)) && val;
}
void NS_SetCurrentThreadName(const char* aName) {
  PR_SetCurrentThreadName(aName);
#if defined(ANDROID) && defined(DEBUG)
  // Check that NSPR does the right thing on Android.
  char buffer[16] = {'\0'};
  prctl(PR_GET_NAME, buffer);
  MOZ_ASSERT(0 == strncmp(buffer, aName, 15));
#endif
  if (nsThreadManager::get().IsNSThread()) {
    nsThread* thread = nsThreadManager::get().GetCurrentThread();
    thread->SetThreadNameInternal(nsDependentCString(aName));
  }
}

nsIThread* NS_GetCurrentThread() {
  return nsThreadManager::get().GetCurrentThread();
}

nsIThread* NS_GetCurrentThreadNoCreate() {
  if (nsThreadManager::get().IsNSThread()) {
    return NS_GetCurrentThread();
  }
  return nullptr;
}

// nsThreadPoolNaming
nsCString nsThreadPoolNaming::GetNextThreadName(const nsACString& aPoolName) {
  nsCString name(aPoolName);
  name.AppendLiteral(" #");
  name.AppendInt(++mCounter, 10);  // The counter is declared as atomic
  return name;
}

nsresult NS_DispatchBackgroundTask(already_AddRefed<nsIRunnable> aEvent,
                                   uint32_t aDispatchFlags) {
  nsCOMPtr<nsIRunnable> event(aEvent);
  return nsThreadManager::get().DispatchToBackgroundThread(event,
                                                           aDispatchFlags);
}
// nsAutoLowPriorityIO
nsAutoLowPriorityIO::nsAutoLowPriorityIO() {
#if defined(XP_WIN)
  lowIOPrioritySet =
      SetThreadPriority(GetCurrentThread(), THREAD_MODE_BACKGROUND_BEGIN);
#elif defined(XP_MACOSX)
  oldPriority = getiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD);
  lowIOPrioritySet =
      oldPriority != -1 &&
      setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD, IOPOL_THROTTLE) != -1;
#else
  lowIOPrioritySet = false;
#endif
}

nsAutoLowPriorityIO::~nsAutoLowPriorityIO() {
#if defined(XP_WIN)
  if (MOZ_LIKELY(lowIOPrioritySet)) {
    // On Windows the old thread priority is automatically restored
    SetThreadPriority(GetCurrentThread(), THREAD_MODE_BACKGROUND_END);
  }
#elif defined(XP_MACOSX)
  if (MOZ_LIKELY(lowIOPrioritySet)) {
    setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD, oldPriority);
  }
#endif
}

namespace mozilla {

nsISerialEventTarget* GetCurrentSerialEventTarget() {
  if (nsISerialEventTarget* current =
          SerialEventTargetGuard::GetCurrentSerialEventTarget()) {
    return current;
  }

  MOZ_DIAGNOSTIC_ASSERT(!nsThreadPool::GetCurrentThreadPool(),
                        "Call to GetCurrentSerialEventTarget() from thread "
                        "pool without an active TaskQueue");

  nsCOMPtr<nsIThread> thread;
  nsresult rv = NS_GetCurrentThread(getter_AddRefs(thread));
  if (NS_FAILED(rv)) {
    return nullptr;
  }

  return thread;
}

nsISerialEventTarget* GetMainThreadSerialEventTarget() {
  return static_cast<nsThread*>(nsThreadManager::get().GetMainThreadWeak());
}

size_t GetNumberOfProcessors() {
#if defined(XP_LINUX) && defined(MOZ_SANDBOX)
  static const PRInt32 procs = PR_GetNumberOfProcessors();
#else
  PRInt32 procs = PR_GetNumberOfProcessors();
#endif
  MOZ_ASSERT(procs > 0);
  return static_cast<size_t>(procs);
}
template <typename T>
void LogTaskBase<T>::LogDispatch(T* aEvent) {
  LOG1(("DISP %p", aEvent));
}
template <typename T>
void LogTaskBase<T>::LogDispatch(T* aEvent, void* aContext) {
  LOG1(("DISP %p (%p)", aEvent, aContext));
}

template <>
void LogTaskBase<IPC::Message>::LogDispatchWithPid(IPC::Message* aEvent,
                                                   int32_t aPid) {
  if (aEvent->seqno() && aPid > 0) {
    LOG1(("SEND %p %d %d", aEvent, aEvent->seqno(), aPid));
  }
}

template <typename T>
LogTaskBase<T>::Run::Run(T* aEvent, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  // Log the address of this RAII object so that we can use it to identify the
  // DONE log while not keeping any ref to the event, which could be invalid
  // by the time the destructor runs.
  LOG1(("EXEC %p %p", aEvent, this));
}
template <typename T>
LogTaskBase<T>::Run::Run(T* aEvent, void* aContext, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  LOG1(("EXEC %p (%p) %p", aEvent, aContext, this));
}

template <>
LogTaskBase<nsIRunnable>::Run::Run(nsIRunnable* aEvent, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  if (!LOG1_ENABLED()) {
    return;
  }

  nsCOMPtr<nsINamed> named(do_QueryInterface(aEvent));
  if (!named) {
    LOG1(("EXEC %p %p", aEvent, this));
    return;
  }

  nsAutoCString name;
  named->GetName(name);
  LOG1(("EXEC %p %p [%s]", aEvent, this, name.BeginReading()));
}

template <>
LogTaskBase<Task>::Run::Run(Task* aTask, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  if (!LOG1_ENABLED()) {
    return;
  }

  nsAutoCString name;
  if (!aTask->GetName(name)) {
    LOG1(("EXEC %p %p", aTask, this));
    return;
  }

  LOG1(("EXEC %p %p [%s]", aTask, this, name.BeginReading()));
}

template <>
LogTaskBase<IPC::Message>::Run::Run(IPC::Message* aMessage, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  LOG1(("RECV %p %p %d [%s]", aMessage, this, aMessage->seqno(),
        aMessage->name()));
}

template <>
LogTaskBase<nsTimerImpl>::Run::Run(nsTimerImpl* aEvent, bool aWillRunAgain)
    : mWillRunAgain(aWillRunAgain) {
  // The name of the timer will be logged when running it on the target thread.
  // Logging it here (on the `Timer` thread) would be redundant.
  LOG1(("EXEC %p %p [nsTimerImpl]", aEvent, this));
}

template <typename T>
LogTaskBase<T>::Run::~Run() {
  LOG1((mWillRunAgain ? "INTERRUPTED %p" : "DONE %p", this));
}

template class LogTaskBase<nsIRunnable>;
template class LogTaskBase<MicroTaskRunnable>;
template class LogTaskBase<IPC::Message>;
template class LogTaskBase<nsTimerImpl>;
template class LogTaskBase<Task>;
template class LogTaskBase<PresShell>;
template class LogTaskBase<dom::FrameRequestCallback>;

MOZ_THREAD_LOCAL(nsISerialEventTarget*)
SerialEventTargetGuard::sCurrentThreadTLS;
void SerialEventTargetGuard::InitTLS() {
  MOZ_ASSERT(NS_IsMainThread());
  if (!sCurrentThreadTLS.init()) {
    MOZ_CRASH();
  }
}

}  // namespace mozilla

bool nsIEventTarget::IsOnCurrentThread() {
  if (mThread) {
    return mThread == PR_GetCurrentThread();
  }
  return IsOnCurrentThreadInfallible();
}
extern "C" {
// These functions use the C language linkage because they're exposed to Rust
// via the xpcom/rust/moz_task crate, which wraps them in safe Rust functions
// that enable Rust code to get/create threads and dispatch runnables on them.

nsresult NS_GetCurrentThreadRust(nsIThread** aResult) {
  return NS_GetCurrentThread(aResult);
}

nsresult NS_GetMainThreadRust(nsIThread** aResult) {
  return NS_GetMainThread(aResult);
}

// NS_NewNamedThread's aStackSize parameter has the default argument
// nsIThreadManager::DEFAULT_STACK_SIZE, but we can't omit default arguments
// when calling a C++ function from Rust, and we can't access
// nsIThreadManager::DEFAULT_STACK_SIZE in Rust to pass it explicitly,
// since it is defined in a %{C++ ... %} block within nsIThreadManager.idl.
// So we indirect through this function.
nsresult NS_NewNamedThreadWithDefaultStackSize(const nsACString& aName,
                                               nsIThread** aResult,
                                               nsIRunnable* aEvent) {
  return NS_NewNamedThread(aName, aResult, aEvent);
}

bool NS_IsOnCurrentThread(nsIEventTarget* aTarget) {
  return aTarget->IsOnCurrentThread();
}

nsresult NS_DispatchBackgroundTask(nsIRunnable* aEvent,
                                   uint32_t aDispatchFlags) {
  return nsThreadManager::get().DispatchToBackgroundThread(aEvent,
                                                           aDispatchFlags);
}

nsresult NS_CreateBackgroundTaskQueue(const char* aName,
                                      nsISerialEventTarget** aTarget) {
  nsCOMPtr<nsISerialEventTarget> target =
      nsThreadManager::get().CreateBackgroundTaskQueue(aName);
  if (!target) {
    return NS_ERROR_FAILURE;
  }

  target.forget(aTarget);
  return NS_OK;
}

}  // extern "C"
nsresult NS_DispatchAndSpinEventLoopUntilComplete(
    const nsACString& aVeryGoodReasonToDoThis, nsIEventTarget* aEventTarget,
    already_AddRefed<nsIRunnable> aEvent) {
  // NOTE: Get the current thread specifically, as `SpinEventLoopUntil` can
  // only spin that event target's loop. The reply will specify
  // NS_DISPATCH_IGNORE_BLOCK_DISPATCH to ensure the reply is received even if
  // the caller is a threadpool thread.
  nsCOMPtr<nsIThread> current = NS_GetCurrentThread();
  if (NS_WARN_IF(!current)) {
    return NS_ERROR_NOT_AVAILABLE;
  }

  RefPtr<nsThreadSyncDispatch> wrapper =
      new nsThreadSyncDispatch(current.forget(), std::move(aEvent));
  nsresult rv = aEventTarget->Dispatch(do_AddRef(wrapper));
  if (NS_WARN_IF(NS_FAILED(rv))) {
    // FIXME: Consider avoiding leaking the `nsThreadSyncDispatch` as well by
    // using a fallible version of `Dispatch` once that is added.
    return rv;
  }

  wrapper->SpinEventLoopUntilComplete(aVeryGoodReasonToDoThis);
  return NS_OK;
}
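
// Illustrative sketch (not part of the original file): synchronously running a
// runnable on another event target and spinning the caller's loop until it
// completes. The reason string, `otherThread` target, and lambda body are
// hypothetical:
//
//   nsresult rv = NS_DispatchAndSpinEventLoopUntilComplete(
//       "Example: flush pending work before teardown"_ns, otherThread,
//       NS_NewRunnableFunction("ExampleSyncTask", [] { /* work */ }));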