[gecko.git] / xpcom / threads / nsThread.cpp
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsThread.h"

#include "base/message_loop.h"
#include "base/platform_thread.h"

// Chromium's logging can sometimes leak through...
#ifdef LOG
#  undef LOG
#endif

#include "mozilla/ReentrantMonitor.h"
#include "nsMemoryPressure.h"
#include "nsThreadManager.h"
#include "nsIClassInfoImpl.h"
#include "nsCOMPtr.h"
#include "nsQueryObject.h"
#include "pratom.h"
#include "mozilla/BackgroundHangMonitor.h"
#include "mozilla/CycleCollectedJSContext.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Logging.h"
#include "nsIObserverService.h"
#include "mozilla/IOInterposer.h"
#include "mozilla/ipc/MessageChannel.h"
#include "mozilla/ipc/BackgroundChild.h"
#include "mozilla/Preferences.h"
#include "mozilla/ProfilerRunnable.h"
#include "mozilla/SchedulerGroup.h"
#include "mozilla/Services.h"
#include "mozilla/SpinEventLoopUntil.h"
#include "mozilla/StaticLocalPtr.h"
#include "mozilla/StaticPrefs_threads.h"
#include "mozilla/TaskController.h"
#include "nsXPCOMPrivate.h"
#include "mozilla/ChaosMode.h"
#include "mozilla/Telemetry.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/Unused.h"
#include "mozilla/dom/DocGroup.h"
#include "mozilla/dom/ScriptSettings.h"
#include "nsThreadSyncDispatch.h"
#include "nsServiceManagerUtils.h"
#include "GeckoProfiler.h"
#include "InputEventStatistics.h"
#include "ThreadEventQueue.h"
#include "ThreadEventTarget.h"
#include "ThreadDelay.h"

#include <limits>

#ifdef XP_LINUX
#  ifdef __GLIBC__
#    include <gnu/libc-version.h>
#  endif
#  include <sys/mman.h>
#  include <sys/time.h>
#  include <sys/resource.h>
#  include <sched.h>
#  include <stdio.h>
#endif

#ifdef XP_WIN
#  include "mozilla/DynamicallyLinkedFunctionPtr.h"

#  include <winbase.h>

using GetCurrentThreadStackLimitsFn = void(WINAPI*)(PULONG_PTR LowLimit,
                                                    PULONG_PTR HighLimit);
#endif

#define HAVE_UALARM                                                        \
  _BSD_SOURCE ||                                                           \
      (_XOPEN_SOURCE >= 500 || _XOPEN_SOURCE && _XOPEN_SOURCE_EXTENDED) && \
          !(_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700)

#if defined(XP_LINUX) && !defined(ANDROID) && defined(_GNU_SOURCE)
#  define HAVE_SCHED_SETAFFINITY
#endif

#ifdef XP_MACOSX
#  include <mach/mach.h>
#  include <mach/thread_policy.h>
#endif

#ifdef MOZ_CANARY
#  include <unistd.h>
#  include <execinfo.h>
#  include <signal.h>
#  include <fcntl.h>
#  include "nsXULAppAPI.h"
#endif

using namespace mozilla;

extern void InitThreadLocalVariables();

static LazyLogModule sThreadLog("nsThread");
#ifdef LOG
#  undef LOG
#endif
#define LOG(args) MOZ_LOG(sThreadLog, mozilla::LogLevel::Debug, args)

NS_DECL_CI_INTERFACE_GETTER(nsThread)

Array<char, nsThread::kRunnableNameBufSize> nsThread::sMainThreadRunnableName;

#ifdef EARLY_BETA_OR_EARLIER
const uint32_t kTelemetryWakeupCountLimit = 100;
#endif
//-----------------------------------------------------------------------------
// Because we do not have our own nsIFactory, we have to implement nsIClassInfo
// somewhat manually.

class nsThreadClassInfo : public nsIClassInfo {
 public:
  NS_DECL_ISUPPORTS_INHERITED  // no mRefCnt
  NS_DECL_NSICLASSINFO

  nsThreadClassInfo() = default;
};

NS_IMETHODIMP_(MozExternalRefCountType)
nsThreadClassInfo::AddRef() { return 2; }
NS_IMETHODIMP_(MozExternalRefCountType)
nsThreadClassInfo::Release() { return 1; }
NS_IMPL_QUERY_INTERFACE(nsThreadClassInfo, nsIClassInfo)

NS_IMETHODIMP
nsThreadClassInfo::GetInterfaces(nsTArray<nsIID>& aArray) {
  return NS_CI_INTERFACE_GETTER_NAME(nsThread)(aArray);
}

NS_IMETHODIMP
nsThreadClassInfo::GetScriptableHelper(nsIXPCScriptable** aResult) {
  *aResult = nullptr;
  return NS_OK;
}

NS_IMETHODIMP
nsThreadClassInfo::GetContractID(nsACString& aResult) {
  aResult.SetIsVoid(true);
  return NS_OK;
}

NS_IMETHODIMP
nsThreadClassInfo::GetClassDescription(nsACString& aResult) {
  aResult.SetIsVoid(true);
  return NS_OK;
}

NS_IMETHODIMP
nsThreadClassInfo::GetClassID(nsCID** aResult) {
  *aResult = nullptr;
  return NS_OK;
}

NS_IMETHODIMP
nsThreadClassInfo::GetFlags(uint32_t* aResult) {
  *aResult = THREADSAFE;
  return NS_OK;
}

NS_IMETHODIMP
nsThreadClassInfo::GetClassIDNoAlloc(nsCID* aResult) {
  return NS_ERROR_NOT_AVAILABLE;
}

//-----------------------------------------------------------------------------

NS_IMPL_ADDREF(nsThread)
NS_IMPL_RELEASE(nsThread)
NS_INTERFACE_MAP_BEGIN(nsThread)
  NS_INTERFACE_MAP_ENTRY(nsIThread)
  NS_INTERFACE_MAP_ENTRY(nsIThreadInternal)
  NS_INTERFACE_MAP_ENTRY(nsIEventTarget)
  NS_INTERFACE_MAP_ENTRY(nsISerialEventTarget)
  NS_INTERFACE_MAP_ENTRY(nsISupportsPriority)
  NS_INTERFACE_MAP_ENTRY_CONDITIONAL(nsIDelayedRunnableObserver, mEventTarget)
  NS_INTERFACE_MAP_ENTRY(nsIDirectTaskDispatcher)
  NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIThread)
  if (aIID.Equals(NS_GET_IID(nsIClassInfo))) {
    static nsThreadClassInfo sThreadClassInfo;
    foundInterface = static_cast<nsIClassInfo*>(&sThreadClassInfo);
  } else
NS_INTERFACE_MAP_END
NS_IMPL_CI_INTERFACE_GETTER(nsThread, nsIThread, nsIThreadInternal,
                            nsIEventTarget, nsISerialEventTarget,
                            nsISupportsPriority)
//-----------------------------------------------------------------------------

// This event is responsible for notifying nsThread::Shutdown that it is time
// to call PR_JoinThread. It implements nsICancelableRunnable so that it can
// run on a DOM Worker thread (where all events must implement
// nsICancelableRunnable.)
class nsThreadShutdownAckEvent : public CancelableRunnable {
 public:
  explicit nsThreadShutdownAckEvent(NotNull<nsThreadShutdownContext*> aCtx)
      : CancelableRunnable("nsThreadShutdownAckEvent"),
        mShutdownContext(aCtx) {}
  NS_IMETHOD Run() override {
    mShutdownContext->mTerminatingThread->ShutdownComplete(mShutdownContext);
    return NS_OK;
  }
  nsresult Cancel() override { return Run(); }

 private:
  virtual ~nsThreadShutdownAckEvent() = default;

  NotNull<RefPtr<nsThreadShutdownContext>> mShutdownContext;
};

// This event is responsible for setting mShutdownContext
class nsThreadShutdownEvent : public Runnable {
 public:
  nsThreadShutdownEvent(NotNull<nsThread*> aThr,
                        NotNull<nsThreadShutdownContext*> aCtx)
      : Runnable("nsThreadShutdownEvent"),
        mThread(aThr),
        mShutdownContext(aCtx) {}
  NS_IMETHOD Run() override {
    // Creates a cycle between `mThread` and the shutdown context which will be
    // broken when the thread exits.
    mThread->mShutdownContext = mShutdownContext;
    if (mThread->mEventTarget) {
      mThread->mEventTarget->NotifyShutdown();
    }
    MessageLoop::current()->Quit();
    return NS_OK;
  }

 private:
  NotNull<RefPtr<nsThread>> mThread;
  NotNull<RefPtr<nsThreadShutdownContext>> mShutdownContext;
};
//-----------------------------------------------------------------------------

static void SetThreadAffinity(unsigned int cpu) {
#ifdef HAVE_SCHED_SETAFFINITY
  cpu_set_t cpus;
  CPU_ZERO(&cpus);
  CPU_SET(cpu, &cpus);
  sched_setaffinity(0, sizeof(cpus), &cpus);
  // Don't assert sched_setaffinity's return value because it intermittently (?)
  // fails with EINVAL on Linux x64 try runs.
#elif defined(XP_MACOSX)
  // OS X does not provide APIs to pin threads to specific processors, but you
  // can tag threads as belonging to the same "affinity set" and the OS will try
  // to run them on the same processor. To run threads on different processors,
  // tag them as belonging to different affinity sets. Tag 0, the default, means
  // "no affinity" so let's pretend each CPU has its own tag `cpu+1`.
  thread_affinity_policy_data_t policy;
  policy.affinity_tag = cpu + 1;
  MOZ_ALWAYS_TRUE(thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
                                    &policy.affinity_tag, 1) == KERN_SUCCESS);
#elif defined(XP_WIN)
  MOZ_ALWAYS_TRUE(SetThreadIdealProcessor(GetCurrentThread(), cpu) !=
                  (DWORD)-1);
#endif
}

static void SetupCurrentThreadForChaosMode() {
  if (!ChaosMode::isActive(ChaosFeature::ThreadScheduling)) {
    return;
  }

#ifdef XP_LINUX
  // PR_SetThreadPriority doesn't really work since priorities >
  // PR_PRIORITY_NORMAL can't be set by non-root users. Instead we'll just use
  // setpriority(2) to set random 'nice values'. In regular Linux this is only
  // a dynamic adjustment so it still doesn't really do what we want, but tools
  // like 'rr' can be more aggressive about honoring these values.
  // Some of these calls may fail due to trying to lower the priority
  // (e.g. something may have already called setpriority() for this thread).
  // This makes it hard to have non-main threads with higher priority than the
  // main thread, but that's hard to fix. Tools like rr can choose to honor the
  // requested values anyway.
  // Use just 4 priorities so there's a reasonable chance of any two threads
  // having equal priority.
  setpriority(PRIO_PROCESS, 0, ChaosMode::randomUint32LessThan(4));
#else
  // We should set the affinity here but NSPR doesn't provide a way to expose
  // it.
  uint32_t priority = ChaosMode::randomUint32LessThan(PR_PRIORITY_LAST + 1);
  PR_SetThreadPriority(PR_GetCurrentThread(), PRThreadPriority(priority));
#endif

  // Force half the threads to CPU 0 so they compete for CPU
  if (ChaosMode::randomUint32LessThan(2)) {
    SetThreadAffinity(0);
  }
}

namespace {

struct ThreadInitData {
  nsThread* thread;
  nsCString name;
};

}  // namespace
/* static */ mozilla::OffTheBooksMutex& nsThread::ThreadListMutex() {
  static StaticLocalAutoPtr<OffTheBooksMutex> sMutex(
      new OffTheBooksMutex("nsThread::ThreadListMutex"));
  return *sMutex;
}

/* static */ LinkedList<nsThread>& nsThread::ThreadList() {
  static StaticLocalAutoPtr<LinkedList<nsThread>> sList(
      new LinkedList<nsThread>());
  return *sList;
}

/* static */
void nsThread::ClearThreadList() {
  OffTheBooksMutexAutoLock mal(ThreadListMutex());
  while (ThreadList().popFirst()) {
  }
}

/* static */
nsThreadEnumerator nsThread::Enumerate() { return {}; }

void nsThread::AddToThreadList() {
  OffTheBooksMutexAutoLock mal(ThreadListMutex());
  MOZ_ASSERT(!isInList());

  ThreadList().insertBack(this);
}

void nsThread::MaybeRemoveFromThreadList() {
  OffTheBooksMutexAutoLock mal(ThreadListMutex());
  if (isInList()) {
    removeFrom(ThreadList());
  }
}
/*static*/
void nsThread::ThreadFunc(void* aArg) {
  using mozilla::ipc::BackgroundChild;

  UniquePtr<ThreadInitData> initData(static_cast<ThreadInitData*>(aArg));
  nsThread* self = initData->thread;  // strong reference

  MOZ_ASSERT(self->mEventTarget);
  MOZ_ASSERT(self->mEvents);

  // Note: see the comment in nsThread::Init, where we set these same values.
  DebugOnly<PRThread*> prev = self->mThread.exchange(PR_GetCurrentThread());
  MOZ_ASSERT(!prev || prev == PR_GetCurrentThread());
  self->mEventTarget->SetCurrentThread(self->mThread);
  SetupCurrentThreadForChaosMode();

  if (!initData->name.IsEmpty()) {
    NS_SetCurrentThreadName(initData->name.BeginReading());
  }

  self->InitCommon();

  // Inform the ThreadManager
  nsThreadManager::get().RegisterCurrentThread(*self);

  mozilla::IOInterposer::RegisterCurrentThread();

  // This must come after the call to nsThreadManager::RegisterCurrentThread(),
  // because that call is needed to properly set up this thread as an nsThread,
  // which profiler_register_thread() requires. See bug 1347007.
  const bool registerWithProfiler = !initData->name.IsEmpty();
  if (registerWithProfiler) {
    PROFILER_REGISTER_THREAD(initData->name.BeginReading());
  }

  {
    // Scope for MessageLoop.
    MessageLoop loop(MessageLoop::TYPE_MOZILLA_NONMAINTHREAD, self);

    // Now, process incoming events...
    loop.Run();
  }
  BackgroundChild::CloseForCurrentThread();

  // NB: The main thread does not shut down here! It shuts down via
  // nsThreadManager::Shutdown.

  // Do NS_ProcessPendingEvents but with special handling to set
  // mEventsAreDoomed atomically with the removal of the last event. The key
  // invariant here is that we will never permit PutEvent to succeed if the
  // event would be left in the queue after our final call to
  // NS_ProcessPendingEvents. We also have to keep processing events as long
  // as we have outstanding mRequestedShutdownContexts.
  while (true) {
    // Check and see if we're waiting on any threads.
    self->WaitForAllAsynchronousShutdowns();

    if (self->mEvents->ShutdownIfNoPendingEvents()) {
      break;
    }
    NS_ProcessPendingEvents(self);
  }

  mozilla::IOInterposer::UnregisterCurrentThread();

  // Inform the threadmanager that this thread is going away
  nsThreadManager::get().UnregisterCurrentThread(*self);

  // The thread should only unregister itself if it was registered above.
  if (registerWithProfiler) {
    PROFILER_UNREGISTER_THREAD();
  }

  NotNull<RefPtr<nsThreadShutdownContext>> context =
      WrapNotNull(self->mShutdownContext);
  self->mShutdownContext = nullptr;
  MOZ_ASSERT(context->mTerminatingThread == self);

  // Take the joining thread from our shutdown context. This may have been
  // cleared by the joining thread if it decided to cancel waiting on us, in
  // which case we won't notify our caller, and leak.
  RefPtr<nsThread> joiningThread;
  {
    auto lock = context->mJoiningThread.Lock();
    joiningThread = lock->forget();
  }
  if (joiningThread) {
    // Dispatch shutdown ACK
    nsCOMPtr<nsIRunnable> event = new nsThreadShutdownAckEvent(context);
    nsresult dispatch_ack_rv =
        joiningThread->Dispatch(event, NS_DISPATCH_NORMAL);

    // We do not expect this to ever happen, but if we cannot dispatch
    // the ack event, someone is probably blocked waiting on us and will
    // crash with a hang later anyway. The best we can do is to tell
    // the world what happened right here.
    MOZ_RELEASE_ASSERT(NS_SUCCEEDED(dispatch_ack_rv));
  } else {
    NS_WARNING(
        "nsThread exiting after StopWaitingAndLeakThread was called, thread "
        "resources will be leaked!");
  }

  // Release any observer of the thread here.
  self->SetObserver(nullptr);

  // The PRThread will be deleted in PR_JoinThread(), so clear references.
  self->mThread = nullptr;
  self->mEventTarget->ClearCurrentThread();
  NS_RELEASE(self);
}
void nsThread::InitCommon() {
  mThreadId = uint32_t(PlatformThread::CurrentId());

  {
#if defined(XP_LINUX)
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_getattr_np(pthread_self(), &attr);

    size_t stackSize;
    pthread_attr_getstack(&attr, &mStackBase, &stackSize);

    // Glibc prior to 2.27 reports the stack size and base including the guard
    // region, so we need to compensate for it to get accurate accounting.
    // Also, this behavior difference isn't guarded by a versioned symbol, so we
    // actually need to check the runtime glibc version, not the version we were
    // compiled against.
    static bool sAdjustForGuardSize = ({
#  ifdef __GLIBC__
      unsigned major, minor;
      sscanf(gnu_get_libc_version(), "%u.%u", &major, &minor) < 2 ||
          major < 2 || (major == 2 && minor < 27);
#  else
      false;
#  endif
    });
    if (sAdjustForGuardSize) {
      size_t guardSize;
      pthread_attr_getguardsize(&attr, &guardSize);

      // Note: This assumes that the stack grows down, as is the case on all of
      // our tier 1 platforms. On platforms where the stack grows up, the
      // mStackBase adjustment is unnecessary, but doesn't cause any harm other
      // than under-counting stack memory usage by one page.
      mStackBase = reinterpret_cast<char*>(mStackBase) + guardSize;
      stackSize -= guardSize;
    }

    mStackSize = stackSize;

    // This is a bit of a hack.
    //
    // We really do want the NOHUGEPAGE flag on our thread stacks, since we
    // don't expect any of them to need anywhere near 2MB of space. But setting
    // it here is too late to have an effect, since the first stack page has
    // already been faulted into existence, and NSPR doesn't give us a way to
    // set it beforehand.
    //
    // What this does get us, however, is a different set of VM flags on our
    // thread stacks compared to normal heap memory. Which makes the Linux
    // kernel report them as separate regions, even when they are adjacent to
    // heap memory. This allows us to accurately track the actual memory
    // consumption of our allocated stacks.
    madvise(mStackBase, stackSize, MADV_NOHUGEPAGE);

    pthread_attr_destroy(&attr);
#elif defined(XP_WIN)
    static const StaticDynamicallyLinkedFunctionPtr<
        GetCurrentThreadStackLimitsFn>
        sGetStackLimits(L"kernel32.dll", "GetCurrentThreadStackLimits");

    if (sGetStackLimits) {
      ULONG_PTR stackBottom, stackTop;
      sGetStackLimits(&stackBottom, &stackTop);
      mStackBase = reinterpret_cast<void*>(stackBottom);
      mStackSize = stackTop - stackBottom;
    }
#endif
  }

  InitThreadLocalVariables();
  AddToThreadList();
}
//-----------------------------------------------------------------------------

#ifdef MOZ_CANARY
int sCanaryOutputFD = -1;
#endif

nsThread::nsThread(NotNull<SynchronizedEventQueue*> aQueue,
                   MainThreadFlag aMainThread, uint32_t aStackSize)
    : mEvents(aQueue.get()),
      mEventTarget(
          new ThreadEventTarget(mEvents.get(), aMainThread == MAIN_THREAD)),
      mOutstandingShutdownContexts(0),
      mShutdownContext(nullptr),
      mScriptObserver(nullptr),
      mThreadName("<uninitialized>"),
      mStackSize(aStackSize),
      mNestedEventLoopDepth(0),
      mShutdownRequired(false),
      mPriority(PRIORITY_NORMAL),
      mIsMainThread(aMainThread == MAIN_THREAD),
      mUseHangMonitor(aMainThread == MAIN_THREAD),
      mIsAPoolThreadFree(nullptr),
      mCanInvokeJS(false),
#ifdef EARLY_BETA_OR_EARLIER
      mLastWakeupCheckTime(TimeStamp::Now()),
#endif
      mPerformanceCounterState(mNestedEventLoopDepth, mIsMainThread) {
  if (mIsMainThread) {
    mozilla::TaskController::Get()->SetPerformanceCounterState(
        &mPerformanceCounterState);
  }
}

nsThread::nsThread()
    : mEvents(nullptr),
      mEventTarget(nullptr),
      mOutstandingShutdownContexts(0),
      mShutdownContext(nullptr),
      mScriptObserver(nullptr),
      mThreadName("<uninitialized>"),
      mStackSize(0),
      mNestedEventLoopDepth(0),
      mShutdownRequired(false),
      mPriority(PRIORITY_NORMAL),
      mIsMainThread(false),
      mUseHangMonitor(false),
      mCanInvokeJS(false),
#ifdef EARLY_BETA_OR_EARLIER
      mLastWakeupCheckTime(TimeStamp::Now()),
#endif
      mPerformanceCounterState(mNestedEventLoopDepth, mIsMainThread) {
  MOZ_ASSERT(!NS_IsMainThread());
}

nsThread::~nsThread() {
  NS_ASSERTION(mOutstandingShutdownContexts == 0,
               "shouldn't be waiting on other threads to shutdown");

  MaybeRemoveFromThreadList();
}
nsresult nsThread::Init(const nsACString& aName) {
  MOZ_ASSERT(mEvents);
  MOZ_ASSERT(mEventTarget);
  MOZ_ASSERT(!mThread);

  NS_ADDREF_THIS();

  SetThreadNameInternal(aName);

  mShutdownRequired = true;

  UniquePtr<ThreadInitData> initData(
      new ThreadInitData{this, nsCString(aName)});

  PRThread* thread = nullptr;
  // ThreadFunc is responsible for setting mThread
  if (!(thread = PR_CreateThread(PR_USER_THREAD, ThreadFunc, initData.get(),
                                 PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                                 PR_JOINABLE_THREAD, mStackSize))) {
    NS_RELEASE_THIS();
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // The created thread now owns initData, so release our ownership of it.
  Unused << initData.release();

  // Note: we set these both here and inside ThreadFunc, to what should be
  // the same value. This is because calls within ThreadFunc need these values
  // to be set, and our callers need these values to be set.
  DebugOnly<PRThread*> prev = mThread.exchange(thread);
  MOZ_ASSERT(!prev || prev == thread);

  mEventTarget->SetCurrentThread(thread);
  return NS_OK;
}

nsresult nsThread::InitCurrentThread() {
  mThread = PR_GetCurrentThread();
  SetupCurrentThreadForChaosMode();
  InitCommon();

  nsThreadManager::get().RegisterCurrentThread(*this);
  return NS_OK;
}

void nsThread::GetThreadName(nsACString& aNameBuffer) {
  auto lock = mThreadName.Lock();
  aNameBuffer = lock.ref();
}

void nsThread::SetThreadNameInternal(const nsACString& aName) {
  auto lock = mThreadName.Lock();
  lock->Assign(aName);
}
//-----------------------------------------------------------------------------
// nsIEventTarget

NS_IMETHODIMP
nsThread::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) {
  MOZ_ASSERT(mEventTarget);
  NS_ENSURE_TRUE(mEventTarget, NS_ERROR_NOT_IMPLEMENTED);

  nsCOMPtr<nsIRunnable> event(aEvent);
  return mEventTarget->Dispatch(event.forget(), aFlags);
}

NS_IMETHODIMP
nsThread::Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags) {
  MOZ_ASSERT(mEventTarget);
  NS_ENSURE_TRUE(mEventTarget, NS_ERROR_NOT_IMPLEMENTED);

  LOG(("THRD(%p) Dispatch [%p %x]\n", this, /* XXX aEvent */ nullptr, aFlags));

  return mEventTarget->Dispatch(std::move(aEvent), aFlags);
}

NS_IMETHODIMP
nsThread::DelayedDispatch(already_AddRefed<nsIRunnable> aEvent,
                          uint32_t aDelayMs) {
  MOZ_ASSERT(mEventTarget);
  NS_ENSURE_TRUE(mEventTarget, NS_ERROR_NOT_IMPLEMENTED);

  return mEventTarget->DelayedDispatch(std::move(aEvent), aDelayMs);
}

NS_IMETHODIMP
nsThread::GetRunningEventDelay(TimeDuration* aDelay, TimeStamp* aStart) {
  if (mIsAPoolThreadFree && *mIsAPoolThreadFree) {
    // if there are unstarted threads in the pool, a new event to the
    // pool would not be delayed at all (beyond thread start time)
    *aDelay = TimeDuration();
    *aStart = TimeStamp();
  } else {
    *aDelay = mLastEventDelay;
    *aStart = mLastEventStart;
  }
  return NS_OK;
}

NS_IMETHODIMP
nsThread::SetRunningEventDelay(TimeDuration aDelay, TimeStamp aStart) {
  mLastEventDelay = aDelay;
  mLastEventStart = aStart;
  return NS_OK;
}

NS_IMETHODIMP
nsThread::IsOnCurrentThread(bool* aResult) {
  if (mEventTarget) {
    return mEventTarget->IsOnCurrentThread(aResult);
  }
  *aResult = PR_GetCurrentThread() == mThread;
  return NS_OK;
}

NS_IMETHODIMP_(bool)
nsThread::IsOnCurrentThreadInfallible() {
  // This method is only going to be called if `mThread` is null, which
  // only happens when the thread has exited the event loop. Therefore, when
  // we are called, we can never be on this thread.
  return false;
}
//-----------------------------------------------------------------------------
// nsIThread

NS_IMETHODIMP
nsThread::GetPRThread(PRThread** aResult) {
  PRThread* thread = mThread;  // atomic load
  *aResult = thread;
  return thread ? NS_OK : NS_ERROR_NOT_AVAILABLE;
}

NS_IMETHODIMP
nsThread::GetCanInvokeJS(bool* aResult) {
  *aResult = mCanInvokeJS;
  return NS_OK;
}

NS_IMETHODIMP
nsThread::SetCanInvokeJS(bool aCanInvokeJS) {
  mCanInvokeJS = aCanInvokeJS;
  return NS_OK;
}

NS_IMETHODIMP
nsThread::GetLastLongTaskEnd(TimeStamp* _retval) {
  *_retval = mPerformanceCounterState.LastLongTaskEnd();
  return NS_OK;
}

NS_IMETHODIMP
nsThread::GetLastLongNonIdleTaskEnd(TimeStamp* _retval) {
  *_retval = mPerformanceCounterState.LastLongNonIdleTaskEnd();
  return NS_OK;
}

NS_IMETHODIMP
nsThread::SetNameForWakeupTelemetry(const nsACString& aName) {
#ifdef EARLY_BETA_OR_EARLIER
  mNameForWakeupTelemetry = aName;
#endif
  return NS_OK;
}

NS_IMETHODIMP
nsThread::AsyncShutdown() {
  LOG(("THRD(%p) async shutdown\n", this));

  nsCOMPtr<nsIThreadShutdown> shutdown;
  BeginShutdown(getter_AddRefs(shutdown));
  return NS_OK;
}

NS_IMETHODIMP
nsThread::BeginShutdown(nsIThreadShutdown** aShutdown) {
  LOG(("THRD(%p) begin shutdown\n", this));

  MOZ_ASSERT(mEvents);
  MOZ_ASSERT(mEventTarget);
  MOZ_ASSERT(mThread != PR_GetCurrentThread());
  if (NS_WARN_IF(mThread == PR_GetCurrentThread())) {
    return NS_ERROR_UNEXPECTED;
  }

  // Prevent multiple calls to this method.
  if (!mShutdownRequired.compareExchange(true, false)) {
    return NS_ERROR_UNEXPECTED;
  }
  MOZ_ASSERT(mThread);

  MaybeRemoveFromThreadList();

  RefPtr<nsThread> currentThread = nsThreadManager::get().GetCurrentThread();

  MOZ_DIAGNOSTIC_ASSERT(currentThread->EventQueue(),
                        "Shutdown() may only be called from an XPCOM thread");

  // Allocate a shutdown context, and record that we're waiting for it.
  RefPtr<nsThreadShutdownContext> context =
      new nsThreadShutdownContext(WrapNotNull(this), currentThread);

  ++currentThread->mOutstandingShutdownContexts;
  nsCOMPtr<nsIRunnable> clearOutstanding = NS_NewRunnableFunction(
      "nsThread::ClearOutstandingShutdownContext",
      [currentThread] { --currentThread->mOutstandingShutdownContexts; });
  context->OnCompletion(clearOutstanding);

  // Set mShutdownContext and wake up the thread in case it is waiting for
  // events to process.
  nsCOMPtr<nsIRunnable> event =
      new nsThreadShutdownEvent(WrapNotNull(this), WrapNotNull(context));
  if (!mEvents->PutEvent(event.forget(), EventQueuePriority::Normal)) {
    // We do not expect this to happen. Let's collect some diagnostics.
    nsAutoCString threadName;
    GetThreadName(threadName);
    MOZ_CRASH_UNSAFE_PRINTF("Attempt to shutdown an already dead thread: %s",
                            threadName.get());
  }

  // We could still end up with other events being added after the shutdown
  // task, but that's okay because we process pending events in ThreadFunc
  // after setting mShutdownContext just before exiting.
  context.forget(aShutdown);
  return NS_OK;
}

void nsThread::ShutdownComplete(NotNull<nsThreadShutdownContext*> aContext) {
  MOZ_ASSERT(mEvents);
  MOZ_ASSERT(mEventTarget);
  MOZ_ASSERT(aContext->mTerminatingThread == this);

  MaybeRemoveFromThreadList();

  // Now, it should be safe to join without fear of dead-locking.
  PR_JoinThread(aContext->mTerminatingPRThread);
  MOZ_ASSERT(!mThread);

#ifdef DEBUG
  nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserver();
  MOZ_ASSERT(!obs, "Should have been cleared at shutdown!");
#endif

  aContext->MarkCompleted();
}

void nsThread::WaitForAllAsynchronousShutdowns() {
  // This is the motivating example for why SpinEventLoopUntil
  // has the template parameter we are providing here.
  SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
      "nsThread::WaitForAllAsynchronousShutdowns"_ns,
      [&]() { return mOutstandingShutdownContexts == 0; }, this);
}

NS_IMETHODIMP
nsThread::Shutdown() {
  LOG(("THRD(%p) sync shutdown\n", this));

  nsCOMPtr<nsIThreadShutdown> context;
  nsresult rv = BeginShutdown(getter_AddRefs(context));
  if (NS_FAILED(rv)) {
    return NS_OK;  // The thread has already shut down.
  }

  // If we are going to hang here we want to see the thread's name
  nsAutoCString threadName;
  GetThreadName(threadName);

  // Process events on the current thread until we receive a shutdown ACK.
  // Allows waiting; ensure no locks are held that would deadlock us!
  SpinEventLoopUntil("nsThread::Shutdown: "_ns + threadName,
                     [&]() { return context->GetCompleted(); });

  return NS_OK;
}
NS_IMETHODIMP
nsThread::HasPendingEvents(bool* aResult) {
  if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
    return NS_ERROR_NOT_SAME_THREAD;
  }

  if (mIsMainThread && !mIsInLocalExecutionMode) {
    *aResult = TaskController::Get()->HasMainThreadPendingTasks();
  } else {
    *aResult = mEvents->HasPendingEvent();
  }
  return NS_OK;
}

NS_IMETHODIMP
nsThread::HasPendingHighPriorityEvents(bool* aResult) {
  if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
    return NS_ERROR_NOT_SAME_THREAD;
  }

  // This function appears to never be called anymore.
  *aResult = false;
  return NS_OK;
}

NS_IMETHODIMP
nsThread::DispatchToQueue(already_AddRefed<nsIRunnable> aEvent,
                          EventQueuePriority aQueue) {
  nsCOMPtr<nsIRunnable> event = aEvent;

  if (NS_WARN_IF(!event)) {
    return NS_ERROR_INVALID_ARG;
  }

  if (!mEvents->PutEvent(event.forget(), aQueue)) {
    NS_WARNING(
        "An idle event was posted to a thread that will never run it "
        "(rejected)");
    return NS_ERROR_UNEXPECTED;
  }

  return NS_OK;
}

#ifdef MOZ_CANARY
void canary_alarm_handler(int signum);

class Canary {
  // XXX ToDo: support nested loops
 public:
  Canary() {
    if (sCanaryOutputFD > 0 && EventLatencyIsImportant()) {
      signal(SIGALRM, canary_alarm_handler);
      ualarm(15000, 0);
    }
  }

  ~Canary() {
    if (sCanaryOutputFD != 0 && EventLatencyIsImportant()) {
      ualarm(0, 0);
    }
  }

  static bool EventLatencyIsImportant() {
    return NS_IsMainThread() && XRE_IsParentProcess();
  }
};

void canary_alarm_handler(int signum) {
  void* array[30];
  const char msg[29] = "event took too long to run:\n";
  // use write to be safe in the signal handler
  write(sCanaryOutputFD, msg, sizeof(msg));
  backtrace_symbols_fd(array, backtrace(array, 30), sCanaryOutputFD);
}

#endif

#define NOTIFY_EVENT_OBSERVERS(observers_, func_, params_)                 \
  do {                                                                     \
    if (!observers_.IsEmpty()) {                                           \
      for (nsCOMPtr<nsIThreadObserver> obs_ : observers_.ForwardRange()) { \
        obs_->func_ params_;                                               \
      }                                                                    \
    }                                                                      \
  } while (0)

#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
// static
bool nsThread::GetLabeledRunnableName(nsIRunnable* aEvent, nsACString& aName,
                                      EventQueuePriority aPriority) {
  bool labeled = false;
  if (RefPtr<SchedulerGroup::Runnable> groupRunnable = do_QueryObject(aEvent)) {
    labeled = true;
    MOZ_ALWAYS_TRUE(NS_SUCCEEDED(groupRunnable->GetName(aName)));
  } else if (nsCOMPtr<nsINamed> named = do_QueryInterface(aEvent)) {
    MOZ_ALWAYS_TRUE(NS_SUCCEEDED(named->GetName(aName)));
  } else {
    aName.AssignLiteral("non-nsINamed runnable");
  }
  if (aName.IsEmpty()) {
    aName.AssignLiteral("anonymous runnable");
  }

  if (!labeled && aPriority > EventQueuePriority::InputHigh) {
    aName.AppendLiteral("(unlabeled)");
  }

  return labeled;
}
#endif

mozilla::PerformanceCounter* nsThread::GetPerformanceCounter(
    nsIRunnable* aEvent) const {
  return GetPerformanceCounterBase(aEvent);
}

// static
mozilla::PerformanceCounter* nsThread::GetPerformanceCounterBase(
    nsIRunnable* aEvent) {
  RefPtr<SchedulerGroup::Runnable> docRunnable = do_QueryObject(aEvent);
  if (docRunnable) {
    return docRunnable->GetPerformanceCounter();
  }
  return nullptr;
}

size_t nsThread::ShallowSizeOfIncludingThis(
    mozilla::MallocSizeOf aMallocSizeOf) const {
  size_t n = 0;
  if (mShutdownContext) {
    n += aMallocSizeOf(mShutdownContext);
  }
  return aMallocSizeOf(this) + aMallocSizeOf(mThread) + n;
}

size_t nsThread::SizeOfEventQueues(mozilla::MallocSizeOf aMallocSizeOf) const {
  size_t n = 0;
  if (mEventTarget) {
    // The size of mEvents is reported by mEventTarget.
    n += mEventTarget->SizeOfIncludingThis(aMallocSizeOf);
  }
  return n;
}

size_t nsThread::SizeOfIncludingThis(
    mozilla::MallocSizeOf aMallocSizeOf) const {
  return ShallowSizeOfIncludingThis(aMallocSizeOf) +
         SizeOfEventQueues(aMallocSizeOf);
}
NS_IMETHODIMP
nsThread::ProcessNextEvent(bool aMayWait, bool* aResult) {
  MOZ_ASSERT(mEvents);
  NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);

  LOG(("THRD(%p) ProcessNextEvent [%u %u]\n", this, aMayWait,
       mNestedEventLoopDepth));

  if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
    return NS_ERROR_NOT_SAME_THREAD;
  }

  // The toplevel event loop normally blocks waiting for the next event, but
  // if we're trying to shut this thread down, we must exit the event loop
  // when the event queue is empty. This only applies to the toplevel event
  // loop! Nested event loops (e.g. during sync dispatch) are waiting for
  // some state change and must be able to block even if something has
  // requested shutdown of the thread. Otherwise we'll just busywait as we
  // endlessly look for an event, fail to find one, and repeat the nested
  // event loop since its state change hasn't happened yet.
  bool reallyWait = aMayWait && (mNestedEventLoopDepth > 0 || !ShuttingDown());

  if (mIsInLocalExecutionMode) {
    if (nsCOMPtr<nsIRunnable> event = mEvents->GetEvent(reallyWait)) {
      *aResult = true;
      LogRunnable::Run log(event);
      event->Run();
      event = nullptr;
    } else {
      *aResult = false;
    }
    return NS_OK;
  }

  Maybe<dom::AutoNoJSAPI> noJSAPI;

  if (mUseHangMonitor && reallyWait) {
    BackgroundHangMonitor().NotifyWait();
  }

  if (mIsMainThread) {
    DoMainThreadSpecificProcessing();
  }

  ++mNestedEventLoopDepth;

  // We only want to create an AutoNoJSAPI on threads that actually do DOM
  // stuff (including workers). Those are exactly the threads that have an
  // mScriptObserver.
  bool callScriptObserver = !!mScriptObserver;
  if (callScriptObserver) {
    noJSAPI.emplace();
    mScriptObserver->BeforeProcessTask(reallyWait);
  }

#ifdef EARLY_BETA_OR_EARLIER
  // Need to capture mayWaitForWakeup state before OnProcessNextEvent,
  // since on the main thread OnProcessNextEvent ends up waiting for the new
  // events.
  bool mayWaitForWakeup = reallyWait && !mEvents->HasPendingEvent();
#endif

  nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserverOnThread();
  if (obs) {
    obs->OnProcessNextEvent(this, reallyWait);
  }

  NOTIFY_EVENT_OBSERVERS(EventQueue()->EventObservers(), OnProcessNextEvent,
                         (this, reallyWait));

#ifdef MOZ_CANARY
  Canary canary;
#endif
  nsresult rv = NS_OK;

  {
    // Scope for |event| to make sure that its destructor fires while
    // mNestedEventLoopDepth has been incremented, since that destructor can
    // also do work.
    nsCOMPtr<nsIRunnable> event;
    bool usingTaskController = mIsMainThread;
    if (usingTaskController) {
      event = TaskController::Get()->GetRunnableForMTTask(reallyWait);
    } else {
      event = mEvents->GetEvent(reallyWait, &mLastEventDelay);
    }

    *aResult = (event.get() != nullptr);

    if (event) {
#ifdef EARLY_BETA_OR_EARLIER
      if (mayWaitForWakeup && mThread) {
        ++mWakeupCount;
        if (mWakeupCount == kTelemetryWakeupCountLimit) {
          TimeStamp now = TimeStamp::Now();
          double ms = (now - mLastWakeupCheckTime).ToMilliseconds();
          if (ms < 0) {
            ms = 0;
          }
          const char* name = !mNameForWakeupTelemetry.IsEmpty()
                                 ? mNameForWakeupTelemetry.get()
                                 : PR_GetThreadName(mThread);
          if (!name) {
            name = mIsMainThread ? "MainThread" : "(nameless thread)";
          }
          nsDependentCString key(name);
          Telemetry::Accumulate(Telemetry::THREAD_WAKEUP, key,
                                static_cast<uint32_t>(ms));
          mLastWakeupCheckTime = now;
          mWakeupCount = 0;
        }
      }
#endif

      LOG(("THRD(%p) running [%p]\n", this, event.get()));

      Maybe<LogRunnable::Run> log;

      if (!usingTaskController) {
        log.emplace(event);
      }

      // Delay event processing to encourage whoever dispatched this event
      // to run.
      DelayForChaosMode(ChaosFeature::TaskRunning, 1000);

      mozilla::TimeStamp now = mozilla::TimeStamp::Now();

      if (mUseHangMonitor) {
        BackgroundHangMonitor().NotifyActivity();
      }

      Maybe<PerformanceCounterState::Snapshot> snapshot;
      if (!usingTaskController) {
        snapshot.emplace(mPerformanceCounterState.RunnableWillRun(
            GetPerformanceCounter(event), now, false));
      }

      mLastEventStart = now;

      if (!usingTaskController) {
        AUTO_PROFILE_FOLLOWING_RUNNABLE(event);
        event->Run();
      } else {
        // Avoid generating "Runnable" profiler markers for the
        // "TaskController::ExecutePendingMTTasks" runnables created
        // by TaskController, which already adds "Runnable" markers
        // when executing tasks.
        event->Run();
      }

      if (usingTaskController) {
        *aResult = TaskController::Get()->MTTaskRunnableProcessedTask();
      } else {
        mPerformanceCounterState.RunnableDidRun(std::move(snapshot.ref()));
      }

      // To cover the event's destructor code inside the LogRunnable span.
      event = nullptr;
    } else {
      mLastEventDelay = TimeDuration();
      mLastEventStart = TimeStamp();
      if (aMayWait) {
        MOZ_ASSERT(ShuttingDown(),
                   "This should only happen when shutting down");
        rv = NS_ERROR_UNEXPECTED;
      }
    }
  }

  DrainDirectTasks();

  NOTIFY_EVENT_OBSERVERS(EventQueue()->EventObservers(), AfterProcessNextEvent,
                         (this, *aResult));

  if (obs) {
    obs->AfterProcessNextEvent(this, *aResult);
  }

  // In case some EventObserver dispatched some direct tasks; process them
  // now.
  DrainDirectTasks();

  if (callScriptObserver) {
    if (mScriptObserver) {
      mScriptObserver->AfterProcessTask(mNestedEventLoopDepth);
    }
    noJSAPI.reset();
  }

  --mNestedEventLoopDepth;

  return rv;
}
//-----------------------------------------------------------------------------
// nsISupportsPriority

NS_IMETHODIMP
nsThread::GetPriority(int32_t* aPriority) {
  *aPriority = mPriority;
  return NS_OK;
}

NS_IMETHODIMP
nsThread::SetPriority(int32_t aPriority) {
  if (NS_WARN_IF(!mThread)) {
    return NS_ERROR_NOT_INITIALIZED;
  }

  // NSPR defines the following four thread priorities:
  //   PR_PRIORITY_LOW
  //   PR_PRIORITY_NORMAL
  //   PR_PRIORITY_HIGH
  //   PR_PRIORITY_URGENT
  // We map the priority values defined on nsISupportsPriority to these
  // values.

  mPriority = aPriority;

  PRThreadPriority pri;
  if (mPriority <= PRIORITY_HIGHEST) {
    pri = PR_PRIORITY_URGENT;
  } else if (mPriority < PRIORITY_NORMAL) {
    pri = PR_PRIORITY_HIGH;
  } else if (mPriority > PRIORITY_NORMAL) {
    pri = PR_PRIORITY_LOW;
  } else {
    pri = PR_PRIORITY_NORMAL;
  }
  // If chaos mode is active, retain the randomly chosen priority
  if (!ChaosMode::isActive(ChaosFeature::ThreadScheduling)) {
    PR_SetThreadPriority(mThread, pri);
  }

  return NS_OK;
}

NS_IMETHODIMP
nsThread::AdjustPriority(int32_t aDelta) {
  return SetPriority(mPriority + aDelta);
}
//-----------------------------------------------------------------------------
// nsIThreadInternal

NS_IMETHODIMP
nsThread::GetObserver(nsIThreadObserver** aObs) {
  MOZ_ASSERT(mEvents);
  NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);

  nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserver();
  obs.forget(aObs);
  return NS_OK;
}

NS_IMETHODIMP
nsThread::SetObserver(nsIThreadObserver* aObs) {
  MOZ_ASSERT(mEvents);
  NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);

  if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
    return NS_ERROR_NOT_SAME_THREAD;
  }

  mEvents->SetObserver(aObs);
  return NS_OK;
}

uint32_t nsThread::RecursionDepth() const {
  MOZ_ASSERT(PR_GetCurrentThread() == mThread);
  return mNestedEventLoopDepth;
}

NS_IMETHODIMP
nsThread::AddObserver(nsIThreadObserver* aObserver) {
  MOZ_ASSERT(mEvents);
  NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);

  if (NS_WARN_IF(!aObserver)) {
    return NS_ERROR_INVALID_ARG;
  }
  if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
    return NS_ERROR_NOT_SAME_THREAD;
  }

  EventQueue()->AddObserver(aObserver);

  return NS_OK;
}

NS_IMETHODIMP
nsThread::RemoveObserver(nsIThreadObserver* aObserver) {
  MOZ_ASSERT(mEvents);
  NS_ENSURE_TRUE(mEvents, NS_ERROR_NOT_IMPLEMENTED);

  if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
    return NS_ERROR_NOT_SAME_THREAD;
  }

  EventQueue()->RemoveObserver(aObserver);

  return NS_OK;
}

void nsThread::SetScriptObserver(
    mozilla::CycleCollectedJSContext* aScriptObserver) {
  if (!aScriptObserver) {
    mScriptObserver = nullptr;
    return;
  }

  MOZ_ASSERT(!mScriptObserver);
  mScriptObserver = aScriptObserver;
}

void NS_DispatchMemoryPressure();

void nsThread::DoMainThreadSpecificProcessing() const {
  MOZ_ASSERT(mIsMainThread);

  ipc::CancelCPOWs();

  // Fire a memory pressure notification, if one is pending.
  if (!ShuttingDown()) {
    NS_DispatchMemoryPressure();
  }
}

NS_IMETHODIMP
nsThread::GetEventTarget(nsIEventTarget** aEventTarget) {
  nsCOMPtr<nsIEventTarget> target = this;
  target.forget(aEventTarget);
  return NS_OK;
}
//-----------------------------------------------------------------------------
// nsIDirectTaskDispatcher

NS_IMETHODIMP
nsThread::DispatchDirectTask(already_AddRefed<nsIRunnable> aEvent) {
  if (!IsOnCurrentThread()) {
    return NS_ERROR_FAILURE;
  }
  mDirectTasks.AddTask(std::move(aEvent));
  return NS_OK;
}

NS_IMETHODIMP nsThread::DrainDirectTasks() {
  if (!IsOnCurrentThread()) {
    return NS_ERROR_FAILURE;
  }
  mDirectTasks.DrainTasks();
  return NS_OK;
}

NS_IMETHODIMP nsThread::HaveDirectTasks(bool* aValue) {
  if (!IsOnCurrentThread()) {
    return NS_ERROR_FAILURE;
  }

  *aValue = mDirectTasks.HaveTasks();
  return NS_OK;
}

nsIEventTarget* nsThread::EventTarget() { return this; }

nsISerialEventTarget* nsThread::SerialEventTarget() { return this; }

void nsThread::OnDelayedRunnableCreated(mozilla::DelayedRunnable* aRunnable) {
  mEventTarget->OnDelayedRunnableCreated(aRunnable);
}

void nsThread::OnDelayedRunnableScheduled(mozilla::DelayedRunnable* aRunnable) {
  mEventTarget->OnDelayedRunnableScheduled(aRunnable);
}

void nsThread::OnDelayedRunnableRan(mozilla::DelayedRunnable* aRunnable) {
  mEventTarget->OnDelayedRunnableRan(aRunnable);
}

nsLocalExecutionRecord nsThread::EnterLocalExecution() {
  MOZ_RELEASE_ASSERT(!mIsInLocalExecutionMode);
  MOZ_ASSERT(IsOnCurrentThread());
  MOZ_ASSERT(EventQueue());
  return nsLocalExecutionRecord(*EventQueue(), mIsInLocalExecutionMode);
}

nsLocalExecutionGuard::nsLocalExecutionGuard(
    nsLocalExecutionRecord&& aLocalExecutionRecord)
    : mEventQueueStack(aLocalExecutionRecord.mEventQueueStack),
      mLocalEventTarget(mEventQueueStack.PushEventQueue()),
      mLocalExecutionFlag(aLocalExecutionRecord.mLocalExecutionFlag) {
  MOZ_ASSERT(mLocalEventTarget);
  MOZ_ASSERT(!mLocalExecutionFlag);
  mLocalExecutionFlag = true;
}

nsLocalExecutionGuard::~nsLocalExecutionGuard() {
  MOZ_ASSERT(mLocalExecutionFlag);
  mLocalExecutionFlag = false;
  mEventQueueStack.PopEventQueue(mLocalEventTarget);
}
NS_IMPL_ISUPPORTS(nsThreadShutdownContext, nsIThreadShutdown)

NS_IMETHODIMP
nsThreadShutdownContext::OnCompletion(nsIRunnable* aEvent) {
  if (mCompleted) {
    aEvent->Run();
  } else {
    mCompletionCallbacks.AppendElement(aEvent);
  }
  return NS_OK;
}

NS_IMETHODIMP
nsThreadShutdownContext::GetCompleted(bool* aCompleted) {
  *aCompleted = mCompleted;
  return NS_OK;
}

NS_IMETHODIMP
nsThreadShutdownContext::StopWaitingAndLeakThread() {
  // Take the joining thread from `mJoiningThread` so that the terminating
  // thread won't try to dispatch nsThreadShutdownAckEvent to us anymore.
  RefPtr<nsThread> joiningThread;
  {
    auto lock = mJoiningThread.Lock();
    joiningThread = lock->forget();
  }
  if (!joiningThread) {
    // Shutdown is already being resolved, so there's nothing for us to do.
    return NS_ERROR_NOT_AVAILABLE;
  }

  MOZ_DIAGNOSTIC_ASSERT(joiningThread->IsOnCurrentThread());
  MarkCompleted();
  return NS_OK;
}

void nsThreadShutdownContext::MarkCompleted() {
  MOZ_ASSERT(!mCompleted);
  mCompleted = true;
  nsTArray<nsCOMPtr<nsIRunnable>> callbacks(std::move(mCompletionCallbacks));
  for (auto& callback : callbacks) {
    callback->Run();
  }
}
namespace mozilla {
PerformanceCounterState::Snapshot PerformanceCounterState::RunnableWillRun(
    PerformanceCounter* aCounter, TimeStamp aNow, bool aIsIdleRunnable) {
  if (IsNestedRunnable()) {
    // Flush out any accumulated time that should be accounted to the
    // current runnable before we start running a nested runnable.
    MaybeReportAccumulatedTime(aNow);
  }

  Snapshot snapshot(mCurrentEventLoopDepth, mCurrentPerformanceCounter,
                    mCurrentRunnableIsIdleRunnable);

  mCurrentEventLoopDepth = mNestedEventLoopDepth;
  mCurrentPerformanceCounter = aCounter;
  mCurrentRunnableIsIdleRunnable = aIsIdleRunnable;
  mCurrentTimeSliceStart = aNow;

  return snapshot;
}

void PerformanceCounterState::RunnableDidRun(Snapshot&& aSnapshot) {
  // First thing: Restore our mCurrentEventLoopDepth so we can use
  // IsNestedRunnable().
  mCurrentEventLoopDepth = aSnapshot.mOldEventLoopDepth;

  // We may not need the current timestamp; don't bother computing it if we
  // don't.
  TimeStamp now;
  if (mCurrentPerformanceCounter || mIsMainThread || IsNestedRunnable()) {
    now = TimeStamp::Now();
  }
  if (mCurrentPerformanceCounter || mIsMainThread) {
    MaybeReportAccumulatedTime(now);
  }

  // And now restore the rest of our state.
  mCurrentPerformanceCounter = std::move(aSnapshot.mOldPerformanceCounter);
  mCurrentRunnableIsIdleRunnable = aSnapshot.mOldIsIdleRunnable;
  if (IsNestedRunnable()) {
    // Reset mCurrentTimeSliceStart to right now, so our parent runnable's
    // next slice can be properly accounted for.
    mCurrentTimeSliceStart = now;
  } else {
    // We are done at the outermost level; we are no longer in a timeslice.
    mCurrentTimeSliceStart = TimeStamp();
  }
}

void PerformanceCounterState::MaybeReportAccumulatedTime(TimeStamp aNow) {
  MOZ_ASSERT(mCurrentTimeSliceStart,
             "How did we get here if we're not in a timeslice?");

  if (!mCurrentPerformanceCounter && !mIsMainThread) {
    // No one cares about this timeslice.
    return;
  }

  TimeDuration duration = aNow - mCurrentTimeSliceStart;
  if (mCurrentPerformanceCounter) {
    mCurrentPerformanceCounter->IncrementExecutionDuration(
        duration.ToMicroseconds());
  }

  // Long tasks only matter on the main thread.
  if (mIsMainThread && duration.ToMilliseconds() > LONGTASK_BUSY_WINDOW_MS) {
    // Idle events (gc...) don't *really* count here
    if (!mCurrentRunnableIsIdleRunnable) {
      mLastLongNonIdleTaskEnd = aNow;
    }
    mLastLongTaskEnd = aNow;

    if (profiler_thread_is_being_profiled_for_markers()) {
      struct LongTaskMarker {
        static constexpr Span<const char> MarkerTypeName() {
          return MakeStringSpan("MainThreadLongTask");
        }
        static void StreamJSONMarkerData(
            baseprofiler::SpliceableJSONWriter& aWriter) {
          aWriter.StringProperty("category", "LongTask");
        }
        static MarkerSchema MarkerTypeDisplay() {
          using MS = MarkerSchema;
          MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
          schema.AddKeyLabelFormat("category", "Type", MS::Format::String);
          return schema;
        }
      };

      profiler_add_marker(mCurrentRunnableIsIdleRunnable
                              ? ProfilerString8View("LongIdleTask")
                              : ProfilerString8View("LongTask"),
                          geckoprofiler::category::OTHER,
                          MarkerTiming::Interval(mCurrentTimeSliceStart, aNow),
                          LongTaskMarker{});
    }
  }
}

}  // namespace mozilla