/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsTimerImpl.h"
#include "TimerThread.h"

#include "GeckoProfiler.h"
#include "nsThreadUtils.h"

#include "nsIObserverService.h"
#include "mozilla/Services.h"
#include "mozilla/ChaosMode.h"
#include "mozilla/ArenaAllocator.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/OperatorNewExtensions.h"
#include "mozilla/StaticPrefs_timer.h"

#include "mozilla/glean/GleanMetrics.h"

using namespace mozilla;
// Uncomment the following line to enable runtime stats during development.
// #define TIMERS_RUNTIME_STATS

#ifdef TIMERS_RUNTIME_STATS
// This class gathers durations and displays some basic stats when destroyed.
// It is intended to be used as a static variable (see `AUTO_TIMERS_STATS`
// below), to display stats at the end of the program.
class StaticTimersStats {
 public:
  explicit StaticTimersStats(const char* aName) : mName(aName) {}

  ~StaticTimersStats() {
    // Using unsigned long long for computations and printfs.
    using ULL = unsigned long long;
    ULL n = static_cast<ULL>(mCount);
    if (n == 0) {
      printf("[%d] Timers stats `%s`: (nothing)\n",
             int(profiler_current_process_id().ToNumber()), mName);
    } else if (ULL sumNs = static_cast<ULL>(mSumDurationsNs); sumNs == 0) {
      printf("[%d] Timers stats `%s`: %llu\n",
             int(profiler_current_process_id().ToNumber()), mName, n);
    } else {
      printf("[%d] Timers stats `%s`: %llu ns / %llu = %llu ns, max %llu ns\n",
             int(profiler_current_process_id().ToNumber()), mName, sumNs, n,
             sumNs / n, static_cast<ULL>(mLongestDurationNs));
    }
  }

  void AddDurationFrom(TimeStamp aStart) {
    // Duration between aStart and now, rounded to the nearest nanosecond.
    DurationNs duration = static_cast<DurationNs>(
        (TimeStamp::Now() - aStart).ToMicroseconds() * 1000 + 0.5);
    mSumDurationsNs += duration;
    ++mCount;
    // Update mLongestDurationNs if this one is longer.
    for (;;) {
      DurationNs longest = mLongestDurationNs;
      if (MOZ_LIKELY(longest >= duration)) {
        // This duration is not the longest, nothing to do.
        break;
      }
      if (MOZ_LIKELY(mLongestDurationNs.compareExchange(longest, duration))) {
        // Successfully updated `mLongestDurationNs` with the new value.
        break;
      }
      // Otherwise someone else just updated `mLongestDurationNs`, we need to
      // try again by looping.
    }
  }

  void AddCount() {
    MOZ_ASSERT(mSumDurationsNs == 0, "Don't mix counts and durations");
    ++mCount;
  }

 private:
  using DurationNs = uint64_t;
  using Count = uint32_t;

  Atomic<DurationNs> mSumDurationsNs{0};
  Atomic<DurationNs> mLongestDurationNs{0};
  Atomic<Count> mCount{0};
  const char* mName;
};
// RAII object that measures its scoped lifetime duration and reports it to a
// `StaticTimersStats`.
class MOZ_RAII AutoTimersStats {
 public:
  explicit AutoTimersStats(StaticTimersStats& aStats)
      : mStats(aStats), mStart(TimeStamp::Now()) {}

  ~AutoTimersStats() { mStats.AddDurationFrom(mStart); }

 private:
  StaticTimersStats& mStats;
  TimeStamp mStart;
};
// Macro that should be used to collect basic statistics from measurements of
// block durations, from where this macro is, until the end of its enclosing
// scope. The name is used in the static variable name and when displaying stats
// at the end of the program; Another location could use the same name but their
// stats will not be combined, so use a different name if these locations should
// be distinguished.
#  define AUTO_TIMERS_STATS(name)                  \
    static ::StaticTimersStats sStat##name(#name); \
    ::AutoTimersStats autoStat##name(sStat##name);

// This macro only counts the number of times it's used, not durations.
// Don't mix with AUTO_TIMERS_STATS!
#  define COUNT_TIMERS_STATS(name)                 \
    static ::StaticTimersStats sStat##name(#name); \
    sStat##name.AddCount();

#else  // TIMERS_RUNTIME_STATS

#  define AUTO_TIMERS_STATS(name)
#  define COUNT_TIMERS_STATS(name)

#endif  // TIMERS_RUNTIME_STATS else
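// Illustrative usage sketch only: the function below is not part of this file,
// it is an assumed example showing how the stats macros above are meant to be
// used inside a hot code path when TIMERS_RUNTIME_STATS is defined.
#if 0
static void ExampleInstrumentedWork() {
  AUTO_TIMERS_STATS(ExampleInstrumentedWork);        // measures this scope
  COUNT_TIMERS_STATS(ExampleInstrumentedWorkCalls);  // counts invocations only
  // ... work to be measured ...
}
#endif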
NS_IMPL_ISUPPORTS_INHERITED(TimerThread, Runnable, nsIObserver)

TimerThread::TimerThread()
    : Runnable("TimerThread"),
      mMonitor("TimerThread.mMonitor"),
      mAllowedEarlyFiringMicroseconds(0) {}
TimerThread::~TimerThread() {
  NS_ASSERTION(mTimers.IsEmpty(), "Timers remain in TimerThread::~TimerThread");

#if TIMER_THREAD_STATISTICS
  {
    MonitorAutoLock lock(mMonitor);
    PrintStatistics();
  }
#endif
}
class TimerObserverRunnable : public Runnable {
 public:
  explicit TimerObserverRunnable(nsIObserver* aObserver)
      : mozilla::Runnable("TimerObserverRunnable"), mObserver(aObserver) {}

  NS_DECL_NSIRUNNABLE

 private:
  nsCOMPtr<nsIObserver> mObserver;
};
NS_IMETHODIMP
TimerObserverRunnable::Run() {
  nsCOMPtr<nsIObserverService> observerService =
      mozilla::services::GetObserverService();
  if (observerService) {
    observerService->AddObserver(mObserver, "sleep_notification", false);
    observerService->AddObserver(mObserver, "wake_notification", false);
    observerService->AddObserver(mObserver, "suspend_process_notification",
                                 false);
    observerService->AddObserver(mObserver, "resume_process_notification",
                                 false);
  }
  return NS_OK;
}
// TimerEventAllocator is a thread-safe allocator used only for nsTimerEvents.
// It's needed to avoid contention over the default allocator lock when
// firing timer events (see bug 733277). The thread-safety is required because
// nsTimerEvent objects are allocated on the timer thread, and freed on another
// thread. Because TimerEventAllocator has its own lock, contention over that
// lock is limited to the allocation and deallocation of nsTimerEvent objects.
//
// Because this is layered over ArenaAllocator, it never shrinks -- even
// "freed" nsTimerEvents aren't truly freed, they're just put onto a free-list
// for later recycling. So the amount of memory consumed will always be equal
// to the high-water mark consumption. But nsTimerEvents are small and it's
// unusual to have more than a few hundred of them, so this shouldn't be a
// problem in practice.
class TimerEventAllocator {
 private:
  // Node of the free-list of recycled nsTimerEvent slots.
  struct FreeEntry {
    FreeEntry* mNext;
  };

  ArenaAllocator<4096> mPool MOZ_GUARDED_BY(mMonitor);
  FreeEntry* mFirstFree MOZ_GUARDED_BY(mMonitor);
  mozilla::Monitor mMonitor;

 public:
  TimerEventAllocator()
      : mPool(), mFirstFree(nullptr), mMonitor("TimerEventAllocator") {}

  ~TimerEventAllocator() = default;

  void* Alloc(size_t aSize);
  void Free(void* aPtr);
};
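// Sketch of the intended alloc/free round trip (illustrative comment only,
// based on the Alloc/Free definitions further below): Alloc() pops a recycled
// slot off mFirstFree when one is available and otherwise carves a new slot
// out of mPool; Free() pushes the slot back onto mFirstFree, so the arena
// itself is never shrunk.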
// This is a nsICancelableRunnable because we can dispatch it to Workers and
// those can be shut down at any time, and in these cases, Cancel() is called
// instead of Run().
class nsTimerEvent final : public CancelableRunnable {
 public:
  NS_IMETHOD Run() override;
  nsresult Cancel() override {
    mTimer->Cancel();
    return NS_OK;
  }
#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
  NS_IMETHOD GetName(nsACString& aName) override;
#endif
  explicit nsTimerEvent(already_AddRefed<nsTimerImpl> aTimer,
                        ProfilerThreadId aTimerThreadId)
      : mozilla::CancelableRunnable("nsTimerEvent"),
        mTimer(aTimer),
        mGeneration(mTimer->GetGeneration()),
        mTimerThreadId(aTimerThreadId) {
    // Note: We override operator new for this class, and the override is
    // fallible, so it can return nullptr.
    if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug) ||
        profiler_thread_is_being_profiled_for_markers(mTimerThreadId)) {
      mInitTime = TimeStamp::Now();
    }
  }
  static void Init();
  static void Shutdown();
  static void DeleteAllocatorIfNeeded();

  static void* operator new(size_t aSize) noexcept(true) {
    return sAllocator->Alloc(aSize);
  }
  void operator delete(void* aPtr) {
    sAllocator->Free(aPtr);
    DeleteAllocatorIfNeeded();
  }

  already_AddRefed<nsTimerImpl> ForgetTimer() { return mTimer.forget(); }
 private:
  nsTimerEvent(const nsTimerEvent&) = delete;
  nsTimerEvent& operator=(const nsTimerEvent&) = delete;
  nsTimerEvent& operator=(const nsTimerEvent&&) = delete;

  ~nsTimerEvent() {
    MOZ_ASSERT(!sCanDeleteAllocator || sAllocatorUsers > 0,
               "This will result in us attempting to deallocate the "
               "nsTimerEvent allocator twice");
    sAllocatorUsers--;
  }
  TimeStamp mInitTime;
  RefPtr<nsTimerImpl> mTimer;
  const int32_t mGeneration;
  ProfilerThreadId mTimerThreadId;

  static TimerEventAllocator* sAllocator;

  static Atomic<int32_t, SequentiallyConsistent> sAllocatorUsers;
  static Atomic<bool, SequentiallyConsistent> sCanDeleteAllocator;
};
TimerEventAllocator* nsTimerEvent::sAllocator = nullptr;
Atomic<int32_t, SequentiallyConsistent> nsTimerEvent::sAllocatorUsers;
Atomic<bool, SequentiallyConsistent> nsTimerEvent::sCanDeleteAllocator;
void* TimerEventAllocator::Alloc(size_t aSize) {
  MOZ_ASSERT(aSize == sizeof(nsTimerEvent));

  mozilla::MonitorAutoLock lock(mMonitor);

  void* p;
  if (mFirstFree) {
    p = mFirstFree;
    mFirstFree = mFirstFree->mNext;
  } else {
    p = mPool.Allocate(aSize, fallible);
  }

  return p;
}
void TimerEventAllocator::Free(void* aPtr) {
  mozilla::MonitorAutoLock lock(mMonitor);

  FreeEntry* entry = reinterpret_cast<FreeEntry*>(aPtr);

  entry->mNext = mFirstFree;
  mFirstFree = entry;
}
struct TimerMarker {
  static constexpr Span<const char> MarkerTypeName() {
    return MakeStringSpan("Timer");
  }
  static void StreamJSONMarkerData(baseprofiler::SpliceableJSONWriter& aWriter,
                                   uint32_t aDelay, uint8_t aType,
                                   MarkerThreadId aThreadId, bool aCanceled) {
    aWriter.IntProperty("delay", aDelay);
    if (!aThreadId.IsUnspecified()) {
      // Tech note: If `ToNumber()` returns a uint64_t, the conversion to
      // int64_t is "implementation-defined" before C++20. This is
      // acceptable here, because this is a one-way conversion to a unique
      // identifier that's used to visually separate data by thread on the
      // front-end.
      aWriter.IntProperty(
          "threadId", static_cast<int64_t>(aThreadId.ThreadId().ToNumber()));
    }
    if (aCanceled) {
      aWriter.BoolProperty("canceled", true);
      // Show a red 'X' as a prefix on the marker chart for canceled timers.
      aWriter.StringProperty("prefix", "❌");
    }

    // The string property for the timer type is not written when the type is
    // one shot, as that's the type used almost all the time, and that would
    // consume space in the profiler buffer and then in the profile JSON,
    // getting in the way of capturing long power profiles.
    // Bug 1815677 might make this cheap to capture.
    if (aType != nsITimer::TYPE_ONE_SHOT) {
      if (aType == nsITimer::TYPE_REPEATING_SLACK) {
        aWriter.StringProperty("ttype", "repeating slack");
      } else if (aType == nsITimer::TYPE_REPEATING_PRECISE) {
        aWriter.StringProperty("ttype", "repeating precise");
      } else if (aType == nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP) {
        aWriter.StringProperty("ttype", "repeating precise can skip");
      } else if (aType == nsITimer::TYPE_REPEATING_SLACK_LOW_PRIORITY) {
        aWriter.StringProperty("ttype", "repeating slack low priority");
      } else if (aType == nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY) {
        aWriter.StringProperty("ttype", "low priority");
      }
    }
  }
  static MarkerSchema MarkerTypeDisplay() {
    using MS = MarkerSchema;
    MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
    schema.AddKeyLabelFormat("delay", "Delay", MS::Format::Milliseconds);
    schema.AddKeyLabelFormat("ttype", "Timer Type", MS::Format::String);
    schema.AddKeyLabelFormat("canceled", "Canceled", MS::Format::String);
    schema.SetChartLabel("{marker.data.prefix} {marker.data.delay}");
    schema.SetTableLabel(
        "{marker.name} - {marker.data.prefix} {marker.data.delay}");
    return schema;
  }
};
struct AddRemoveTimerMarker {
  static constexpr Span<const char> MarkerTypeName() {
    return MakeStringSpan("AddRemoveTimer");
  }
  static void StreamJSONMarkerData(baseprofiler::SpliceableJSONWriter& aWriter,
                                   const ProfilerString8View& aTimerName,
                                   uint32_t aDelay, MarkerThreadId aThreadId) {
    aWriter.StringProperty("name", aTimerName);
    aWriter.IntProperty("delay", aDelay);
    if (!aThreadId.IsUnspecified()) {
      // Tech note: If `ToNumber()` returns a uint64_t, the conversion to
      // int64_t is "implementation-defined" before C++20. This is
      // acceptable here, because this is a one-way conversion to a unique
      // identifier that's used to visually separate data by thread on the
      // front-end.
      aWriter.IntProperty(
          "threadId", static_cast<int64_t>(aThreadId.ThreadId().ToNumber()));
    }
  }
  static MarkerSchema MarkerTypeDisplay() {
    using MS = MarkerSchema;
    MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
    schema.AddKeyLabelFormatSearchable("name", "Name", MS::Format::String,
                                       MS::Searchable::Searchable);
    schema.AddKeyLabelFormat("delay", "Delay", MS::Format::Milliseconds);
    schema.SetTableLabel(
        "{marker.name} - {marker.data.name} - {marker.data.delay}");
    return schema;
  }
};
void nsTimerEvent::Init() { sAllocator = new TimerEventAllocator(); }

void nsTimerEvent::Shutdown() {
  sCanDeleteAllocator = true;
  DeleteAllocatorIfNeeded();
}

void nsTimerEvent::DeleteAllocatorIfNeeded() {
  if (sCanDeleteAllocator && sAllocatorUsers == 0) {
    delete sAllocator;
    sAllocator = nullptr;
  }
}
#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
NS_IMETHODIMP
nsTimerEvent::GetName(nsACString& aName) {
  bool current;
  MOZ_RELEASE_ASSERT(
      NS_SUCCEEDED(mTimer->mEventTarget->IsOnCurrentThread(&current)) &&
      current);

  mTimer->GetName(aName);
  return NS_OK;
}
#endif
NS_IMETHODIMP
nsTimerEvent::Run() {
  if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
    TimeStamp now = TimeStamp::Now();
    MOZ_LOG(GetTimerLog(), LogLevel::Debug,
            ("[this=%p] time between PostTimerEvent() and Fire(): %fms\n", this,
             (now - mInitTime).ToMilliseconds()));
  }

  if (profiler_thread_is_being_profiled_for_markers(mTimerThreadId)) {
    MutexAutoLock lock(mTimer->mMutex);
    nsAutoCString name;
    mTimer->GetName(name, lock);
    // This adds a marker with the timer name as the marker name, to make it
    // obvious which timers are being used. This marker will be useful to
    // understand which timers might be added and firing excessively often.
    profiler_add_marker(
        name, geckoprofiler::category::TIMER,
        MarkerOptions(MOZ_LIKELY(mInitTime)
                          ? MarkerTiming::Interval(
                                mTimer->mTimeout - mTimer->mDelay, mInitTime)
                          : MarkerTiming::IntervalUntilNowFrom(
                                mTimer->mTimeout - mTimer->mDelay),
                      MarkerThreadId(mTimerThreadId)),
        TimerMarker{}, mTimer->mDelay.ToMilliseconds(), mTimer->mType,
        MarkerThreadId::CurrentThread(), false);
    // This marker is meant to help understand the behavior of the timer thread.
    profiler_add_marker(
        "PostTimerEvent", geckoprofiler::category::OTHER,
        MarkerOptions(MOZ_LIKELY(mInitTime)
                          ? MarkerTiming::IntervalUntilNowFrom(mInitTime)
                          : MarkerTiming::InstantNow(),
                      MarkerThreadId(mTimerThreadId)),
        AddRemoveTimerMarker{}, name, mTimer->mDelay.ToMilliseconds(),
        MarkerThreadId::CurrentThread());
  }

  mTimer->Fire(mGeneration);

  return NS_OK;
}
nsresult TimerThread::Init() {
  mMonitor.AssertCurrentThreadOwns();
  MOZ_LOG(GetTimerLog(), LogLevel::Debug,
          ("TimerThread::Init [%d]\n", mInitialized));

  if (!mInitialized) {
    nsTimerEvent::Init();

    // We hold on to mThread to keep the thread alive.
    NS_NewNamedThread("Timer", getter_AddRefs(mThread), this,
                      {.stackSize = nsIThreadManager::DEFAULT_STACK_SIZE,
                       .blockDispatch = true});

    RefPtr<TimerObserverRunnable> r = new TimerObserverRunnable(this);
    if (NS_IsMainThread()) {
      r->Run();
    } else {
      NS_DispatchToMainThread(r);
    }

    mInitialized = true;
  }

  if (!mThread) {
    return NS_ERROR_FAILURE;
  }

  return NS_OK;
}
nsresult TimerThread::Shutdown() {
  MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("TimerThread::Shutdown begin\n"));

  if (!mThread) {
    return NS_ERROR_NOT_INITIALIZED;
  }

  nsTArray<RefPtr<nsTimerImpl>> timers;
  {
    MonitorAutoLock lock(mMonitor);

    mShutdown = true;

    // notify the cond var so that Run() can return
    if (mWaiting) {
      mNotified = true;
      mMonitor.Notify();
    }

    // Need to copy the content of the mTimers array to a local array,
    // because the calls to the timers' Cancel() (and their release)
    // must not be done under the lock. The destructor of a callback
    // might potentially call some code reentering the same lock,
    // which leads to unexpected behavior or deadlock.
    timers.SetCapacity(mTimers.Length());
    for (Entry& entry : mTimers) {
      if (entry.Value()) {
        timers.AppendElement(entry.Take());
      }
    }
    mTimers.Clear();
  }

  for (const RefPtr<nsTimerImpl>& timer : timers) {
    timer->Cancel();
  }

  mThread->Shutdown();  // wait for the thread to die

  nsTimerEvent::Shutdown();

  MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("TimerThread::Shutdown end\n"));
  return NS_OK;
}
struct MicrosecondsToInterval {
  PRIntervalTime operator[](size_t aMs) const {
    return PR_MicrosecondsToInterval(aMs);
  }
};

struct IntervalComparator {
  int operator()(PRIntervalTime aInterval) const {
    return (0 < aInterval) ? -1 : 1;
  }
};
#ifdef DEBUG
void TimerThread::VerifyTimerListConsistency() const {
  mMonitor.AssertCurrentThreadOwns();

  // Find the first non-canceled timer (and check its cached timeout if we find
  // it).
  const size_t timerCount = mTimers.Length();
  size_t lastNonCanceledTimerIndex = 0;
  while (lastNonCanceledTimerIndex < timerCount &&
         !mTimers[lastNonCanceledTimerIndex].Value()) {
    ++lastNonCanceledTimerIndex;
  }
  MOZ_ASSERT(lastNonCanceledTimerIndex == timerCount ||
             mTimers[lastNonCanceledTimerIndex].Value());
  MOZ_ASSERT(lastNonCanceledTimerIndex == timerCount ||
             mTimers[lastNonCanceledTimerIndex].Value()->mTimeout ==
                 mTimers[lastNonCanceledTimerIndex].Timeout());

  // Verify that mTimers is sorted and the cached timeouts are consistent.
  for (size_t timerIndex = lastNonCanceledTimerIndex + 1;
       timerIndex < timerCount; ++timerIndex) {
    if (mTimers[timerIndex].Value()) {
      MOZ_ASSERT(mTimers[timerIndex].Timeout() ==
                 mTimers[timerIndex].Value()->mTimeout);
      MOZ_ASSERT(mTimers[timerIndex].Timeout() >=
                 mTimers[lastNonCanceledTimerIndex].Timeout());
      lastNonCanceledTimerIndex = timerIndex;
    }
  }
}
#endif
size_t TimerThread::ComputeTimerInsertionIndex(const TimeStamp& timeout) const {
  mMonitor.AssertCurrentThreadOwns();

  const size_t timerCount = mTimers.Length();

  size_t firstGtIndex = 0;
  while (firstGtIndex < timerCount &&
         (!mTimers[firstGtIndex].Value() ||
          mTimers[firstGtIndex].Timeout() <= timeout)) {
    ++firstGtIndex;
  }

  return firstGtIndex;
}
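// Worked example (illustrative comment only, not from the original file):
// with cached timeouts [10ms, 20ms, 20ms, 30ms] and a new timer due at 20ms,
// the scan above skips every entry whose timeout is <= 20ms (and every
// canceled entry) and returns index 3, so timers with equal timeouts keep
// their insertion order.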
TimeStamp TimerThread::ComputeWakeupTimeFromTimers() const {
  mMonitor.AssertCurrentThreadOwns();

  // Timer list should be non-empty and first timer should always be
  // non-canceled at this point and we rely on that here.
  MOZ_ASSERT(!mTimers.IsEmpty());
  MOZ_ASSERT(mTimers[0].Value());

  // Overview: Find the last timer in the list that can be "bundled" together in
  // the same wake-up with mTimers[0] and use its timeout as our target wake-up
  // time.

  // bundleWakeup is when we should wake up in order to be able to fire all of
  // the timers in our selected bundle. It will always be the timeout of the
  // last timer in the bundle.
  TimeStamp bundleWakeup = mTimers[0].Timeout();

  // cutoffTime is the latest that we can wake up for the timers currently
  // accepted into the bundle. This needs to be updated as we go through the
  // list because later timers may have more strict delay tolerances.
  const TimeDuration minTimerDelay = TimeDuration::FromMilliseconds(
      StaticPrefs::timer_minimum_firing_delay_tolerance_ms());
  const TimeDuration maxTimerDelay = TimeDuration::FromMilliseconds(
      StaticPrefs::timer_maximum_firing_delay_tolerance_ms());
  TimeStamp cutoffTime =
      bundleWakeup + ComputeAcceptableFiringDelay(mTimers[0].Delay(),
                                                  minTimerDelay, maxTimerDelay);

  const size_t timerCount = mTimers.Length();
  for (size_t entryIndex = 1; entryIndex < timerCount; ++entryIndex) {
    const Entry& curEntry = mTimers[entryIndex];
    const nsTimerImpl* curTimer = curEntry.Value();
    if (!curTimer) {
      // Canceled timer - skip it
      continue;
    }

    const TimeStamp curTimerDue = curEntry.Timeout();
    if (curTimerDue > cutoffTime) {
      // Can't include this timer in the bundle - it fires too late.
      break;
    }

    // This timer can be included in the bundle. Update bundleWakeup and
    // cutoffTime.
    bundleWakeup = curTimerDue;
    cutoffTime = std::min(
        curTimerDue + ComputeAcceptableFiringDelay(
                          curEntry.Delay(), minTimerDelay, maxTimerDelay),
        cutoffTime);
    MOZ_ASSERT(bundleWakeup <= cutoffTime);
  }

#if !defined(XP_WIN)
  // Due to the fact that, on Windows, each TimeStamp object holds two distinct
  // "values", this assert is not valid there. See bug 1829983 for the details.
  MOZ_ASSERT(bundleWakeup - mTimers[0].Timeout() <=
             ComputeAcceptableFiringDelay(mTimers[0].Delay(), minTimerDelay,
                                          maxTimerDelay));
#endif

  return bundleWakeup;
}
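// Bundling example (illustrative comment only, with made-up numbers): if
// mTimers holds timers due at t=100ms (acceptable delay 10ms) and t=105ms
// (acceptable delay 2ms), the first timer sets cutoffTime to 110ms, the second
// fits under that cutoff, so bundleWakeup becomes 105ms and cutoffTime shrinks
// to min(105+2, 110) = 107ms; a third timer due at 120ms would not join the
// bundle.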
TimeDuration TimerThread::ComputeAcceptableFiringDelay(
    TimeDuration timerDuration, TimeDuration minDelay,
    TimeDuration maxDelay) const {
  // Use the timer's duration divided by this value as a base for how much
  // firing delay a timer can accept. 8 was chosen specifically because it is a
  // power of two which means that this division turns nicely into a shift.
  constexpr int64_t timerDurationDivider = 8;
  static_assert(IsPowerOfTwo(static_cast<uint64_t>(timerDurationDivider)));
  const TimeDuration tmp = timerDuration / timerDurationDivider;
  return std::min(std::max(minDelay, tmp), maxDelay);
}
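// Worked example (illustrative comment only, the delays are made up and do
// not come from the actual prefs): with minDelay = 4ms and maxDelay = 40ms, a
// 16ms timer yields 16/8 = 2ms which is clamped up to 4ms, an 80ms timer
// yields 80/8 = 10ms, and a 1000ms timer yields 125ms which is clamped down
// to 40ms.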
NS_IMETHODIMP
TimerThread::Run() {
  MonitorAutoLock lock(mMonitor);

  mProfilerThreadId = profiler_current_thread_id();

  // TODO: Make mAllowedEarlyFiringMicroseconds const and initialize it in the
  // constructor.
  mAllowedEarlyFiringMicroseconds = 250;
  const TimeDuration allowedEarlyFiring =
      TimeDuration::FromMicroseconds(mAllowedEarlyFiringMicroseconds);

  bool forceRunNextTimer = false;

  // Queue for tracking of how many timers are fired on each wake-up. We need to
  // buffer these locally and only send off to glean occasionally to avoid
  // overhead.
  static constexpr size_t kMaxQueuedTimerFired = 128;
  size_t queuedTimerFiredCount = 0;
  AutoTArray<uint64_t, kMaxQueuedTimerFired> queuedTimersFiredPerWakeup;
  queuedTimersFiredPerWakeup.SetLengthAndRetainStorage(kMaxQueuedTimerFired);

  uint64_t timersFiredThisWakeup = 0;

  while (!mShutdown) {
    // Have to use PRIntervalTime here, since PR_WaitCondVar takes it
    TimeDuration waitFor;
    bool forceRunThisTimer = forceRunNextTimer;
    forceRunNextTimer = false;
#ifdef DEBUG
    VerifyTimerListConsistency();
#endif

    if (mSleeping) {
      // Sleep for 0.1 seconds while not firing timers.
      uint32_t milliseconds = 100;
      if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
        milliseconds = ChaosMode::randomUint32LessThan(200);
      }
      waitFor = TimeDuration::FromMilliseconds(milliseconds);
    } else {
      waitFor = TimeDuration::Forever();
      TimeStamp now = TimeStamp::Now();
#if TIMER_THREAD_STATISTICS
      if (!mNotified && !mIntendedWakeupTime.IsNull() &&
          now < mIntendedWakeupTime) {
        ++mEarlyWakeups;
        const double earlinessms = (mIntendedWakeupTime - now).ToMilliseconds();
        mTotalEarlyWakeupTime += earlinessms;
      }
#endif

      RemoveLeadingCanceledTimersInternal();

      if (!mTimers.IsEmpty()) {
        if (now + allowedEarlyFiring >= mTimers[0].Value()->mTimeout ||
            forceRunThisTimer) {
        next:
          // NB: AddRef before the Release under RemoveTimerInternal to avoid
          // mRefCnt passing through zero, in case all other refs than the one
          // from mTimers have gone away (the last non-mTimers[i]-ref's Release
          // must be racing with us, blocked in gThread->RemoveTimer waiting
          // for TimerThread::mMonitor, under nsTimerImpl::Release.

          RefPtr<nsTimerImpl> timerRef(mTimers[0].Take());
          RemoveFirstTimerInternal();
          MOZ_LOG(GetTimerLog(), LogLevel::Debug,
                  ("Timer thread woke up %fms from when it was supposed to\n",
                   fabs((now - timerRef->mTimeout).ToMilliseconds())));

          // We are going to let the call to PostTimerEvent here handle the
          // release of the timer so that we don't end up releasing the timer
          // on the TimerThread instead of on the thread it targets.
          {
            ++timersFiredThisWakeup;
            LogTimerEvent::Run run(timerRef.get());
            PostTimerEvent(timerRef.forget());
          }

          if (mShutdown) {
            break;
          }

          // Update now, as PostTimerEvent plus the locking may have taken a
          // tick or two, and we may goto next below.
          now = TimeStamp::Now();
        }
      }

      RemoveLeadingCanceledTimersInternal();

      if (!mTimers.IsEmpty()) {
        TimeStamp timeout = mTimers[0].Value()->mTimeout;

        // Don't wait at all (even for PR_INTERVAL_NO_WAIT) if the next timer
        // is due now or overdue.
        //
        // Note that we can only sleep for integer values of a certain
        // resolution. We use mAllowedEarlyFiringMicroseconds, calculated
        // before, to do the optimal rounding (i.e., of how to decide what
        // interval is so small we should not wait at all).
        double microseconds = (timeout - now).ToMicroseconds();

        // The mean value of sFractions must be 1 to ensure that the average of
        // a long sequence of timeouts converges to the actual sum of their
        // timeouts.
        static constexpr double sChaosFractions[] = {0.0, 0.25, 0.5, 0.75,
                                                     1.0, 1.75, 2.75};
        if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
          microseconds *= sChaosFractions[ChaosMode::randomUint32LessThan(
              ArrayLength(sChaosFractions))];
          forceRunNextTimer = true;
        }

        if (microseconds < mAllowedEarlyFiringMicroseconds) {
          forceRunNextTimer = false;
          goto next;  // round down; execute event now
        }

        // TECHNICAL NOTE: Determining waitFor (by subtracting |now| from our
        // desired wake-up time) at this point is not ideal. For one thing, the
        // |now| that we have at this point is somewhat old. Secondly, there is
        // quite a bit of code between here and where we actually use waitFor to
        // request sleep. If I am thinking about this correctly, both of these
        // will contribute to us requesting more sleep than is actually needed
        // to wake up at our desired time. We could avoid this problem by only
        // determining our desired wake-up time here and then calculating the
        // wait time when we're actually about to sleep.
        const TimeStamp wakeupTime = ComputeWakeupTimeFromTimers();
        waitFor = wakeupTime - now;

        // If this were to fail that would mean that we had more timers that we
        // should have fired.
        MOZ_ASSERT(!waitFor.IsZero());

        if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
          // If chaos mode is active then mess with the amount of time that we
          // request to sleep (without changing what we record as our expected
          // wake-up time). This will simulate unintended early/late wake-ups.
          const double waitInMs = waitFor.ToMilliseconds();
          const double chaosWaitInMs =
              waitInMs * sChaosFractions[ChaosMode::randomUint32LessThan(
                             ArrayLength(sChaosFractions))];
          waitFor = TimeDuration::FromMilliseconds(chaosWaitInMs);
        }

        mIntendedWakeupTime = wakeupTime;
      } else {
        mIntendedWakeupTime = TimeStamp{};
      }
    }

    if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
      if (waitFor == TimeDuration::Forever())
        MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("waiting forever\n"));
      else
        MOZ_LOG(GetTimerLog(), LogLevel::Debug,
                ("waiting for %f\n", waitFor.ToMilliseconds()));
    }
    // About to sleep - let's make note of how many timers we processed and
    // see if we should send out a new batch of telemetry.
    queuedTimersFiredPerWakeup[queuedTimerFiredCount] = timersFiredThisWakeup;
    ++queuedTimerFiredCount;
    if (queuedTimerFiredCount == kMaxQueuedTimerFired) {
      glean::timer_thread::timers_fired_per_wakeup.AccumulateSamples(
          queuedTimersFiredPerWakeup);
      queuedTimerFiredCount = 0;
    }
#if TIMER_THREAD_STATISTICS
    {
      size_t bucketIndex = 0;
      while (bucketIndex < sTimersFiredPerWakeupBucketCount - 1 &&
             timersFiredThisWakeup >
                 sTimersFiredPerWakeupThresholds[bucketIndex]) {
        ++bucketIndex;
      }
      MOZ_ASSERT(bucketIndex < sTimersFiredPerWakeupBucketCount);
      ++mTimersFiredPerWakeup[bucketIndex];

      if (mNotified) {
        ++mTimersFiredPerNotifiedWakeup[bucketIndex];
        ++mTotalNotifiedWakeupCount;
      } else {
        ++mTimersFiredPerUnnotifiedWakeup[bucketIndex];
        ++mTotalUnnotifiedWakeupCount;
      }
    }
#endif

    timersFiredThisWakeup = 0;
    mWaiting = true;
    mNotified = false;
    {
      AUTO_PROFILER_TRACING_MARKER("TimerThread", "Wait", OTHER);
      mMonitor.Wait(waitFor);
    }
    mWaiting = false;

    if (mNotified) {
      forceRunNextTimer = false;
    }
  }
  // About to shut down - let's send out the final batch of timers fired counts.
  if (queuedTimerFiredCount != 0) {
    queuedTimersFiredPerWakeup.SetLengthAndRetainStorage(queuedTimerFiredCount);
    glean::timer_thread::timers_fired_per_wakeup.AccumulateSamples(
        queuedTimersFiredPerWakeup);
  }

  return NS_OK;
}
nsresult TimerThread::AddTimer(nsTimerImpl* aTimer,
                               const MutexAutoLock& aProofOfLock) {
  MonitorAutoLock lock(mMonitor);
  AUTO_TIMERS_STATS(TimerThread_AddTimer);

  if (!aTimer->mEventTarget) {
    return NS_ERROR_NOT_INITIALIZED;
  }

  nsresult rv = Init();
  if (NS_FAILED(rv)) {
    return rv;
  }

  // Awaken the timer thread if:
  // - This timer needs to fire *before* the Timer Thread is scheduled to wake
  //   up.
  // or
  // - The delay is 0, which is usually meant to be run as soon as possible.
  //   Note: Even if the thread is scheduled to wake up now/soon, on some
  //   systems there could be a significant delay compared to notifying, which
  //   is almost immediate; and some users of 0-delay depend on it being this
  //   fast.
  const TimeDuration minTimerDelay = TimeDuration::FromMilliseconds(
      StaticPrefs::timer_minimum_firing_delay_tolerance_ms());
  const TimeDuration maxTimerDelay = TimeDuration::FromMilliseconds(
      StaticPrefs::timer_maximum_firing_delay_tolerance_ms());
  const TimeDuration firingDelay = ComputeAcceptableFiringDelay(
      aTimer->mDelay, minTimerDelay, maxTimerDelay);
  const bool firingBeforeNextWakeup =
      mIntendedWakeupTime.IsNull() ||
      (aTimer->mTimeout + firingDelay < mIntendedWakeupTime);
  const bool wakeUpTimerThread =
      mWaiting && (firingBeforeNextWakeup || aTimer->mDelay.IsZero());

#if TIMER_THREAD_STATISTICS
  if (mTotalTimersAdded == 0) {
    mFirstTimerAdded = TimeStamp::Now();
  }
  ++mTotalTimersAdded;
#endif

  // Add the timer to our list.
  if (!AddTimerInternal(*aTimer)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (wakeUpTimerThread) {
    mNotified = true;
    mMonitor.Notify();
  }

  if (profiler_thread_is_being_profiled_for_markers(mProfilerThreadId)) {
    nsAutoCString name;
    aTimer->GetName(name, aProofOfLock);

    nsLiteralCString prefix("Anonymous_");
    profiler_add_marker(
        "AddTimer", geckoprofiler::category::OTHER,
        MarkerOptions(MarkerThreadId(mProfilerThreadId),
                      MarkerStack::MaybeCapture(
                          name.Equals("nonfunction:JS") ||
                          StringHead(name, prefix.Length()) == prefix)),
        AddRemoveTimerMarker{}, name, aTimer->mDelay.ToMilliseconds(),
        MarkerThreadId::CurrentThread());
  }

  return NS_OK;
}
nsresult TimerThread::RemoveTimer(nsTimerImpl* aTimer,
                                  const MutexAutoLock& aProofOfLock) {
  MonitorAutoLock lock(mMonitor);
  AUTO_TIMERS_STATS(TimerThread_RemoveTimer);

  // Remove the timer from our array. Tell callers that aTimer was not found
  // by returning NS_ERROR_NOT_AVAILABLE.

  if (!RemoveTimerInternal(*aTimer)) {
    return NS_ERROR_NOT_AVAILABLE;
  }

#if TIMER_THREAD_STATISTICS
  ++mTotalTimersRemoved;
#endif

  // Note: The timer thread is *not* awoken.
  // The removed-timer entry is just left null, and will be reused (by a new or
  // re-set timer) or discarded (when the timer thread logic handles non-null
  // timers around it).
  // If this was the front timer, and in the unlikely case that its entry is not
  // soon reused by a re-set timer, the timer thread will wake up at the
  // previously-scheduled time, but will quickly notice that there is no actual
  // pending timer, and will restart its wait until the following real timeout.

  if (profiler_thread_is_being_profiled_for_markers(mProfilerThreadId)) {
    nsAutoCString name;
    aTimer->GetName(name, aProofOfLock);

    nsLiteralCString prefix("Anonymous_");
    // This marker is meant to help understand the behavior of the timer thread.
    profiler_add_marker(
        "RemoveTimer", geckoprofiler::category::OTHER,
        MarkerOptions(MarkerThreadId(mProfilerThreadId),
                      MarkerStack::MaybeCapture(
                          name.Equals("nonfunction:JS") ||
                          StringHead(name, prefix.Length()) == prefix)),
        AddRemoveTimerMarker{}, name, aTimer->mDelay.ToMilliseconds(),
        MarkerThreadId::CurrentThread());
    // This adds a marker with the timer name as the marker name, to make it
    // obvious which timers are being used. This marker will be useful to
    // understand which timers might be added and removed excessively often.
    profiler_add_marker(name, geckoprofiler::category::TIMER,
                        MarkerOptions(MarkerTiming::IntervalUntilNowFrom(
                                          aTimer->mTimeout - aTimer->mDelay),
                                      MarkerThreadId(mProfilerThreadId)),
                        TimerMarker{}, aTimer->mDelay.ToMilliseconds(),
                        aTimer->mType, MarkerThreadId::CurrentThread(), true);
  }

  return NS_OK;
}
TimeStamp TimerThread::FindNextFireTimeForCurrentThread(TimeStamp aDefault,
                                                        uint32_t aSearchBound) {
  MonitorAutoLock lock(mMonitor);
  AUTO_TIMERS_STATS(TimerThread_FindNextFireTimeForCurrentThread);

  for (const Entry& entry : mTimers) {
    const nsTimerImpl* timer = entry.Value();
    if (timer) {
      if (entry.Timeout() > aDefault) {
        return aDefault;
      }

      // Don't yield to timers created with the *_LOW_PRIORITY type.
      if (!timer->IsLowPriority()) {
        bool isOnCurrentThread = false;
        nsresult rv =
            timer->mEventTarget->IsOnCurrentThread(&isOnCurrentThread);
        if (NS_SUCCEEDED(rv) && isOnCurrentThread) {
          return entry.Timeout();
        }
      }

      if (aSearchBound == 0) {
        // Return the currently highest timeout when we reach the bound.
        // This won't give accurate information if we stop before finding
        // any timer for the current thread, but at least won't report too
        // long an idle period.
        return timer->mTimeout;
      }

      --aSearchBound;
    }
  }

  // No timers for this thread, return the default.
  return aDefault;
}
// This function must be called from within a lock.
// Also: we hold the mutex for the nsTimerImpl.
bool TimerThread::AddTimerInternal(nsTimerImpl& aTimer) {
  mMonitor.AssertCurrentThreadOwns();
  aTimer.mMutex.AssertCurrentThreadOwns();
  AUTO_TIMERS_STATS(TimerThread_AddTimerInternal);

  LogTimerEvent::LogDispatch(&aTimer);

  const TimeStamp& timeout = aTimer.mTimeout;
  const size_t insertionIndex = ComputeTimerInsertionIndex(timeout);

  if (insertionIndex != 0 && !mTimers[insertionIndex - 1].Value()) {
    // Very common scenario in practice: The timer just before the insertion
    // point is canceled, overwrite it.
    AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_overwrite_before);
    mTimers[insertionIndex - 1] = Entry{aTimer};
    return true;
  }

  const size_t length = mTimers.Length();
  if (insertionIndex == length) {
    // We're at the end (including when it's the very first insertion), add the
    // new timer at the end.
    AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_append);
    return mTimers.AppendElement(Entry{aTimer}, mozilla::fallible);
  }

  if (!mTimers[insertionIndex].Value()) {
    // The timer at the insertion point is canceled, overwrite it.
    AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_overwrite);
    mTimers[insertionIndex] = Entry{aTimer};
    return true;
  }

  // The new timer has to be inserted.
  AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_insert);
  // The capacity should be checked first, because if it needs to be increased
  // and the memory allocation fails, only the new timer should be lost.
  if (length == mTimers.Capacity() && mTimers[length - 1].Value()) {
    // We have reached capacity, and the last entry is not canceled, so we
    // really want to increase the capacity in case the extra slot is required.
    // To force-expand the array, append a canceled-timer entry with a timestamp
    // far in the future.
    // This empty Entry may be used below to receive the moved-from previous
    // entry. If not, it may be used in a later call if we need to append a new
    // timer at the end.
    AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_insert_expand);
    if (!mTimers.AppendElement(
            Entry{mTimers[length - 1].Timeout() +
                  TimeDuration::FromSeconds(365.0 * 24.0 * 60.0 * 60.0)},
            mozilla::fallible)) {
      return false;
    }
  }

  // Extract the timer at the insertion point, and put the new timer in its
  // place.
  Entry extractedEntry = std::exchange(mTimers[insertionIndex], Entry{aTimer});
  // Following entries can be pushed until we hit a canceled timer or the end.
  for (size_t i = insertionIndex + 1; i < length; ++i) {
    Entry& entryRef = mTimers[i];
    if (!entryRef.Value()) {
      // Canceled entry, overwrite it with the extracted entry from before.
      COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_overwrite);
      entryRef = std::move(extractedEntry);
      return true;
    }
    // Write extracted entry from before, and extract current entry.
    COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_shifts);
    std::swap(entryRef, extractedEntry);
  }
  // We've reached the end of the list, with still one extracted entry to
  // re-insert. We've checked the capacity above, this cannot fail.
  COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_append);
  mTimers.AppendElement(std::move(extractedEntry));
  return true;
}
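// Insertion walkthrough (illustrative comment only, not from the original
// file): with entries [5ms, (canceled), 20ms] and a new 10ms timer,
// ComputeTimerInsertionIndex returns 2, the canceled entry just before the
// insertion point is simply overwritten (the _overwrite_before path), and no
// shifting happens; only when the slots around the insertion point are all
// live does the extract-and-swap loop above push later entries toward the
// next canceled slot or the end of the array.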
// This function must be called from within a lock.
// Also: we hold the mutex for the nsTimerImpl.
bool TimerThread::RemoveTimerInternal(nsTimerImpl& aTimer) {
  mMonitor.AssertCurrentThreadOwns();
  aTimer.mMutex.AssertCurrentThreadOwns();
  AUTO_TIMERS_STATS(TimerThread_RemoveTimerInternal);
  if (!aTimer.IsInTimerThread()) {
    COUNT_TIMERS_STATS(TimerThread_RemoveTimerInternal_not_in_list);
    return false;
  }
  AUTO_TIMERS_STATS(TimerThread_RemoveTimerInternal_in_list);
  for (auto& entry : mTimers) {
    if (entry.Value() == &aTimer) {
      entry.Forget();
      return true;
    }
  }
  MOZ_ASSERT(!aTimer.IsInTimerThread(),
             "Not found in the list but it should be!?");
  return false;
}
void TimerThread::RemoveLeadingCanceledTimersInternal() {
  mMonitor.AssertCurrentThreadOwns();
  AUTO_TIMERS_STATS(TimerThread_RemoveLeadingCanceledTimersInternal);

  size_t toRemove = 0;
  while (toRemove < mTimers.Length() && !mTimers[toRemove].Value()) {
    ++toRemove;
  }
  mTimers.RemoveElementsAt(0, toRemove);
}
void TimerThread::RemoveFirstTimerInternal() {
  mMonitor.AssertCurrentThreadOwns();
  AUTO_TIMERS_STATS(TimerThread_RemoveFirstTimerInternal);
  MOZ_ASSERT(!mTimers.IsEmpty());
  mTimers.RemoveElementAt(0);
}
void TimerThread::PostTimerEvent(already_AddRefed<nsTimerImpl> aTimerRef) {
  mMonitor.AssertCurrentThreadOwns();
  AUTO_TIMERS_STATS(TimerThread_PostTimerEvent);

  RefPtr<nsTimerImpl> timer(aTimerRef);

#if TIMER_THREAD_STATISTICS
  const double actualFiringDelay =
      std::max((TimeStamp::Now() - timer->mTimeout).ToMilliseconds(), 0.0);
  if (mNotified) {
    ++mTotalTimersFiredNotified;
    mTotalActualTimerFiringDelayNotified += actualFiringDelay;
  } else {
    ++mTotalTimersFiredUnnotified;
    mTotalActualTimerFiringDelayUnnotified += actualFiringDelay;
  }
#endif

  if (!timer->mEventTarget) {
    NS_ERROR("Attempt to post timer event to NULL event target");
    return;
  }

  // XXX we may want to reuse this nsTimerEvent in the case of repeating timers.

  // Since we already addref'd 'timer', we don't need to addref here.
  // We will release either in ~nsTimerEvent(), or pass the reference back to
  // the caller. We need to copy the generation number from this timer into the
  // event, so we can avoid firing a timer that was re-initialized after being
  // canceled.

  nsCOMPtr<nsIEventTarget> target = timer->mEventTarget;

  void* p = nsTimerEvent::operator new(sizeof(nsTimerEvent));
  if (!p) {
    return;
  }
  RefPtr<nsTimerEvent> event =
      ::new (KnownNotNull, p) nsTimerEvent(timer.forget(), mProfilerThreadId);

  nsresult rv;
  {
    // We release mMonitor around the Dispatch because if the Dispatch interacts
    // with the timer API we'll deadlock.
    MonitorAutoUnlock unlock(mMonitor);
    rv = target->Dispatch(event, NS_DISPATCH_NORMAL);
    if (NS_FAILED(rv)) {
      timer = event->ForgetTimer();
      // We do this to avoid possible deadlock by taking the two locks in a
      // different order than is used in RemoveTimer(). RemoveTimer() has
      // aTimer->mMutex first. We use timer.get() to keep static analysis
      // happy.
      // NOTE: I'm not sure that any of the below is actually necessary. It
      // seems to me that the timer that we're trying to fire will have already
      // been removed prior to this.
      MutexAutoLock lock1(timer.get()->mMutex);
      MonitorAutoLock lock2(mMonitor);
      RemoveTimerInternal(*timer);
    }
  }
}
void TimerThread::DoBeforeSleep() {
  MonitorAutoLock lock(mMonitor);
  mSleeping = true;
}

// Note: wake may be notified without preceding sleep notification
void TimerThread::DoAfterSleep() {
  MonitorAutoLock lock(mMonitor);
  mSleeping = false;

  // Wake up the timer thread to re-process the array to ensure the sleep delay
  // is correct, and fire any expired timers (perhaps quite a few)
  mNotified = true;
  PROFILER_MARKER_UNTYPED("AfterSleep", OTHER,
                          MarkerThreadId(mProfilerThreadId));
  mMonitor.Notify();
}
NS_IMETHODIMP
TimerThread::Observe(nsISupports* /* aSubject */, const char* aTopic,
                     const char16_t* /* aData */) {
  if (StaticPrefs::timer_ignore_sleep_wake_notifications()) {
    return NS_OK;
  }

  if (strcmp(aTopic, "sleep_notification") == 0 ||
      strcmp(aTopic, "suspend_process_notification") == 0) {
    DoBeforeSleep();
  } else if (strcmp(aTopic, "wake_notification") == 0 ||
             strcmp(aTopic, "resume_process_notification") == 0) {
    DoAfterSleep();
  }

  return NS_OK;
}
uint32_t TimerThread::AllowedEarlyFiringMicroseconds() {
  MonitorAutoLock lock(mMonitor);
  return mAllowedEarlyFiringMicroseconds;
}
#if TIMER_THREAD_STATISTICS
void TimerThread::PrintStatistics() const {
  mMonitor.AssertCurrentThreadOwns();

  const TimeStamp freshNow = TimeStamp::Now();
  const double timeElapsed = mFirstTimerAdded.IsNull()
                                 ? 0.0
                                 : (freshNow - mFirstTimerAdded).ToSeconds();
  printf_stderr("TimerThread Stats (Total time %8.2fs)\n", timeElapsed);

  printf_stderr("Added: %6llu Removed: %6llu Fired: %6llu\n", mTotalTimersAdded,
                mTotalTimersRemoved,
                mTotalTimersFiredNotified + mTotalTimersFiredUnnotified);

  auto PrintTimersFiredBucket =
      [](const AutoTArray<size_t, sTimersFiredPerWakeupBucketCount>& buckets,
         const size_t wakeupCount, const size_t timersFiredCount,
         const double totalTimerDelay, const char* label) {
        printf_stderr("%s : [", label);
        for (size_t bucketVal : buckets) {
          printf_stderr(" %5llu", bucketVal);
        }
        printf_stderr(
            " ] Wake-ups/timer %6llu / %6llu (%7.4f) Avg Timer Delay %7.4f\n",
            wakeupCount, timersFiredCount,
            static_cast<double>(wakeupCount) / timersFiredCount,
            totalTimerDelay / timersFiredCount);
      };

  printf_stderr("Wake-ups:\n");
  PrintTimersFiredBucket(
      mTimersFiredPerWakeup, mTotalWakeupCount,
      mTotalTimersFiredNotified + mTotalTimersFiredUnnotified,
      mTotalActualTimerFiringDelayNotified +
          mTotalActualTimerFiringDelayUnnotified,
      "All ");
  PrintTimersFiredBucket(mTimersFiredPerNotifiedWakeup,
                         mTotalNotifiedWakeupCount, mTotalTimersFiredNotified,
                         mTotalActualTimerFiringDelayNotified, "Notified ");
  PrintTimersFiredBucket(mTimersFiredPerUnnotifiedWakeup,
                         mTotalUnnotifiedWakeupCount,
                         mTotalTimersFiredUnnotified,
                         mTotalActualTimerFiringDelayUnnotified, "Unnotified ");

  printf_stderr("Early Wake-ups: %6llu Avg: %7.4fms\n", mEarlyWakeups,
                mTotalEarlyWakeupTime / mEarlyWakeups);
}
#endif
/* This nsReadOnlyTimer class is used for the values returned by the
 * TimerThread::GetTimers method.
 * It is not possible to return a strong reference to the nsTimerImpl
 * instance (that could extend the lifetime of the timer and cause it to fire
 * a callback pointing to already freed memory) or a weak reference
 * (nsSupportsWeakReference doesn't support freeing the referee on a thread
 * that isn't the thread that owns the weak reference), so instead the timer
 * name, delay and type are copied to a new object. */
class nsReadOnlyTimer final : public nsITimer {
 public:
  explicit nsReadOnlyTimer(const nsACString& aName, uint32_t aDelay,
                           uint32_t aType)
      : mName(aName), mDelay(aDelay), mType(aType) {}
  NS_DECL_ISUPPORTS

  NS_IMETHOD Init(nsIObserver* aObserver, uint32_t aDelayInMs,
                  uint32_t aType) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD InitWithCallback(nsITimerCallback* aCallback, uint32_t aDelayInMs,
                              uint32_t aType) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD InitHighResolutionWithCallback(nsITimerCallback* aCallback,
                                            const mozilla::TimeDuration& aDelay,
                                            uint32_t aType) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD Cancel(void) override { return NS_ERROR_NOT_IMPLEMENTED; }
  NS_IMETHOD InitWithNamedFuncCallback(nsTimerCallbackFunc aCallback,
                                       void* aClosure, uint32_t aDelay,
                                       uint32_t aType,
                                       const char* aName) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD InitHighResolutionWithNamedFuncCallback(
      nsTimerCallbackFunc aCallback, void* aClosure,
      const mozilla::TimeDuration& aDelay, uint32_t aType,
      const char* aName) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }

  NS_IMETHOD GetName(nsACString& aName) override {
    aName = mName;
    return NS_OK;
  }
  NS_IMETHOD GetDelay(uint32_t* aDelay) override {
    *aDelay = mDelay;
    return NS_OK;
  }
  NS_IMETHOD SetDelay(uint32_t aDelay) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD GetType(uint32_t* aType) override {
    *aType = mType;
    return NS_OK;
  }
  NS_IMETHOD SetType(uint32_t aType) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD GetClosure(void** aClosure) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD GetCallback(nsITimerCallback** aCallback) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD GetTarget(nsIEventTarget** aTarget) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD SetTarget(nsIEventTarget* aTarget) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  NS_IMETHOD GetAllowedEarlyFiringMicroseconds(
      uint32_t* aAllowedEarlyFiringMicroseconds) override {
    return NS_ERROR_NOT_IMPLEMENTED;
  }
  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) override {
    return sizeof(*this);
  }

 private:
  nsCString mName;
  uint32_t mDelay;
  uint32_t mType;

  ~nsReadOnlyTimer() = default;
};
NS_IMPL_ISUPPORTS(nsReadOnlyTimer, nsITimer)

nsresult TimerThread::GetTimers(nsTArray<RefPtr<nsITimer>>& aRetVal) {
  nsTArray<RefPtr<nsTimerImpl>> timers;
  {
    MonitorAutoLock lock(mMonitor);
    for (const auto& entry : mTimers) {
      nsTimerImpl* timer = entry.Value();
      if (!timer) {
        continue;
      }
      timers.AppendElement(timer);
    }
  }

  for (nsTimerImpl* timer : timers) {
    nsAutoCString name;
    timer->GetName(name);

    uint32_t delay;
    timer->GetDelay(&delay);

    uint32_t type;
    timer->GetType(&type);

    aRetVal.AppendElement(new nsReadOnlyTimer(name, delay, type));
  }

  return NS_OK;
}