1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "nsMemoryReporterManager.h"
9 #include "nsAtomTable.h"
11 #include "nsCOMArray.h"
12 #include "nsPrintfCString.h"
13 #include "nsProxyRelease.h"
14 #include "nsServiceManagerUtils.h"
16 #include "nsThreadUtils.h"
17 #include "nsPIDOMWindow.h"
18 #include "nsIObserverService.h"
19 #include "nsIOService.h"
20 #include "nsIGlobalObject.h"
21 #include "nsIXPConnect.h"
22 #ifdef MOZ_GECKO_PROFILER
23 # include "GeckoProfilerReporter.h"
25 #if defined(XP_UNIX) || defined(MOZ_DMD)
26 # include "nsMemoryInfoDumper.h"
30 #include "VRProcessManager.h"
31 #include "mozilla/Attributes.h"
32 #include "mozilla/MemoryReportingProcess.h"
33 #include "mozilla/PodOperations.h"
34 #include "mozilla/Preferences.h"
35 #include "mozilla/RDDProcessManager.h"
36 #include "mozilla/ResultExtensions.h"
37 #include "mozilla/Services.h"
38 #include "mozilla/Telemetry.h"
39 #include "mozilla/UniquePtrExtensions.h"
40 #include "mozilla/dom/MemoryReportTypes.h"
41 #include "mozilla/dom/ContentParent.h"
42 #include "mozilla/gfx/GPUProcessManager.h"
43 #include "mozilla/ipc/UtilityProcessManager.h"
44 #include "mozilla/ipc/FileDescriptorUtils.h"
46 #ifdef MOZ_WIDGET_ANDROID
47 # include "mozilla/java/GeckoAppShellWrappers.h"
48 # include "mozilla/jni/Utils.h"
52 # include "mozilla/MemoryInfo.h"
56 # define getpid _getpid
62 using namespace mozilla
;
63 using namespace mozilla::ipc
;
66 #if defined(MOZ_MEMORY)
67 # define HAVE_JEMALLOC_STATS 1
68 # include "mozmemory.h"
73 # include "mozilla/MemoryMapping.h"
79 [[nodiscard
]] static nsresult
GetProcSelfStatmField(int aField
, int64_t* aN
) {
80 // There are more than two fields, but we're only interested in the first
82 static const int MAX_FIELD
= 2;
83 size_t fields
[MAX_FIELD
];
84 MOZ_ASSERT(aField
< MAX_FIELD
, "bad field number");
85 FILE* f
= fopen("/proc/self/statm", "r");
87 int nread
= fscanf(f
, "%zu %zu", &fields
[0], &fields
[1]);
89 if (nread
== MAX_FIELD
) {
90 *aN
= fields
[aField
] * getpagesize();
94 return NS_ERROR_FAILURE
;
97 [[nodiscard
]] static nsresult
GetProcSelfSmapsPrivate(int64_t* aN
, pid_t aPid
) {
98 // You might be tempted to calculate USS by subtracting the "shared" value
99 // from the "resident" value in /proc/<pid>/statm. But at least on Linux,
100 // statm's "shared" value actually counts pages backed by files, which has
101 // little to do with whether the pages are actually shared. /proc/self/smaps
102 // on the other hand appears to give us the correct information.
104 nsTArray
<MemoryMapping
> mappings(1024);
105 MOZ_TRY(GetMemoryMappings(mappings
, aPid
));
108 for (auto& mapping
: mappings
) {
109 amount
+= mapping
.Private_Clean();
110 amount
+= mapping
.Private_Dirty();
116 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
117 [[nodiscard
]] static nsresult
VsizeDistinguishedAmount(int64_t* aN
) {
118 return GetProcSelfStatmField(0, aN
);
121 [[nodiscard
]] static nsresult
ResidentDistinguishedAmount(int64_t* aN
) {
122 return GetProcSelfStatmField(1, aN
);
125 [[nodiscard
]] static nsresult
ResidentFastDistinguishedAmount(int64_t* aN
) {
126 return ResidentDistinguishedAmount(aN
);
129 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
130 [[nodiscard
]] static nsresult
ResidentUniqueDistinguishedAmount(
131 int64_t* aN
, pid_t aPid
= 0) {
132 return GetProcSelfSmapsPrivate(aN
, aPid
);
# ifdef HAVE_MALLINFO
#  define HAVE_SYSTEM_HEAP_REPORTER 1
// Measures memory allocated directly from the system (glibc) allocator, as
// opposed to jemalloc. Restores the missing NS_OK return and the closing
// braces lost in the mangled original.
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
  struct mallinfo info = mallinfo();

  // The documentation in the glibc man page makes it sound like |uordblks|
  // would suffice, but that only gets the small allocations that are put in
  // the brk heap. We need |hblkhd| as well to get the larger allocations
  // that are mmapped.
  //
  // The fields in |struct mallinfo| are all |int|, <sigh>, so it is
  // unreliable if memory usage gets high. However, the system heap size on
  // Linux should usually be zero (so long as jemalloc is enabled) so that
  // shouldn't be a problem. Nonetheless, cast the |int|s to |size_t| before
  // adding them to provide a small amount of extra overflow protection.
  *aSizeOut = size_t(info.hblkhd) + size_t(info.uordblks);
  return NS_OK;
}
# endif
155 #elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
156 defined(__OpenBSD__) || defined(__FreeBSD_kernel__)
158 # include <sys/param.h>
159 # include <sys/sysctl.h>
160 # if defined(__DragonFly__) || defined(__FreeBSD__) || \
161 defined(__FreeBSD_kernel__)
162 # include <sys/user.h>
167 # if defined(__NetBSD__)
169 # define KERN_PROC KERN_PROC2
170 # define KINFO_PROC struct kinfo_proc2
172 # define KINFO_PROC struct kinfo_proc
175 # if defined(__DragonFly__)
176 # define KP_SIZE(kp) (kp.kp_vm_map_size)
177 # define KP_RSS(kp) (kp.kp_vm_rssize * getpagesize())
178 # elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
179 # define KP_SIZE(kp) (kp.ki_size)
180 # define KP_RSS(kp) (kp.ki_rssize * getpagesize())
181 # elif defined(__NetBSD__)
182 # define KP_SIZE(kp) (kp.p_vm_msize * getpagesize())
183 # define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
184 # elif defined(__OpenBSD__)
185 # define KP_SIZE(kp) \
186 ((kp.p_vm_dsize + kp.p_vm_ssize + kp.p_vm_tsize) * getpagesize())
187 # define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
190 [[nodiscard
]] static nsresult
GetKinfoProcSelf(KINFO_PROC
* aProc
) {
191 # if defined(__OpenBSD__) && defined(MOZ_SANDBOX)
192 static LazyLogModule
sPledgeLog("SandboxPledge");
193 MOZ_LOG(sPledgeLog
, LogLevel::Debug
,
194 ("%s called when pledged, returning NS_ERROR_FAILURE\n", __func__
));
195 return NS_ERROR_FAILURE
;
202 # if defined(__NetBSD__) || defined(__OpenBSD__)
207 u_int miblen
= sizeof(mib
) / sizeof(mib
[0]);
208 size_t size
= sizeof(KINFO_PROC
);
209 if (sysctl(mib
, miblen
, aProc
, &size
, nullptr, 0)) {
210 return NS_ERROR_FAILURE
;
215 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
216 [[nodiscard
]] static nsresult
VsizeDistinguishedAmount(int64_t* aN
) {
218 nsresult rv
= GetKinfoProcSelf(&proc
);
219 if (NS_SUCCEEDED(rv
)) {
225 [[nodiscard
]] static nsresult
ResidentDistinguishedAmount(int64_t* aN
) {
227 nsresult rv
= GetKinfoProcSelf(&proc
);
228 if (NS_SUCCEEDED(rv
)) {
234 [[nodiscard
]] static nsresult
ResidentFastDistinguishedAmount(int64_t* aN
) {
235 return ResidentDistinguishedAmount(aN
);
239 # include <libutil.h>
240 # include <algorithm>
242 [[nodiscard
]] static nsresult
GetKinfoVmentrySelf(int64_t* aPrss
,
245 struct kinfo_vmentry
* vmmap
;
246 struct kinfo_vmentry
* kve
;
247 if (!(vmmap
= kinfo_getvmmap(getpid(), &cnt
))) {
248 return NS_ERROR_FAILURE
;
257 for (int i
= 0; i
< cnt
; i
++) {
260 *aPrss
+= kve
->kve_private_resident
;
263 *aMaxreg
= std::max(*aMaxreg
, kve
->kve_end
- kve
->kve_start
);
271 # define HAVE_PRIVATE_REPORTER 1
272 [[nodiscard
]] static nsresult
PrivateDistinguishedAmount(int64_t* aN
) {
274 nsresult rv
= GetKinfoVmentrySelf(&priv
, nullptr);
275 NS_ENSURE_SUCCESS(rv
, rv
);
276 *aN
= priv
* getpagesize();
280 # define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
281 [[nodiscard
]] static nsresult
VsizeMaxContiguousDistinguishedAmount(
283 uint64_t biggestRegion
;
284 nsresult rv
= GetKinfoVmentrySelf(nullptr, &biggestRegion
);
285 if (NS_SUCCEEDED(rv
)) {
292 #elif defined(SOLARIS)
298 static void XMappingIter(int64_t& aVsize
, int64_t& aResident
,
303 int mapfd
= open("/proc/self/xmap", O_RDONLY
);
305 prxmap_t
* prmapp
= nullptr;
307 if (!fstat(mapfd
, &st
)) {
308 int nmap
= st
.st_size
/ sizeof(prxmap_t
);
310 // stat(2) on /proc/<pid>/xmap returns an incorrect value,
311 // prior to the release of Solaris 11.
312 // Here is a workaround for it.
314 prmapp
= (prxmap_t
*)malloc((nmap
+ 1) * sizeof(prxmap_t
));
319 int n
= pread(mapfd
, prmapp
, (nmap
+ 1) * sizeof(prxmap_t
), 0);
323 if (nmap
>= n
/ sizeof(prxmap_t
)) {
327 for (int i
= 0; i
< n
/ sizeof(prxmap_t
); i
++) {
328 aVsize
+= prmapp
[i
].pr_size
;
329 aResident
+= prmapp
[i
].pr_rss
* prmapp
[i
].pr_pagesize
;
330 if (prmapp
[i
].pr_mflags
& MA_SHARED
) {
331 aShared
+= prmapp
[i
].pr_rss
* prmapp
[i
].pr_pagesize
;
344 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
345 [[nodiscard
]] static nsresult
VsizeDistinguishedAmount(int64_t* aN
) {
346 int64_t vsize
, resident
, shared
;
347 XMappingIter(vsize
, resident
, shared
);
349 return NS_ERROR_FAILURE
;
355 [[nodiscard
]] static nsresult
ResidentDistinguishedAmount(int64_t* aN
) {
356 int64_t vsize
, resident
, shared
;
357 XMappingIter(vsize
, resident
, shared
);
358 if (resident
== -1) {
359 return NS_ERROR_FAILURE
;
365 [[nodiscard
]] static nsresult
ResidentFastDistinguishedAmount(int64_t* aN
) {
366 return ResidentDistinguishedAmount(aN
);
369 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
370 [[nodiscard
]] static nsresult
ResidentUniqueDistinguishedAmount(int64_t* aN
) {
371 int64_t vsize
, resident
, shared
;
372 XMappingIter(vsize
, resident
, shared
);
373 if (resident
== -1) {
374 return NS_ERROR_FAILURE
;
376 *aN
= resident
- shared
;
380 #elif defined(XP_MACOSX)
382 # include <mach/mach_init.h>
383 # include <mach/mach_vm.h>
384 # include <mach/shared_region.h>
385 # include <mach/task.h>
386 # include <sys/sysctl.h>
388 [[nodiscard
]] static bool GetTaskBasicInfo(struct task_basic_info
* aTi
) {
389 mach_msg_type_number_t count
= TASK_BASIC_INFO_COUNT
;
391 task_info(mach_task_self(), TASK_BASIC_INFO
, (task_info_t
)aTi
, &count
);
392 return kr
== KERN_SUCCESS
;
395 // The VSIZE figure on Mac includes huge amounts of shared memory and is always
396 // absurdly high, eg. 2GB+ even at start-up. But both 'top' and 'ps' report
397 // it, so we might as well too.
398 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
399 [[nodiscard
]] static nsresult
VsizeDistinguishedAmount(int64_t* aN
) {
401 if (!GetTaskBasicInfo(&ti
)) {
402 return NS_ERROR_FAILURE
;
404 *aN
= ti
.virtual_size
;
408 // If we're using jemalloc on Mac, we need to instruct jemalloc to purge the
409 // pages it has madvise(MADV_FREE)'d before we read our RSS in order to get
410 // an accurate result. The OS will take away MADV_FREE'd pages when there's
411 // memory pressure, so ideally, they shouldn't count against our RSS.
413 // Purging these pages can take a long time for some users (see bug 789975),
414 // so we provide the option to get the RSS without purging first.
415 [[nodiscard
]] static nsresult
ResidentDistinguishedAmountHelper(int64_t* aN
,
417 # ifdef HAVE_JEMALLOC_STATS
419 Telemetry::AutoTimer
<Telemetry::MEMORY_FREE_PURGED_PAGES_MS
> timer
;
420 jemalloc_purge_freed_pages();
425 if (!GetTaskBasicInfo(&ti
)) {
426 return NS_ERROR_FAILURE
;
428 *aN
= ti
.resident_size
;
432 [[nodiscard
]] static nsresult
ResidentFastDistinguishedAmount(int64_t* aN
) {
433 return ResidentDistinguishedAmountHelper(aN
, /* doPurge = */ false);
436 [[nodiscard
]] static nsresult
ResidentDistinguishedAmount(int64_t* aN
) {
437 return ResidentDistinguishedAmountHelper(aN
, /* doPurge = */ true);
440 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
442 static bool InSharedRegion(mach_vm_address_t aAddr
, cpu_type_t aType
) {
443 mach_vm_address_t base
;
444 mach_vm_address_t size
;
448 base
= SHARED_REGION_BASE_ARM
;
449 size
= SHARED_REGION_SIZE_ARM
;
452 base
= SHARED_REGION_BASE_ARM64
;
453 size
= SHARED_REGION_SIZE_ARM64
;
456 base
= SHARED_REGION_BASE_I386
;
457 size
= SHARED_REGION_SIZE_I386
;
459 case CPU_TYPE_X86_64
:
460 base
= SHARED_REGION_BASE_X86_64
;
461 size
= SHARED_REGION_SIZE_X86_64
;
467 return base
<= aAddr
&& aAddr
< (base
+ size
);
470 [[nodiscard
]] static nsresult
ResidentUniqueDistinguishedAmount(
471 int64_t* aN
, mach_port_t aPort
= 0) {
473 return NS_ERROR_FAILURE
;
477 size_t len
= sizeof(cpu_type
);
478 if (sysctlbyname("sysctl.proc_cputype", &cpu_type
, &len
, NULL
, 0) != 0) {
479 return NS_ERROR_FAILURE
;
482 // Roughly based on libtop_update_vm_regions in
483 // http://www.opensource.apple.com/source/top/top-100.1.2/libtop.c
484 size_t privatePages
= 0;
485 mach_vm_size_t topSize
= 0;
486 for (mach_vm_address_t addr
= MACH_VM_MIN_ADDRESS
;; addr
+= topSize
) {
487 vm_region_top_info_data_t topInfo
;
488 mach_msg_type_number_t topInfoCount
= VM_REGION_TOP_INFO_COUNT
;
489 mach_port_t topObjectName
;
491 kern_return_t kr
= mach_vm_region(
492 aPort
? aPort
: mach_task_self(), &addr
, &topSize
, VM_REGION_TOP_INFO
,
493 reinterpret_cast<vm_region_info_t
>(&topInfo
), &topInfoCount
,
495 if (kr
== KERN_INVALID_ADDRESS
) {
496 // Done iterating VM regions.
498 } else if (kr
!= KERN_SUCCESS
) {
499 return NS_ERROR_FAILURE
;
502 if (InSharedRegion(addr
, cpu_type
) && topInfo
.share_mode
!= SM_PRIVATE
) {
506 switch (topInfo
.share_mode
) {
508 // NB: Large pages are not shareable and always resident.
510 privatePages
+= topInfo
.private_pages_resident
;
511 privatePages
+= topInfo
.shared_pages_resident
;
514 privatePages
+= topInfo
.private_pages_resident
;
515 if (topInfo
.ref_count
== 1) {
516 // Treat copy-on-write pages as private if they only have one
518 privatePages
+= topInfo
.shared_pages_resident
;
522 // Using mprotect() or similar to protect a page in the middle of a
523 // mapping can create aliased mappings. They look like shared mappings
524 // to the VM_REGION_TOP_INFO interface, so re-check with
525 // VM_REGION_EXTENDED_INFO.
527 mach_vm_size_t exSize
= 0;
528 vm_region_extended_info_data_t exInfo
;
529 mach_msg_type_number_t exInfoCount
= VM_REGION_EXTENDED_INFO_COUNT
;
530 mach_port_t exObjectName
;
531 kr
= mach_vm_region(aPort
? aPort
: mach_task_self(), &addr
, &exSize
,
532 VM_REGION_EXTENDED_INFO
,
533 reinterpret_cast<vm_region_info_t
>(&exInfo
),
534 &exInfoCount
, &exObjectName
);
535 if (kr
== KERN_INVALID_ADDRESS
) {
536 // Done iterating VM regions.
538 } else if (kr
!= KERN_SUCCESS
) {
539 return NS_ERROR_FAILURE
;
542 if (exInfo
.share_mode
== SM_PRIVATE_ALIASED
) {
543 privatePages
+= exInfo
.pages_resident
;
553 if (host_page_size(aPort
? aPort
: mach_task_self(), &pageSize
) !=
555 pageSize
= PAGE_SIZE
;
558 *aN
= privatePages
* pageSize
;
562 [[nodiscard
]] static nsresult
PhysicalFootprintAmount(int64_t* aN
,
563 mach_port_t aPort
= 0) {
566 // The phys_footprint value (introduced in 10.11) of the TASK_VM_INFO data
567 // matches the value in the 'Memory' column of the Activity Monitor.
568 task_vm_info_data_t task_vm_info
;
569 mach_msg_type_number_t count
= TASK_VM_INFO_COUNT
;
570 kern_return_t kr
= task_info(aPort
? aPort
: mach_task_self(), TASK_VM_INFO
,
571 (task_info_t
)&task_vm_info
, &count
);
572 if (kr
!= KERN_SUCCESS
) {
573 return NS_ERROR_FAILURE
;
576 *aN
= task_vm_info
.phys_footprint
;
580 #elif defined(XP_WIN)
582 # include <windows.h>
584 # include <algorithm>
586 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
587 [[nodiscard
]] static nsresult
VsizeDistinguishedAmount(int64_t* aN
) {
589 s
.dwLength
= sizeof(s
);
591 if (!GlobalMemoryStatusEx(&s
)) {
592 return NS_ERROR_FAILURE
;
595 *aN
= s
.ullTotalVirtual
- s
.ullAvailVirtual
;
599 [[nodiscard
]] static nsresult
ResidentDistinguishedAmount(int64_t* aN
) {
600 PROCESS_MEMORY_COUNTERS pmc
;
601 pmc
.cb
= sizeof(PROCESS_MEMORY_COUNTERS
);
603 if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc
, sizeof(pmc
))) {
604 return NS_ERROR_FAILURE
;
607 *aN
= pmc
.WorkingSetSize
;
611 [[nodiscard
]] static nsresult
ResidentFastDistinguishedAmount(int64_t* aN
) {
612 return ResidentDistinguishedAmount(aN
);
615 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
617 [[nodiscard
]] static nsresult
ResidentUniqueDistinguishedAmount(
618 int64_t* aN
, HANDLE aProcess
= nullptr) {
619 // Determine how many entries we need.
620 PSAPI_WORKING_SET_INFORMATION tmp
;
621 DWORD tmpSize
= sizeof(tmp
);
622 memset(&tmp
, 0, tmpSize
);
624 HANDLE proc
= aProcess
? aProcess
: GetCurrentProcess();
625 QueryWorkingSet(proc
, &tmp
, tmpSize
);
627 // Fudge the size in case new entries are added between calls.
628 size_t entries
= tmp
.NumberOfEntries
* 2;
631 return NS_ERROR_FAILURE
;
634 DWORD infoArraySize
= tmpSize
+ (entries
* sizeof(PSAPI_WORKING_SET_BLOCK
));
635 UniqueFreePtr
<PSAPI_WORKING_SET_INFORMATION
> infoArray(
636 static_cast<PSAPI_WORKING_SET_INFORMATION
*>(malloc(infoArraySize
)));
639 return NS_ERROR_FAILURE
;
642 if (!QueryWorkingSet(proc
, infoArray
.get(), infoArraySize
)) {
643 return NS_ERROR_FAILURE
;
646 entries
= static_cast<size_t>(infoArray
->NumberOfEntries
);
647 size_t privatePages
= 0;
648 for (size_t i
= 0; i
< entries
; i
++) {
649 // Count shared pages that only one process is using as private.
650 if (!infoArray
->WorkingSetInfo
[i
].Shared
||
651 infoArray
->WorkingSetInfo
[i
].ShareCount
<= 1) {
659 *aN
= privatePages
* si
.dwPageSize
;
663 # define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
664 [[nodiscard
]] static nsresult
VsizeMaxContiguousDistinguishedAmount(
666 SIZE_T biggestRegion
= 0;
667 MEMORY_BASIC_INFORMATION vmemInfo
= {0};
668 for (size_t currentAddress
= 0;;) {
669 if (!VirtualQuery((LPCVOID
)currentAddress
, &vmemInfo
, sizeof(vmemInfo
))) {
670 // Something went wrong, just return whatever we've got already.
674 if (vmemInfo
.State
== MEM_FREE
) {
675 biggestRegion
= std::max(biggestRegion
, vmemInfo
.RegionSize
);
678 SIZE_T lastAddress
= currentAddress
;
679 currentAddress
+= vmemInfo
.RegionSize
;
681 // If we overflow, we've examined all of the address space.
682 if (currentAddress
< lastAddress
) {
691 # define HAVE_PRIVATE_REPORTER 1
692 [[nodiscard
]] static nsresult
PrivateDistinguishedAmount(int64_t* aN
) {
693 PROCESS_MEMORY_COUNTERS_EX pmcex
;
694 pmcex
.cb
= sizeof(PROCESS_MEMORY_COUNTERS_EX
);
696 if (!GetProcessMemoryInfo(GetCurrentProcess(),
697 (PPROCESS_MEMORY_COUNTERS
)&pmcex
, sizeof(pmcex
))) {
698 return NS_ERROR_FAILURE
;
701 *aN
= pmcex
.PrivateUsage
;
705 # define HAVE_SYSTEM_HEAP_REPORTER 1
706 // Windows can have multiple separate heaps, but we should not touch non-default
707 // heaps because they may be destroyed at anytime while we hold a handle. So we
708 // count only the default heap.
709 [[nodiscard
]] static nsresult
SystemHeapSize(int64_t* aSizeOut
) {
710 HANDLE heap
= GetProcessHeap();
712 NS_ENSURE_TRUE(HeapLock(heap
), NS_ERROR_FAILURE
);
714 int64_t heapSize
= 0;
715 PROCESS_HEAP_ENTRY entry
;
716 entry
.lpData
= nullptr;
717 while (HeapWalk(heap
, &entry
)) {
718 // We don't count entry.cbOverhead, because we just want to measure the
719 // space available to the program.
720 if (entry
.wFlags
& PROCESS_HEAP_ENTRY_BUSY
) {
721 heapSize
+= entry
.cbData
;
725 // Check this result only after unlocking the heap, so that we don't leave
726 // the heap locked if there was an error.
727 DWORD lastError
= GetLastError();
729 // I have no idea how things would proceed if unlocking this heap failed...
730 NS_ENSURE_TRUE(HeapUnlock(heap
), NS_ERROR_FAILURE
);
732 NS_ENSURE_TRUE(lastError
== ERROR_NO_MORE_ITEMS
, NS_ERROR_FAILURE
);
734 *aSizeOut
= heapSize
;
745 struct SegmentEntry
: public PLDHashEntryHdr
{
746 static PLDHashNumber
HashKey(const void* aKey
) {
747 auto kind
= static_cast<const SegmentKind
*>(aKey
);
748 return mozilla::HashGeneric(kind
->mState
, kind
->mType
, kind
->mProtect
,
752 static bool MatchEntry(const PLDHashEntryHdr
* aEntry
, const void* aKey
) {
753 auto kind
= static_cast<const SegmentKind
*>(aKey
);
754 auto entry
= static_cast<const SegmentEntry
*>(aEntry
);
755 return kind
->mState
== entry
->mKind
.mState
&&
756 kind
->mType
== entry
->mKind
.mType
&&
757 kind
->mProtect
== entry
->mKind
.mProtect
&&
758 kind
->mIsStack
== entry
->mKind
.mIsStack
;
761 static void InitEntry(PLDHashEntryHdr
* aEntry
, const void* aKey
) {
762 auto kind
= static_cast<const SegmentKind
*>(aKey
);
763 auto entry
= static_cast<SegmentEntry
*>(aEntry
);
764 entry
->mKind
= *kind
;
769 static const PLDHashTableOps Ops
;
771 SegmentKind mKind
; // The segment kind.
772 uint32_t mCount
; // The number of segments of this kind.
773 size_t mSize
; // The combined size of segments of this kind.
776 /* static */ const PLDHashTableOps
SegmentEntry::Ops
= {
777 SegmentEntry::HashKey
, SegmentEntry::MatchEntry
,
778 PLDHashTable::MoveEntryStub
, PLDHashTable::ClearEntryStub
,
779 SegmentEntry::InitEntry
};
781 class WindowsAddressSpaceReporter final
: public nsIMemoryReporter
{
782 ~WindowsAddressSpaceReporter() {}
787 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
788 nsISupports
* aData
, bool aAnonymize
) override
{
789 // First iterate over all the segments and record how many of each kind
790 // there were and their aggregate sizes. We use a hash table for this
791 // because there are a couple of dozen different kinds possible.
793 PLDHashTable
table(&SegmentEntry::Ops
, sizeof(SegmentEntry
));
794 MEMORY_BASIC_INFORMATION info
= {0};
795 bool isPrevSegStackGuard
= false;
796 for (size_t currentAddress
= 0;;) {
797 if (!VirtualQuery((LPCVOID
)currentAddress
, &info
, sizeof(info
))) {
798 // Something went wrong, just return whatever we've got already.
802 size_t size
= info
.RegionSize
;
804 // Note that |type| and |protect| are ignored in some cases.
805 DWORD state
= info
.State
;
807 (state
== MEM_RESERVE
|| state
== MEM_COMMIT
) ? info
.Type
: 0;
808 DWORD protect
= (state
== MEM_COMMIT
) ? info
.Protect
: 0;
809 bool isStack
= isPrevSegStackGuard
&& state
== MEM_COMMIT
&&
810 type
== MEM_PRIVATE
&& protect
== PAGE_READWRITE
;
812 SegmentKind kind
= {state
, type
, protect
, isStack
? 1 : 0};
814 static_cast<SegmentEntry
*>(table
.Add(&kind
, mozilla::fallible
));
817 entry
->mSize
+= size
;
820 isPrevSegStackGuard
= info
.State
== MEM_COMMIT
&&
821 info
.Type
== MEM_PRIVATE
&&
822 info
.Protect
== (PAGE_READWRITE
| PAGE_GUARD
);
824 size_t lastAddress
= currentAddress
;
825 currentAddress
+= size
;
827 // If we overflow, we've examined all of the address space.
828 if (currentAddress
< lastAddress
) {
833 // Then iterate over the hash table and report the details for each segment
836 for (auto iter
= table
.Iter(); !iter
.Done(); iter
.Next()) {
837 // For each range of pages, we consider one or more of its State, Type
838 // and Protect values. These are documented at
839 // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx
840 // (for State and Type) and
841 // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx
844 // Not all State values have accompanying Type and Protection values.
846 bool doProtect
= false;
848 auto entry
= static_cast<const SegmentEntry
*>(iter
.Get());
850 nsCString
path("address-space");
852 switch (entry
->mKind
.mState
) {
854 path
.AppendLiteral("/free");
858 path
.AppendLiteral("/reserved");
863 path
.AppendLiteral("/commit");
869 // Should be impossible, but handle it just in case.
870 path
.AppendLiteral("/???");
875 switch (entry
->mKind
.mType
) {
877 path
.AppendLiteral("/image");
881 path
.AppendLiteral("/mapped");
885 path
.AppendLiteral("/private");
889 // Should be impossible, but handle it just in case.
890 path
.AppendLiteral("/???");
896 DWORD protect
= entry
->mKind
.mProtect
;
897 // Basic attributes. Exactly one of these should be set.
898 if (protect
& PAGE_EXECUTE
) {
899 path
.AppendLiteral("/execute");
901 if (protect
& PAGE_EXECUTE_READ
) {
902 path
.AppendLiteral("/execute-read");
904 if (protect
& PAGE_EXECUTE_READWRITE
) {
905 path
.AppendLiteral("/execute-readwrite");
907 if (protect
& PAGE_EXECUTE_WRITECOPY
) {
908 path
.AppendLiteral("/execute-writecopy");
910 if (protect
& PAGE_NOACCESS
) {
911 path
.AppendLiteral("/noaccess");
913 if (protect
& PAGE_READONLY
) {
914 path
.AppendLiteral("/readonly");
916 if (protect
& PAGE_READWRITE
) {
917 path
.AppendLiteral("/readwrite");
919 if (protect
& PAGE_WRITECOPY
) {
920 path
.AppendLiteral("/writecopy");
923 // Modifiers. At most one of these should be set.
924 if (protect
& PAGE_GUARD
) {
925 path
.AppendLiteral("+guard");
927 if (protect
& PAGE_NOCACHE
) {
928 path
.AppendLiteral("+nocache");
930 if (protect
& PAGE_WRITECOMBINE
) {
931 path
.AppendLiteral("+writecombine");
934 // Annotate likely stack segments, too.
935 if (entry
->mKind
.mIsStack
) {
936 path
.AppendLiteral("+stack");
940 // Append the segment count.
941 path
.AppendPrintf("(segments=%u)", entry
->mCount
);
943 aHandleReport
->Callback(""_ns
, path
, KIND_OTHER
, UNITS_BYTES
,
944 entry
->mSize
, "From MEMORY_BASIC_INFORMATION."_ns
,
951 NS_IMPL_ISUPPORTS(WindowsAddressSpaceReporter
, nsIMemoryReporter
)
953 #endif // XP_<PLATFORM>
955 #ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
956 class VsizeMaxContiguousReporter final
: public nsIMemoryReporter
{
957 ~VsizeMaxContiguousReporter() {}
962 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
963 nsISupports
* aData
, bool aAnonymize
) override
{
965 if (NS_SUCCEEDED(VsizeMaxContiguousDistinguishedAmount(&amount
))) {
967 "vsize-max-contiguous", KIND_OTHER
, UNITS_BYTES
, amount
,
968 "Size of the maximum contiguous block of available virtual memory.");
973 NS_IMPL_ISUPPORTS(VsizeMaxContiguousReporter
, nsIMemoryReporter
)
976 #ifdef HAVE_PRIVATE_REPORTER
977 class PrivateReporter final
: public nsIMemoryReporter
{
978 ~PrivateReporter() {}
983 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
984 nsISupports
* aData
, bool aAnonymize
) override
{
986 if (NS_SUCCEEDED(PrivateDistinguishedAmount(&amount
))) {
989 "private", KIND_OTHER
, UNITS_BYTES
, amount
,
990 "Memory that cannot be shared with other processes, including memory that is "
991 "committed and marked MEM_PRIVATE, data that is not mapped, and executable "
992 "pages that have been written to.");
998 NS_IMPL_ISUPPORTS(PrivateReporter
, nsIMemoryReporter
)
1001 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
1002 class VsizeReporter final
: public nsIMemoryReporter
{
1003 ~VsizeReporter() = default;
1008 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1009 nsISupports
* aData
, bool aAnonymize
) override
{
1011 if (NS_SUCCEEDED(VsizeDistinguishedAmount(&amount
))) {
1014 "vsize", KIND_OTHER
, UNITS_BYTES
, amount
,
1015 "Memory mapped by the process, including code and data segments, the heap, "
1016 "thread stacks, memory explicitly mapped by the process via mmap and similar "
1017 "operations, and memory shared with other processes. This is the vsize figure "
1018 "as reported by 'top' and 'ps'. This figure is of limited use on Mac, where "
1019 "processes share huge amounts of memory with one another. But even on other "
1020 "operating systems, 'resident' is a much better measure of the memory "
1021 "resources used by the process.");
1027 NS_IMPL_ISUPPORTS(VsizeReporter
, nsIMemoryReporter
)
1029 class ResidentReporter final
: public nsIMemoryReporter
{
1030 ~ResidentReporter() = default;
1035 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1036 nsISupports
* aData
, bool aAnonymize
) override
{
1038 if (NS_SUCCEEDED(ResidentDistinguishedAmount(&amount
))) {
1041 "resident", KIND_OTHER
, UNITS_BYTES
, amount
,
1042 "Memory mapped by the process that is present in physical memory, also known "
1043 "as the resident set size (RSS). This is the best single figure to use when "
1044 "considering the memory resources used by the process, but it depends both on "
1045 "other processes being run and details of the OS kernel and so is best used "
1046 "for comparing the memory usage of a single process at different points in "
1053 NS_IMPL_ISUPPORTS(ResidentReporter
, nsIMemoryReporter
)
1055 #endif // HAVE_VSIZE_AND_RESIDENT_REPORTERS
1057 #ifdef HAVE_RESIDENT_UNIQUE_REPORTER
1058 class ResidentUniqueReporter final
: public nsIMemoryReporter
{
1059 ~ResidentUniqueReporter() = default;
1064 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1065 nsISupports
* aData
, bool aAnonymize
) override
{
1068 if (NS_SUCCEEDED(ResidentUniqueDistinguishedAmount(&amount
))) {
1070 "resident-unique", KIND_OTHER
, UNITS_BYTES
, amount
,
1071 "Memory mapped by the process that is present in physical memory and not "
1072 "shared with any other processes. This is also known as the process's unique "
1073 "set size (USS). This is the amount of RAM we'd expect to be freed if we "
1074 "closed this process.");
1077 if (NS_SUCCEEDED(PhysicalFootprintAmount(&amount
))) {
1079 "resident-phys-footprint", KIND_OTHER
, UNITS_BYTES
, amount
,
1080 "Memory footprint reported by MacOS's task_info API's phys_footprint field. "
1081 "This matches the memory column in Activity Monitor.");
1088 NS_IMPL_ISUPPORTS(ResidentUniqueReporter
, nsIMemoryReporter
)
1090 #endif // HAVE_RESIDENT_UNIQUE_REPORTER
1092 #ifdef HAVE_SYSTEM_HEAP_REPORTER
1094 class SystemHeapReporter final
: public nsIMemoryReporter
{
1095 ~SystemHeapReporter() = default;
1100 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1101 nsISupports
* aData
, bool aAnonymize
) override
{
1103 if (NS_SUCCEEDED(SystemHeapSize(&amount
))) {
1106 "system-heap-allocated", KIND_OTHER
, UNITS_BYTES
, amount
,
1107 "Memory used by the system allocator that is currently allocated to the "
1108 "application. This is distinct from the jemalloc heap that Firefox uses for "
1109 "most or all of its heap allocations. Ideally this number is zero, but "
1110 "on some platforms we cannot force every heap allocation through jemalloc.");
1116 NS_IMPL_ISUPPORTS(SystemHeapReporter
, nsIMemoryReporter
)
1117 #endif // HAVE_SYSTEM_HEAP_REPORTER
1121 # include <sys/resource.h>
1123 # define HAVE_RESIDENT_PEAK_REPORTER 1
1125 [[nodiscard
]] static nsresult
ResidentPeakDistinguishedAmount(int64_t* aN
) {
1126 struct rusage usage
;
1127 if (0 == getrusage(RUSAGE_SELF
, &usage
)) {
1128 // The units for ru_maxrrs:
1130 // - Solaris: pages? But some sources it actually always returns 0, so
1132 // - Linux, {Net/Open/Free}BSD, DragonFly: KiB
1134 *aN
= usage
.ru_maxrss
;
1135 # elif defined(SOLARIS)
1136 *aN
= usage
.ru_maxrss
* getpagesize();
1138 *aN
= usage
.ru_maxrss
* 1024;
1144 return NS_ERROR_FAILURE
;
1147 class ResidentPeakReporter final
: public nsIMemoryReporter
{
1148 ~ResidentPeakReporter() = default;
1153 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1154 nsISupports
* aData
, bool aAnonymize
) override
{
1156 if (NS_SUCCEEDED(ResidentPeakDistinguishedAmount(&amount
))) {
1158 "resident-peak", KIND_OTHER
, UNITS_BYTES
, amount
,
1159 "The peak 'resident' value for the lifetime of the process.");
1164 NS_IMPL_ISUPPORTS(ResidentPeakReporter
, nsIMemoryReporter
)
1166 # define HAVE_PAGE_FAULT_REPORTERS 1
1168 class PageFaultsSoftReporter final
: public nsIMemoryReporter
{
1169 ~PageFaultsSoftReporter() = default;
1174 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1175 nsISupports
* aData
, bool aAnonymize
) override
{
1176 struct rusage usage
;
1177 int err
= getrusage(RUSAGE_SELF
, &usage
);
1179 int64_t amount
= usage
.ru_minflt
;
1182 "page-faults-soft", KIND_OTHER
, UNITS_COUNT_CUMULATIVE
, amount
,
1183 "The number of soft page faults (also known as 'minor page faults') that "
1184 "have occurred since the process started. A soft page fault occurs when the "
1185 "process tries to access a page which is present in physical memory but is "
1186 "not mapped into the process's address space. For instance, a process might "
1187 "observe soft page faults when it loads a shared library which is already "
1188 "present in physical memory. A process may experience many thousands of soft "
1189 "page faults even when the machine has plenty of available physical memory, "
1190 "and because the OS services a soft page fault without accessing the disk, "
1191 "they impact performance much less than hard page faults.");
1197 NS_IMPL_ISUPPORTS(PageFaultsSoftReporter
, nsIMemoryReporter
)
1199 [[nodiscard
]] static nsresult
PageFaultsHardDistinguishedAmount(
1201 struct rusage usage
;
1202 int err
= getrusage(RUSAGE_SELF
, &usage
);
1204 return NS_ERROR_FAILURE
;
1206 *aAmount
= usage
.ru_majflt
;
1210 class PageFaultsHardReporter final
: public nsIMemoryReporter
{
1211 ~PageFaultsHardReporter() = default;
1216 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1217 nsISupports
* aData
, bool aAnonymize
) override
{
1219 if (NS_SUCCEEDED(PageFaultsHardDistinguishedAmount(&amount
))) {
1222 "page-faults-hard", KIND_OTHER
, UNITS_COUNT_CUMULATIVE
, amount
,
1223 "The number of hard page faults (also known as 'major page faults') that have "
1224 "occurred since the process started. A hard page fault occurs when a process "
1225 "tries to access a page which is not present in physical memory. The "
1226 "operating system must access the disk in order to fulfill a hard page fault. "
1227 "When memory is plentiful, you should see very few hard page faults. But if "
1228 "the process tries to use more memory than your machine has available, you "
1229 "may see many thousands of hard page faults. Because accessing the disk is up "
1230 "to a million times slower than accessing RAM, the program may run very "
1231 "slowly when it is experiencing more than 100 or so hard page faults a "
1238 NS_IMPL_ISUPPORTS(PageFaultsHardReporter
, nsIMemoryReporter
)
1243 ** memory reporter implementation for jemalloc and OSX malloc,
1244 ** to obtain info on total memory in use (that we know about,
1245 ** at least -- on OSX, there are sometimes other zones in use).
1248 #ifdef HAVE_JEMALLOC_STATS
1250 static size_t HeapOverhead(jemalloc_stats_t
* aStats
) {
1251 return aStats
->waste
+ aStats
->bookkeeping
+ aStats
->page_cache
+
1255 // This has UNITS_PERCENTAGE, so it is multiplied by 100x *again* on top of the
1256 // 100x for the percentage.
1257 static int64_t HeapOverheadFraction(jemalloc_stats_t
* aStats
) {
1258 size_t heapOverhead
= HeapOverhead(aStats
);
1259 size_t heapCommitted
= aStats
->allocated
+ heapOverhead
;
1260 return int64_t(10000 * (heapOverhead
/ (double)heapCommitted
));
1263 class JemallocHeapReporter final
: public nsIMemoryReporter
{
1264 ~JemallocHeapReporter() = default;
1269 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1270 nsISupports
* aData
, bool aAnonymize
) override
{
1271 jemalloc_stats_t stats
;
1272 const size_t num_bins
= jemalloc_stats_num_bins();
1273 nsTArray
<jemalloc_bin_stats_t
> bin_stats(num_bins
);
1274 bin_stats
.SetLength(num_bins
);
1275 jemalloc_stats(&stats
, bin_stats
.Elements());
1279 "heap-committed/allocated", KIND_OTHER
, UNITS_BYTES
, stats
.allocated
,
1280 "Memory mapped by the heap allocator that is currently allocated to the "
1281 "application. This may exceed the amount of memory requested by the "
1282 "application because the allocator regularly rounds up request sizes. (The "
1283 "exact amount requested is not recorded.)");
1286 "heap-allocated", KIND_OTHER
, UNITS_BYTES
, stats
.allocated
,
1287 "The same as 'heap-committed/allocated'.");
1289 // We mark this and the other heap-overhead reporters as KIND_NONHEAP
1290 // because KIND_HEAP memory means "counted in heap-allocated", which
1292 for (auto& bin
: bin_stats
) {
1293 MOZ_ASSERT(bin
.size
);
1294 nsPrintfCString
path("explicit/heap-overhead/bin-unused/bin-%zu",
1296 aHandleReport
->Callback(EmptyCString(), path
, KIND_NONHEAP
, UNITS_BYTES
,
1299 "Unused bytes in all runs of all bins for this size class"),
1303 if (stats
.waste
> 0) {
1305 "explicit/heap-overhead/waste", KIND_NONHEAP
, UNITS_BYTES
,
1307 "Committed bytes which do not correspond to an active allocation and which the "
1308 "allocator is not intentionally keeping alive (i.e., not "
1309 "'explicit/heap-overhead/{bookkeeping,page-cache,bin-unused}').");
1313 "explicit/heap-overhead/bookkeeping", KIND_NONHEAP
, UNITS_BYTES
,
1315 "Committed bytes which the heap allocator uses for internal data structures.");
1318 "explicit/heap-overhead/page-cache", KIND_NONHEAP
, UNITS_BYTES
,
1320 "Memory which the allocator could return to the operating system, but hasn't. "
1321 "The allocator keeps this memory around as an optimization, so it doesn't "
1322 "have to ask the OS the next time it needs to fulfill a request. This value "
1323 "is typically not larger than a few megabytes.");
1326 "heap-committed/overhead", KIND_OTHER
, UNITS_BYTES
,
1327 HeapOverhead(&stats
),
1328 "The sum of 'explicit/heap-overhead/*'.");
1331 "heap-mapped", KIND_OTHER
, UNITS_BYTES
, stats
.mapped
,
1332 "Amount of memory currently mapped. Includes memory that is uncommitted, i.e. "
1333 "neither in physical memory nor paged to disk.");
1336 "heap-chunksize", KIND_OTHER
, UNITS_BYTES
, stats
.chunksize
,
1340 mozilla::phc::MemoryUsage usage
;
1341 ReplaceMalloc::PHCMemoryUsage(usage
);
1344 "explicit/heap-overhead/phc/metadata", KIND_NONHEAP
, UNITS_BYTES
,
1345 usage
.mMetadataBytes
,
1346 "Memory used by PHC to store stacks and other metadata for each allocation");
1348 "explicit/heap-overhead/phc/fragmentation", KIND_NONHEAP
, UNITS_BYTES
,
1349 usage
.mFragmentationBytes
,
1350 "The amount of memory lost due to rounding up allocations to the next page "
1352 "This is also known as 'internal fragmentation'. "
1353 "Note that all allocators have some internal fragmentation, there may still "
1354 "be some internal fragmentation without PHC.");
1362 NS_IMPL_ISUPPORTS(JemallocHeapReporter
, nsIMemoryReporter
)
1364 #endif // HAVE_JEMALLOC_STATS
1366 // Why is this here? At first glance, you'd think it could be defined and
1367 // registered with nsMemoryReporterManager entirely within nsAtomTable.cpp.
1368 // However, the obvious time to register it is when the table is initialized,
1369 // and that happens before XPCOM components are initialized, which means the
1370 // RegisterStrongMemoryReporter call fails. So instead we do it here.
1371 class AtomTablesReporter final
: public nsIMemoryReporter
{
1372 MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf
)
1374 ~AtomTablesReporter() = default;
1379 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1380 nsISupports
* aData
, bool aAnonymize
) override
{
1382 NS_AddSizeOfAtoms(MallocSizeOf
, sizes
);
1384 MOZ_COLLECT_REPORT("explicit/atoms/table", KIND_HEAP
, UNITS_BYTES
,
1385 sizes
.mTable
, "Memory used by the atom table.");
1388 "explicit/atoms/dynamic-objects-and-chars", KIND_HEAP
, UNITS_BYTES
,
1389 sizes
.mDynamicAtoms
,
1390 "Memory used by dynamic atom objects and chars (which are stored "
1391 "at the end of each atom object).");
1396 NS_IMPL_ISUPPORTS(AtomTablesReporter
, nsIMemoryReporter
)
1398 class ThreadsReporter final
: public nsIMemoryReporter
{
1399 MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf
)
1400 ~ThreadsReporter() = default;
1405 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1406 nsISupports
* aData
, bool aAnonymize
) override
{
1408 nsTArray
<MemoryMapping
> mappings(1024);
1409 MOZ_TRY(GetMemoryMappings(mappings
));
1412 // Enumerating over active threads requires holding a lock, so we collect
1413 // info on all threads, and then call our reporter callbacks after releasing
1418 size_t mPrivateSize
;
1420 AutoTArray
<ThreadData
, 32> threads
;
1422 size_t eventQueueSizes
= 0;
1423 size_t wrapperSizes
= 0;
1424 size_t threadCount
= 0;
1426 for (auto* thread
: nsThread::Enumerate()) {
1428 eventQueueSizes
+= thread
->SizeOfEventQueues(MallocSizeOf
);
1429 wrapperSizes
+= thread
->ShallowSizeOfIncludingThis(MallocSizeOf
);
1431 if (!thread
->StackBase()) {
1435 #if defined(XP_LINUX)
1436 int idx
= mappings
.BinaryIndexOf(thread
->StackBase());
1440 // Referenced() is the combined size of all pages in the region which have
1441 // ever been touched, and are therefore consuming memory. For stack
1442 // regions, these pages are guaranteed to be un-shared unless we fork
1443 // after creating threads (which we don't).
1444 size_t privateSize
= mappings
[idx
].Referenced();
1446 // On Linux, we have to be very careful matching memory regions to thread
1449 // To begin with, the kernel only reports VM stats for regions of all
1450 // adjacent pages with the same flags, protection, and backing file.
1451 // There's no way to get finer-grained usage information for a subset of
1454 // Stack segments always have a guard page at the bottom of the stack
1455 // (assuming we only support stacks that grow down), so there's no danger
1456 // of them being merged with other stack regions. At the top, there's no
1457 // protection page, and no way to allocate one without using pthreads
1458 // directly and allocating our own stacks. So we get around the problem by
1459 // adding an extra VM flag (NOHUGEPAGES) to our stack region, which we
1460 // don't expect to be set on any heap regions. But this is not fool-proof.
1462 // A second kink is that different C libraries (and different versions
1463 // thereof) report stack base locations and sizes differently with regard
1464 // to the guard page. For the libraries that include the guard page in the
1465 // stack size base pointer, we need to adjust those values to compensate.
1466 // But it's possible that our logic will get out of sync with library
1467 // changes, or someone will compile with an unexpected library.
1470 // The upshot of all of this is that there may be configurations that our
1471 // special cases don't cover. And if there are, we want to know about it.
1472 // So assert that total size of the memory region we're reporting actually
1473 // matches the allocated size of the thread stack.
1475 MOZ_ASSERT(mappings
[idx
].Size() == thread
->StackSize(),
1476 "Mapping region size doesn't match stack allocation size");
1478 #elif defined(XP_WIN)
1479 auto memInfo
= MemoryInfo::Get(thread
->StackBase(), thread
->StackSize());
1480 size_t privateSize
= memInfo
.Committed();
1482 size_t privateSize
= thread
->StackSize();
1483 MOZ_ASSERT_UNREACHABLE(
1484 "Shouldn't have stack base pointer on this "
1488 threads
.AppendElement(ThreadData
{
1489 nsCString(PR_GetThreadName(thread
->GetPRThread())),
1491 // On Linux, it's possible (but unlikely) that our stack region will
1492 // have been merged with adjacent heap regions, in which case we'll
1493 // get combined size information for both. So we take the minimum of
1494 // the reported private size and the requested stack size to avoid the
1495 // possible of majorly over-reporting in that case.
1496 std::min(privateSize
, thread
->StackSize()),
1500 for (auto& thread
: threads
) {
1501 nsPrintfCString
path("explicit/threads/stacks/%s (tid=%u)",
1502 thread
.mName
.get(), thread
.mThreadId
);
1504 aHandleReport
->Callback(
1505 ""_ns
, path
, KIND_NONHEAP
, UNITS_BYTES
, thread
.mPrivateSize
,
1506 nsLiteralCString("The sizes of thread stacks which have been "
1507 "committed to memory."),
1511 MOZ_COLLECT_REPORT("explicit/threads/overhead/event-queues", KIND_HEAP
,
1512 UNITS_BYTES
, eventQueueSizes
,
1513 "The sizes of nsThread event queues and observers.");
1515 MOZ_COLLECT_REPORT("explicit/threads/overhead/wrappers", KIND_HEAP
,
1516 UNITS_BYTES
, wrapperSizes
,
1517 "The sizes of nsThread/PRThread wrappers.");
1520 // Each thread on Windows has a fixed kernel overhead. For 32 bit Windows,
1521 // that's 12K. For 64 bit, it's 24K.
1524 // https://blogs.technet.microsoft.com/markrussinovich/2009/07/05/pushing-the-limits-of-windows-processes-and-threads/
1525 constexpr size_t kKernelSize
= (sizeof(void*) == 8 ? 24 : 12) * 1024;
1526 #elif defined(XP_LINUX)
1527 // On Linux, kernel stacks are usually 8K. However, on x86, they are
1528 // allocated virtually, and start out at 4K. They may grow to 8K, but we
1529 // have no way of knowing which ones do, so all we can do is guess.
1530 # if defined(__x86_64__) || defined(__i386__)
1531 constexpr size_t kKernelSize
= 4 * 1024;
1533 constexpr size_t kKernelSize
= 8 * 1024;
1535 #elif defined(XP_MACOSX)
1536 // On Darwin, kernel stacks are 16K:
1538 // https://books.google.com/books?id=K8vUkpOXhN4C&lpg=PA513&dq=mach%20kernel%20thread%20stack%20size&pg=PA513#v=onepage&q=mach%20kernel%20thread%20stack%20size&f=false
1539 constexpr size_t kKernelSize
= 16 * 1024;
1541 // Elsewhere, just assume that kernel stacks require at least 8K.
1542 constexpr size_t kKernelSize
= 8 * 1024;
1545 MOZ_COLLECT_REPORT("explicit/threads/overhead/kernel", KIND_NONHEAP
,
1546 UNITS_BYTES
, threadCount
* kKernelSize
,
1547 "The total kernel overhead for all active threads.");
1552 NS_IMPL_ISUPPORTS(ThreadsReporter
, nsIMemoryReporter
)
1556 // Ideally, this would be implemented in BlockingResourceBase.cpp.
1557 // However, this ends up breaking the linking step of various unit tests due
1558 // to adding a new dependency to libdmd for a commonly used feature (mutexes)
1559 // in DMD builds. So instead we do it here.
1560 class DeadlockDetectorReporter final
: public nsIMemoryReporter
{
1561 MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf
)
1563 ~DeadlockDetectorReporter() = default;
1568 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1569 nsISupports
* aData
, bool aAnonymize
) override
{
1571 "explicit/deadlock-detector", KIND_HEAP
, UNITS_BYTES
,
1572 BlockingResourceBase::SizeOfDeadlockDetector(MallocSizeOf
),
1573 "Memory used by the deadlock detector.");
1578 NS_IMPL_ISUPPORTS(DeadlockDetectorReporter
, nsIMemoryReporter
)
1587 class DMDReporter final
: public nsIMemoryReporter
{
1591 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1592 nsISupports
* aData
, bool aAnonymize
) override
{
1594 dmd::SizeOf(&sizes
);
1597 "explicit/dmd/stack-traces/used", KIND_HEAP
, UNITS_BYTES
,
1598 sizes
.mStackTracesUsed
,
1599 "Memory used by stack traces which correspond to at least "
1600 "one heap block DMD is tracking.");
1603 "explicit/dmd/stack-traces/unused", KIND_HEAP
, UNITS_BYTES
,
1604 sizes
.mStackTracesUnused
,
1605 "Memory used by stack traces which don't correspond to any heap "
1606 "blocks DMD is currently tracking.");
1608 MOZ_COLLECT_REPORT("explicit/dmd/stack-traces/table", KIND_HEAP
,
1609 UNITS_BYTES
, sizes
.mStackTraceTable
,
1610 "Memory used by DMD's stack trace table.");
1612 MOZ_COLLECT_REPORT("explicit/dmd/live-block-table", KIND_HEAP
, UNITS_BYTES
,
1613 sizes
.mLiveBlockTable
,
1614 "Memory used by DMD's live block table.");
1616 MOZ_COLLECT_REPORT("explicit/dmd/dead-block-list", KIND_HEAP
, UNITS_BYTES
,
1617 sizes
.mDeadBlockTable
,
1618 "Memory used by DMD's dead block list.");
1624 ~DMDReporter() = default;
1626 NS_IMPL_ISUPPORTS(DMDReporter
, nsIMemoryReporter
)
1629 } // namespace mozilla
1633 #ifdef MOZ_WIDGET_ANDROID
1634 class AndroidMemoryReporter final
: public nsIMemoryReporter
{
1638 AndroidMemoryReporter() = default;
1641 CollectReports(nsIHandleReportCallback
* aHandleReport
, nsISupports
* aData
,
1642 bool aAnonymize
) override
{
1643 if (!jni::IsAvailable() || jni::GetAPIVersion() < 23) {
1647 int32_t heap
= java::GeckoAppShell::GetMemoryUsage("summary.java-heap"_ns
);
1649 MOZ_COLLECT_REPORT("java-heap", KIND_OTHER
, UNITS_BYTES
, heap
* 1024,
1650 "The private Java Heap usage");
1656 ~AndroidMemoryReporter() = default;
1659 NS_IMPL_ISUPPORTS(AndroidMemoryReporter
, nsIMemoryReporter
)
1663 ** nsMemoryReporterManager implementation
1666 NS_IMPL_ISUPPORTS(nsMemoryReporterManager
, nsIMemoryReporterManager
,
1670 nsMemoryReporterManager::Init() {
1671 if (!NS_IsMainThread()) {
1675 // Under normal circumstances this function is only called once. However,
1676 // we've (infrequently) seen memory report dumps in crash reports that
1677 // suggest that this function is sometimes called multiple times. That in
1678 // turn means that multiple reporters of each kind are registered, which
1679 // leads to duplicated reports of individual measurements such as "resident",
1682 // It's unclear how these multiple calls can occur. The only plausible theory
1683 // so far is badly-written extensions, because this function is callable from
1684 // JS code via nsIMemoryReporter.idl.
1686 // Whatever the cause, it's a bad thing. So we protect against it with the
1688 static bool isInited
= false;
1690 NS_WARNING("nsMemoryReporterManager::Init() has already been called!");
1695 #ifdef HAVE_JEMALLOC_STATS
1696 RegisterStrongReporter(new JemallocHeapReporter());
1699 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
1700 RegisterStrongReporter(new VsizeReporter());
1701 RegisterStrongReporter(new ResidentReporter());
1704 #ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
1705 RegisterStrongReporter(new VsizeMaxContiguousReporter());
1708 #ifdef HAVE_RESIDENT_PEAK_REPORTER
1709 RegisterStrongReporter(new ResidentPeakReporter());
1712 #ifdef HAVE_RESIDENT_UNIQUE_REPORTER
1713 RegisterStrongReporter(new ResidentUniqueReporter());
1716 #ifdef HAVE_PAGE_FAULT_REPORTERS
1717 RegisterStrongReporter(new PageFaultsSoftReporter());
1718 RegisterStrongReporter(new PageFaultsHardReporter());
1721 #ifdef HAVE_PRIVATE_REPORTER
1722 RegisterStrongReporter(new PrivateReporter());
1725 #ifdef HAVE_SYSTEM_HEAP_REPORTER
1726 RegisterStrongReporter(new SystemHeapReporter());
1729 RegisterStrongReporter(new AtomTablesReporter());
1731 RegisterStrongReporter(new ThreadsReporter());
1734 RegisterStrongReporter(new DeadlockDetectorReporter());
1737 #ifdef MOZ_GECKO_PROFILER
1738 // We have to register this here rather than in profiler_init() because
1739 // profiler_init() runs prior to nsMemoryReporterManager's creation.
1740 RegisterStrongReporter(new GeckoProfilerReporter());
1744 RegisterStrongReporter(new mozilla::dmd::DMDReporter());
1748 RegisterStrongReporter(new WindowsAddressSpaceReporter());
1751 #ifdef MOZ_WIDGET_ANDROID
1752 RegisterStrongReporter(new AndroidMemoryReporter());
1756 nsMemoryInfoDumper::Initialize();
1759 // Report our own memory usage as well.
1760 RegisterWeakReporter(this);
1765 nsMemoryReporterManager::nsMemoryReporterManager()
1766 : mMutex("nsMemoryReporterManager::mMutex"),
1767 mIsRegistrationBlocked(false),
1768 mStrongReporters(new StrongReportersTable()),
1769 mWeakReporters(new WeakReportersTable()),
1770 mSavedStrongReporters(nullptr),
1771 mSavedWeakReporters(nullptr),
1773 mPendingProcessesState(nullptr),
1774 mPendingReportersState(nullptr)
1775 #ifdef HAVE_JEMALLOC_STATS
1777 mThreadPool(do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID
))
1782 nsMemoryReporterManager::~nsMemoryReporterManager() {
1783 delete mStrongReporters
;
1784 delete mWeakReporters
;
1785 NS_ASSERTION(!mSavedStrongReporters
, "failed to restore strong reporters");
1786 NS_ASSERTION(!mSavedWeakReporters
, "failed to restore weak reporters");
1790 nsMemoryReporterManager::CollectReports(nsIHandleReportCallback
* aHandleReport
,
1791 nsISupports
* aData
, bool aAnonymize
) {
1792 size_t n
= MallocSizeOf(this);
1794 mozilla::MutexAutoLock
autoLock(mMutex
);
1795 n
+= mStrongReporters
->ShallowSizeOfIncludingThis(MallocSizeOf
);
1796 n
+= mWeakReporters
->ShallowSizeOfIncludingThis(MallocSizeOf
);
1799 MOZ_COLLECT_REPORT("explicit/memory-reporter-manager", KIND_HEAP
, UNITS_BYTES
,
1800 n
, "Memory used by the memory reporter infrastructure.");
1805 #ifdef DEBUG_CHILD_PROCESS_MEMORY_REPORTING
1806 # define MEMORY_REPORTING_LOG(format, ...) \
1807 printf_stderr("++++ MEMORY REPORTING: " format, ##__VA_ARGS__);
1809 # define MEMORY_REPORTING_LOG(...)
1813 nsMemoryReporterManager::GetReports(
1814 nsIHandleReportCallback
* aHandleReport
, nsISupports
* aHandleReportData
,
1815 nsIFinishReportingCallback
* aFinishReporting
,
1816 nsISupports
* aFinishReportingData
, bool aAnonymize
) {
1817 return GetReportsExtended(aHandleReport
, aHandleReportData
, aFinishReporting
,
1818 aFinishReportingData
, aAnonymize
,
1819 /* minimize = */ false,
1820 /* DMDident = */ u
""_ns
);
1824 nsMemoryReporterManager::GetReportsExtended(
1825 nsIHandleReportCallback
* aHandleReport
, nsISupports
* aHandleReportData
,
1826 nsIFinishReportingCallback
* aFinishReporting
,
1827 nsISupports
* aFinishReportingData
, bool aAnonymize
, bool aMinimize
,
1828 const nsAString
& aDMDDumpIdent
) {
1831 // Memory reporters are not necessarily threadsafe, so this function must
1832 // be called from the main thread.
1833 if (!NS_IsMainThread()) {
1837 uint32_t generation
= mNextGeneration
++;
1839 if (mPendingProcessesState
) {
1840 // A request is in flight. Don't start another one. And don't report
1841 // an error; just ignore it, and let the in-flight request finish.
1842 MEMORY_REPORTING_LOG("GetReports (gen=%u, s->gen=%u): abort\n", generation
,
1843 mPendingProcessesState
->mGeneration
);
1847 MEMORY_REPORTING_LOG("GetReports (gen=%u)\n", generation
);
1849 uint32_t concurrency
= Preferences::GetUint("memory.report_concurrency", 1);
1850 MOZ_ASSERT(concurrency
>= 1);
1851 if (concurrency
< 1) {
1854 mPendingProcessesState
= new PendingProcessesState(
1855 generation
, aAnonymize
, aMinimize
, concurrency
, aHandleReport
,
1856 aHandleReportData
, aFinishReporting
, aFinishReportingData
, aDMDDumpIdent
);
1859 nsCOMPtr
<nsIRunnable
> callback
=
1860 NewRunnableMethod("nsMemoryReporterManager::StartGettingReports", this,
1861 &nsMemoryReporterManager::StartGettingReports
);
1862 rv
= MinimizeMemoryUsage(callback
);
1864 rv
= StartGettingReports();
1870 nsresult
nsMemoryReporterManager::StartGettingReports() {
1871 PendingProcessesState
* s
= mPendingProcessesState
;
1874 // Get reports for this process.
1875 FILE* parentDMDFile
= nullptr;
1877 if (!s
->mDMDDumpIdent
.IsEmpty()) {
1878 rv
= nsMemoryInfoDumper::OpenDMDFile(s
->mDMDDumpIdent
, getpid(),
1880 if (NS_WARN_IF(NS_FAILED(rv
))) {
1881 // Proceed with the memory report as if DMD were disabled.
1882 parentDMDFile
= nullptr;
1888 GetReportsForThisProcessExtended(
1889 s
->mHandleReport
, s
->mHandleReportData
, s
->mAnonymize
, parentDMDFile
,
1890 s
->mFinishReporting
, s
->mFinishReportingData
);
1892 nsTArray
<dom::ContentParent
*> childWeakRefs
;
1893 dom::ContentParent::GetAll(childWeakRefs
);
1894 if (!childWeakRefs
.IsEmpty()) {
1895 // Request memory reports from child processes. This happens
1896 // after the parent report so that the parent's main thread will
1897 // be free to process the child reports, instead of causing them
1898 // to be buffered and consume (possibly scarce) memory.
1900 for (size_t i
= 0; i
< childWeakRefs
.Length(); ++i
) {
1901 s
->mChildrenPending
.AppendElement(childWeakRefs
[i
]);
1905 if (gfx::GPUProcessManager
* gpu
= gfx::GPUProcessManager::Get()) {
1906 if (RefPtr
<MemoryReportingProcess
> proc
= gpu
->GetProcessMemoryReporter()) {
1907 s
->mChildrenPending
.AppendElement(proc
.forget());
1911 if (RDDProcessManager
* rdd
= RDDProcessManager::Get()) {
1912 if (RefPtr
<MemoryReportingProcess
> proc
= rdd
->GetProcessMemoryReporter()) {
1913 s
->mChildrenPending
.AppendElement(proc
.forget());
1917 if (gfx::VRProcessManager
* vr
= gfx::VRProcessManager::Get()) {
1918 if (RefPtr
<MemoryReportingProcess
> proc
= vr
->GetProcessMemoryReporter()) {
1919 s
->mChildrenPending
.AppendElement(proc
.forget());
1923 if (!IsRegistrationBlocked() && net::gIOService
) {
1924 if (RefPtr
<MemoryReportingProcess
> proc
=
1925 net::gIOService
->GetSocketProcessMemoryReporter()) {
1926 s
->mChildrenPending
.AppendElement(proc
.forget());
1930 if (!IsRegistrationBlocked()) {
1931 if (RefPtr
<UtilityProcessManager
> utility
=
1932 UtilityProcessManager::GetIfExists()) {
1933 for (RefPtr
<UtilityProcessParent
>& parent
:
1934 utility
->GetAllProcessesProcessParent()) {
1935 if (RefPtr
<MemoryReportingProcess
> proc
=
1936 utility
->GetProcessMemoryReporter(parent
)) {
1937 s
->mChildrenPending
.AppendElement(proc
.forget());
1943 if (!s
->mChildrenPending
.IsEmpty()) {
1944 nsCOMPtr
<nsITimer
> timer
;
1945 rv
= NS_NewTimerWithFuncCallback(
1946 getter_AddRefs(timer
), TimeoutCallback
, this, kTimeoutLengthMS
,
1947 nsITimer::TYPE_ONE_SHOT
,
1948 "nsMemoryReporterManager::StartGettingReports");
1949 if (NS_WARN_IF(NS_FAILED(rv
))) {
1954 MOZ_ASSERT(!s
->mTimer
);
1955 s
->mTimer
.swap(timer
);
1961 void nsMemoryReporterManager::DispatchReporter(
1962 nsIMemoryReporter
* aReporter
, bool aIsAsync
,
1963 nsIHandleReportCallback
* aHandleReport
, nsISupports
* aHandleReportData
,
1965 MOZ_ASSERT(mPendingReportersState
);
1967 // Grab refs to everything used in the lambda function.
1968 RefPtr
<nsMemoryReporterManager
> self
= this;
1969 nsCOMPtr
<nsIMemoryReporter
> reporter
= aReporter
;
1970 nsCOMPtr
<nsIHandleReportCallback
> handleReport
= aHandleReport
;
1971 nsCOMPtr
<nsISupports
> handleReportData
= aHandleReportData
;
1973 nsCOMPtr
<nsIRunnable
> event
= NS_NewRunnableFunction(
1974 "nsMemoryReporterManager::DispatchReporter",
1975 [self
, reporter
, aIsAsync
, handleReport
, handleReportData
, aAnonymize
]() {
1976 reporter
->CollectReports(handleReport
, handleReportData
, aAnonymize
);
1982 NS_DispatchToMainThread(event
);
1983 mPendingReportersState
->mReportsPending
++;
1987 nsMemoryReporterManager::GetReportsForThisProcessExtended(
1988 nsIHandleReportCallback
* aHandleReport
, nsISupports
* aHandleReportData
,
1989 bool aAnonymize
, FILE* aDMDFile
,
1990 nsIFinishReportingCallback
* aFinishReporting
,
1991 nsISupports
* aFinishReportingData
) {
1992 // Memory reporters are not necessarily threadsafe, so this function must
1993 // be called from the main thread.
1994 if (!NS_IsMainThread()) {
1998 if (NS_WARN_IF(mPendingReportersState
)) {
1999 // Report is already in progress.
2000 return NS_ERROR_IN_PROGRESS
;
2005 // Clear DMD's reportedness state before running the memory
2006 // reporters, to avoid spurious twice-reported warnings.
2007 dmd::ClearReports();
2010 MOZ_ASSERT(!aDMDFile
);
2013 mPendingReportersState
= new PendingReportersState(
2014 aFinishReporting
, aFinishReportingData
, aDMDFile
);
2017 mozilla::MutexAutoLock
autoLock(mMutex
);
2019 for (const auto& entry
: *mStrongReporters
) {
2020 DispatchReporter(entry
.GetKey(), entry
.GetData(), aHandleReport
,
2021 aHandleReportData
, aAnonymize
);
2024 for (const auto& entry
: *mWeakReporters
) {
2025 nsCOMPtr
<nsIMemoryReporter
> reporter
= entry
.GetKey();
2026 DispatchReporter(reporter
, entry
.GetData(), aHandleReport
,
2027 aHandleReportData
, aAnonymize
);
2036 nsMemoryReporterManager::EndReport() {
2037 if (--mPendingReportersState
->mReportsPending
== 0) {
2039 if (mPendingReportersState
->mDMDFile
) {
2040 nsMemoryInfoDumper::DumpDMDToFile(mPendingReportersState
->mDMDFile
);
2043 if (mPendingProcessesState
) {
2044 // This is the parent process.
2045 EndProcessReport(mPendingProcessesState
->mGeneration
, true);
2047 mPendingReportersState
->mFinishReporting
->Callback(
2048 mPendingReportersState
->mFinishReportingData
);
2051 delete mPendingReportersState
;
2052 mPendingReportersState
= nullptr;
2058 nsMemoryReporterManager::PendingProcessesState
*
2059 nsMemoryReporterManager::GetStateForGeneration(uint32_t aGeneration
) {
2060 // Memory reporting only happens on the main thread.
2061 MOZ_RELEASE_ASSERT(NS_IsMainThread());
2063 PendingProcessesState
* s
= mPendingProcessesState
;
2066 // If we reach here, then:
2068 // - A child process reported back too late, and no subsequent request
2071 // So there's nothing to be done. Just ignore it.
2072 MEMORY_REPORTING_LOG("HandleChildReports: no request in flight (aGen=%u)\n",
2077 if (aGeneration
!= s
->mGeneration
) {
2078 // If we reach here, a child process must have reported back, too late,
2079 // while a subsequent (higher-numbered) request is in flight. Again,
2081 MOZ_ASSERT(aGeneration
< s
->mGeneration
);
2082 MEMORY_REPORTING_LOG(
2083 "HandleChildReports: gen mismatch (aGen=%u, s->gen=%u)\n", aGeneration
,
2091 // This function has no return value. If something goes wrong, there's no
2092 // clear place to report the problem to, but that's ok -- we will end up
2093 // hitting the timeout and executing TimeoutCallback().
2094 void nsMemoryReporterManager::HandleChildReport(
2095 uint32_t aGeneration
, const dom::MemoryReport
& aChildReport
) {
2096 PendingProcessesState
* s
= GetStateForGeneration(aGeneration
);
2101 // Child reports should have a non-empty process.
2102 MOZ_ASSERT(!aChildReport
.process().IsEmpty());
2104 // If the call fails, ignore and continue.
2105 s
->mHandleReport
->Callback(aChildReport
.process(), aChildReport
.path(),
2106 aChildReport
.kind(), aChildReport
.units(),
2107 aChildReport
.amount(), aChildReport
.desc(),
2108 s
->mHandleReportData
);
2112 bool nsMemoryReporterManager::StartChildReport(
2113 mozilla::MemoryReportingProcess
* aChild
,
2114 const PendingProcessesState
* aState
) {
2115 if (!aChild
->IsAlive()) {
2116 MEMORY_REPORTING_LOG(
2117 "StartChildReports (gen=%u): child exited before"
2118 " its report was started\n",
2119 aState
->mGeneration
);
2123 Maybe
<mozilla::ipc::FileDescriptor
> dmdFileDesc
;
2125 if (!aState
->mDMDDumpIdent
.IsEmpty()) {
2126 FILE* dmdFile
= nullptr;
2127 nsresult rv
= nsMemoryInfoDumper::OpenDMDFile(aState
->mDMDDumpIdent
,
2128 aChild
->Pid(), &dmdFile
);
2129 if (NS_WARN_IF(NS_FAILED(rv
))) {
2130 // Proceed with the memory report as if DMD were disabled.
2134 dmdFileDesc
= Some(mozilla::ipc::FILEToFileDescriptor(dmdFile
));
2139 return aChild
->SendRequestMemoryReport(
2140 aState
->mGeneration
, aState
->mAnonymize
, aState
->mMinimize
, dmdFileDesc
);
2143 void nsMemoryReporterManager::EndProcessReport(uint32_t aGeneration
,
2145 PendingProcessesState
* s
= GetStateForGeneration(aGeneration
);
2150 MOZ_ASSERT(s
->mNumProcessesRunning
> 0);
2151 s
->mNumProcessesRunning
--;
2152 s
->mNumProcessesCompleted
++;
2153 MEMORY_REPORTING_LOG(
2154 "HandleChildReports (aGen=%u): process %u %s"
2155 " (%u running, %u pending)\n",
2156 aGeneration
, s
->mNumProcessesCompleted
,
2157 aSuccess
? "completed" : "exited during report", s
->mNumProcessesRunning
,
2158 static_cast<unsigned>(s
->mChildrenPending
.Length()));
2160 // Start pending children up to the concurrency limit.
2161 while (s
->mNumProcessesRunning
< s
->mConcurrencyLimit
&&
2162 !s
->mChildrenPending
.IsEmpty()) {
2163 // Pop last element from s->mChildrenPending
2164 const RefPtr
<MemoryReportingProcess
> nextChild
=
2165 s
->mChildrenPending
.PopLastElement();
2166 // Start report (if the child is still alive).
2167 if (StartChildReport(nextChild
, s
)) {
2168 ++s
->mNumProcessesRunning
;
2169 MEMORY_REPORTING_LOG(
2170 "HandleChildReports (aGen=%u): started child report"
2171 " (%u running, %u pending)\n",
2172 aGeneration
, s
->mNumProcessesRunning
,
2173 static_cast<unsigned>(s
->mChildrenPending
.Length()));
2177 // If all the child processes (if any) have reported, we can cancel
2178 // the timer (if started) and finish up. Otherwise, just return.
2179 if (s
->mNumProcessesRunning
== 0) {
2180 MOZ_ASSERT(s
->mChildrenPending
.IsEmpty());
2182 s
->mTimer
->Cancel();
2189 void nsMemoryReporterManager::TimeoutCallback(nsITimer
* aTimer
, void* aData
) {
2190 nsMemoryReporterManager
* mgr
= static_cast<nsMemoryReporterManager
*>(aData
);
2191 PendingProcessesState
* s
= mgr
->mPendingProcessesState
;
2193 // Release assert because: if the pointer is null we're about to
2194 // crash regardless of DEBUG, and this way the compiler doesn't
2195 // complain about unused variables.
2196 MOZ_RELEASE_ASSERT(s
, "mgr->mPendingProcessesState");
2197 MEMORY_REPORTING_LOG("TimeoutCallback (s->gen=%u; %u running, %u pending)\n",
2198 s
->mGeneration
, s
->mNumProcessesRunning
,
2199 static_cast<unsigned>(s
->mChildrenPending
.Length()));
2201 // We don't bother sending any kind of cancellation message to the child
2202 // processes that haven't reported back.
2203 mgr
->FinishReporting();
2206 nsresult
nsMemoryReporterManager::FinishReporting() {
2207 // Memory reporting only happens on the main thread.
2208 if (!NS_IsMainThread()) {
2212 MOZ_ASSERT(mPendingProcessesState
);
2213 MEMORY_REPORTING_LOG("FinishReporting (s->gen=%u; %u processes reported)\n",
2214 mPendingProcessesState
->mGeneration
,
2215 mPendingProcessesState
->mNumProcessesCompleted
);
2217 // Call this before deleting |mPendingProcessesState|. That way, if
2218 // |mFinishReportData| calls GetReports(), it will silently abort, as
2220 nsresult rv
= mPendingProcessesState
->mFinishReporting
->Callback(
2221 mPendingProcessesState
->mFinishReportingData
);
2223 delete mPendingProcessesState
;
2224 mPendingProcessesState
= nullptr;
2228 nsMemoryReporterManager::PendingProcessesState::PendingProcessesState(
2229 uint32_t aGeneration
, bool aAnonymize
, bool aMinimize
,
2230 uint32_t aConcurrencyLimit
, nsIHandleReportCallback
* aHandleReport
,
2231 nsISupports
* aHandleReportData
,
2232 nsIFinishReportingCallback
* aFinishReporting
,
2233 nsISupports
* aFinishReportingData
, const nsAString
& aDMDDumpIdent
)
2234 : mGeneration(aGeneration
),
2235 mAnonymize(aAnonymize
),
2236 mMinimize(aMinimize
),
2238 mNumProcessesRunning(1), // reporting starts with the parent
2239 mNumProcessesCompleted(0),
2240 mConcurrencyLimit(aConcurrencyLimit
),
2241 mHandleReport(aHandleReport
),
2242 mHandleReportData(aHandleReportData
),
2243 mFinishReporting(aFinishReporting
),
2244 mFinishReportingData(aFinishReportingData
),
2245 mDMDDumpIdent(aDMDDumpIdent
) {}
2247 static void CrashIfRefcountIsZero(nsISupports
* aObj
) {
2248 // This will probably crash if the object's refcount is 0.
2249 uint32_t refcnt
= NS_ADDREF(aObj
);
2251 MOZ_CRASH("CrashIfRefcountIsZero: refcount is zero");
2256 nsresult
nsMemoryReporterManager::RegisterReporterHelper(
2257 nsIMemoryReporter
* aReporter
, bool aForce
, bool aStrong
, bool aIsAsync
) {
2258 // This method is thread-safe.
2259 mozilla::MutexAutoLock
autoLock(mMutex
);
2261 if (mIsRegistrationBlocked
&& !aForce
) {
2262 return NS_ERROR_FAILURE
;
2265 if (mStrongReporters
->Contains(aReporter
) ||
2266 mWeakReporters
->Contains(aReporter
)) {
2267 return NS_ERROR_FAILURE
;
2270 // If |aStrong| is true, |aReporter| may have a refcnt of 0, so we take
2271 // a kung fu death grip before calling PutEntry. Otherwise, if PutEntry
2272 // addref'ed and released |aReporter| before finally addref'ing it for
2273 // good, it would free aReporter! The kung fu death grip could itself be
2274 // problematic if PutEntry didn't addref |aReporter| (because then when the
2275 // death grip goes out of scope, we would delete the reporter). In debug
2276 // mode, we check that this doesn't happen.
2278 // If |aStrong| is false, we require that |aReporter| have a non-zero
2282 nsCOMPtr
<nsIMemoryReporter
> kungFuDeathGrip
= aReporter
;
2283 mStrongReporters
->InsertOrUpdate(aReporter
, aIsAsync
);
2284 CrashIfRefcountIsZero(aReporter
);
2286 CrashIfRefcountIsZero(aReporter
);
2287 nsCOMPtr
<nsIXPConnectWrappedJS
> jsComponent
= do_QueryInterface(aReporter
);
2289 // We cannot allow non-native reporters (WrappedJS), since we'll be
2290 // holding onto a raw pointer, which would point to the wrapper,
2291 // and that wrapper is likely to go away as soon as this register
2292 // call finishes. This would then lead to subsequent crashes in
2293 // CollectReports().
2294 return NS_ERROR_XPC_BAD_CONVERT_JS
;
2296 mWeakReporters
->InsertOrUpdate(aReporter
, aIsAsync
);
2303 nsMemoryReporterManager::RegisterStrongReporter(nsIMemoryReporter
* aReporter
) {
2304 return RegisterReporterHelper(aReporter
, /* force = */ false,
2305 /* strong = */ true,
2306 /* async = */ false);
2310 nsMemoryReporterManager::RegisterStrongAsyncReporter(
2311 nsIMemoryReporter
* aReporter
) {
2312 return RegisterReporterHelper(aReporter
, /* force = */ false,
2313 /* strong = */ true,
2314 /* async = */ true);
2318 nsMemoryReporterManager::RegisterWeakReporter(nsIMemoryReporter
* aReporter
) {
2319 return RegisterReporterHelper(aReporter
, /* force = */ false,
2320 /* strong = */ false,
2321 /* async = */ false);
2325 nsMemoryReporterManager::RegisterWeakAsyncReporter(
2326 nsIMemoryReporter
* aReporter
) {
2327 return RegisterReporterHelper(aReporter
, /* force = */ false,
2328 /* strong = */ false,
2329 /* async = */ true);
2333 nsMemoryReporterManager::RegisterStrongReporterEvenIfBlocked(
2334 nsIMemoryReporter
* aReporter
) {
2335 return RegisterReporterHelper(aReporter
, /* force = */ true,
2336 /* strong = */ true,
2337 /* async = */ false);
2341 nsMemoryReporterManager::UnregisterStrongReporter(
2342 nsIMemoryReporter
* aReporter
) {
2343 // This method is thread-safe.
2344 mozilla::MutexAutoLock
autoLock(mMutex
);
2346 MOZ_ASSERT(!mWeakReporters
->Contains(aReporter
));
2348 if (mStrongReporters
->Contains(aReporter
)) {
2349 mStrongReporters
->Remove(aReporter
);
2353 // We don't register new reporters when the block is in place, but we do
2354 // unregister existing reporters. This is so we don't keep holding strong
2355 // references that these reporters aren't expecting (which can keep them
2356 // alive longer than intended).
2357 if (mSavedStrongReporters
&& mSavedStrongReporters
->Contains(aReporter
)) {
2358 mSavedStrongReporters
->Remove(aReporter
);
2362 return NS_ERROR_FAILURE
;
2366 nsMemoryReporterManager::UnregisterWeakReporter(nsIMemoryReporter
* aReporter
) {
2367 // This method is thread-safe.
2368 mozilla::MutexAutoLock
autoLock(mMutex
);
2370 MOZ_ASSERT(!mStrongReporters
->Contains(aReporter
));
2372 if (mWeakReporters
->Contains(aReporter
)) {
2373 mWeakReporters
->Remove(aReporter
);
2377 // We don't register new reporters when the block is in place, but we do
2378 // unregister existing reporters. This is so we don't keep holding weak
2379 // references that the old reporters aren't expecting (which can end up as
2380 // dangling pointers that lead to use-after-frees).
2381 if (mSavedWeakReporters
&& mSavedWeakReporters
->Contains(aReporter
)) {
2382 mSavedWeakReporters
->Remove(aReporter
);
2386 return NS_ERROR_FAILURE
;
2390 nsMemoryReporterManager::BlockRegistrationAndHideExistingReporters() {
2391 // This method is thread-safe.
2392 mozilla::MutexAutoLock
autoLock(mMutex
);
2393 if (mIsRegistrationBlocked
) {
2394 return NS_ERROR_FAILURE
;
2396 mIsRegistrationBlocked
= true;
2398 // Hide the existing reporters, saving them for later restoration.
2399 MOZ_ASSERT(!mSavedStrongReporters
);
2400 MOZ_ASSERT(!mSavedWeakReporters
);
2401 mSavedStrongReporters
= mStrongReporters
;
2402 mSavedWeakReporters
= mWeakReporters
;
2403 mStrongReporters
= new StrongReportersTable();
2404 mWeakReporters
= new WeakReportersTable();
2410 nsMemoryReporterManager::UnblockRegistrationAndRestoreOriginalReporters() {
2411 // This method is thread-safe.
2412 mozilla::MutexAutoLock
autoLock(mMutex
);
2413 if (!mIsRegistrationBlocked
) {
2414 return NS_ERROR_FAILURE
;
2417 // Banish the current reporters, and restore the hidden ones.
2418 delete mStrongReporters
;
2419 delete mWeakReporters
;
2420 mStrongReporters
= mSavedStrongReporters
;
2421 mWeakReporters
= mSavedWeakReporters
;
2422 mSavedStrongReporters
= nullptr;
2423 mSavedWeakReporters
= nullptr;
2425 mIsRegistrationBlocked
= false;
2430 nsMemoryReporterManager::GetVsize(int64_t* aVsize
) {
2431 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2432 return VsizeDistinguishedAmount(aVsize
);
2435 return NS_ERROR_NOT_AVAILABLE
;
2440 nsMemoryReporterManager::GetVsizeMaxContiguous(int64_t* aAmount
) {
2441 #ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
2442 return VsizeMaxContiguousDistinguishedAmount(aAmount
);
2445 return NS_ERROR_NOT_AVAILABLE
;
2450 nsMemoryReporterManager::GetResident(int64_t* aAmount
) {
2451 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2452 return ResidentDistinguishedAmount(aAmount
);
2455 return NS_ERROR_NOT_AVAILABLE
;
2460 nsMemoryReporterManager::GetResidentFast(int64_t* aAmount
) {
2461 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2462 return ResidentFastDistinguishedAmount(aAmount
);
2465 return NS_ERROR_NOT_AVAILABLE
;
2470 int64_t nsMemoryReporterManager::ResidentFast() {
2471 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2473 nsresult rv
= ResidentFastDistinguishedAmount(&amount
);
2474 NS_ENSURE_SUCCESS(rv
, 0);
2482 nsMemoryReporterManager::GetResidentPeak(int64_t* aAmount
) {
2483 #ifdef HAVE_RESIDENT_PEAK_REPORTER
2484 return ResidentPeakDistinguishedAmount(aAmount
);
2487 return NS_ERROR_NOT_AVAILABLE
;
2492 int64_t nsMemoryReporterManager::ResidentPeak() {
2493 #ifdef HAVE_RESIDENT_PEAK_REPORTER
2495 nsresult rv
= ResidentPeakDistinguishedAmount(&amount
);
2496 NS_ENSURE_SUCCESS(rv
, 0);
2504 nsMemoryReporterManager::GetResidentUnique(int64_t* aAmount
) {
2505 #ifdef HAVE_RESIDENT_UNIQUE_REPORTER
2506 return ResidentUniqueDistinguishedAmount(aAmount
);
2509 return NS_ERROR_NOT_AVAILABLE
;
2515 int64_t nsMemoryReporterManager::PhysicalFootprint(mach_port_t aPort
) {
2517 nsresult rv
= PhysicalFootprintAmount(&amount
, aPort
);
2518 NS_ENSURE_SUCCESS(rv
, 0);
2535 #if defined(XP_WIN) || defined(XP_MACOSX) || defined(XP_LINUX)
2538 int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg aProcess
) {
2540 nsresult rv
= ResidentUniqueDistinguishedAmount(&amount
, aProcess
);
2541 NS_ENSURE_SUCCESS(rv
, 0);
2548 int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg
) {
2549 # ifdef HAVE_RESIDENT_UNIQUE_REPORTER
2551 nsresult rv
= ResidentUniqueDistinguishedAmount(&amount
);
2552 NS_ENSURE_SUCCESS(rv
, 0);
2559 #endif // XP_{WIN, MACOSX, LINUX, *}
2562 nsMemoryReporterManager::GetHeapAllocated(int64_t* aAmount
) {
2563 #ifdef HAVE_JEMALLOC_STATS
2564 jemalloc_stats_t stats
;
2565 jemalloc_stats(&stats
);
2566 *aAmount
= stats
.allocated
;
2570 return NS_ERROR_NOT_AVAILABLE
;
2574 // This has UNITS_PERCENTAGE, so it is multiplied by 100x.
2576 nsMemoryReporterManager::GetHeapOverheadFraction(int64_t* aAmount
) {
2577 #ifdef HAVE_JEMALLOC_STATS
2578 jemalloc_stats_t stats
;
2579 jemalloc_stats(&stats
);
2580 *aAmount
= HeapOverheadFraction(&stats
);
2584 return NS_ERROR_NOT_AVAILABLE
;
2588 [[nodiscard
]] static nsresult
GetInfallibleAmount(InfallibleAmountFn aAmountFn
,
2591 *aAmount
= aAmountFn();
2595 return NS_ERROR_NOT_AVAILABLE
;
2599 nsMemoryReporterManager::GetJSMainRuntimeGCHeap(int64_t* aAmount
) {
2600 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeGCHeap
, aAmount
);
2604 nsMemoryReporterManager::GetJSMainRuntimeTemporaryPeak(int64_t* aAmount
) {
2605 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeTemporaryPeak
, aAmount
);
2609 nsMemoryReporterManager::GetJSMainRuntimeCompartmentsSystem(int64_t* aAmount
) {
2610 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeCompartmentsSystem
,
2615 nsMemoryReporterManager::GetJSMainRuntimeCompartmentsUser(int64_t* aAmount
) {
2616 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeCompartmentsUser
,
2621 nsMemoryReporterManager::GetJSMainRuntimeRealmsSystem(int64_t* aAmount
) {
2622 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeRealmsSystem
, aAmount
);
2626 nsMemoryReporterManager::GetJSMainRuntimeRealmsUser(int64_t* aAmount
) {
2627 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeRealmsUser
, aAmount
);
2631 nsMemoryReporterManager::GetImagesContentUsedUncompressed(int64_t* aAmount
) {
2632 return GetInfallibleAmount(mAmountFns
.mImagesContentUsedUncompressed
,
2637 nsMemoryReporterManager::GetStorageSQLite(int64_t* aAmount
) {
2638 return GetInfallibleAmount(mAmountFns
.mStorageSQLite
, aAmount
);
2642 nsMemoryReporterManager::GetLowMemoryEventsPhysical(int64_t* aAmount
) {
2643 return GetInfallibleAmount(mAmountFns
.mLowMemoryEventsPhysical
, aAmount
);
2647 nsMemoryReporterManager::GetGhostWindows(int64_t* aAmount
) {
2648 return GetInfallibleAmount(mAmountFns
.mGhostWindows
, aAmount
);
2652 nsMemoryReporterManager::GetPageFaultsHard(int64_t* aAmount
) {
2653 #ifdef HAVE_PAGE_FAULT_REPORTERS
2654 return PageFaultsHardDistinguishedAmount(aAmount
);
2657 return NS_ERROR_NOT_AVAILABLE
;
2662 nsMemoryReporterManager::GetHasMozMallocUsableSize(bool* aHas
) {
2663 void* p
= malloc(16);
2665 return NS_ERROR_OUT_OF_MEMORY
;
2667 size_t usable
= moz_malloc_usable_size(p
);
2669 *aHas
= !!(usable
> 0);
2674 nsMemoryReporterManager::GetIsDMDEnabled(bool* aIsEnabled
) {
2678 *aIsEnabled
= false;
2684 nsMemoryReporterManager::GetIsDMDRunning(bool* aIsRunning
) {
2686 *aIsRunning
= dmd::IsRunning();
2688 *aIsRunning
= false;
2696 * This runnable lets us implement
2697 * nsIMemoryReporterManager::MinimizeMemoryUsage(). We fire a heap-minimize
2698 * notification, spin the event loop, and repeat this process a few times.
2700 * When this sequence finishes, we invoke the callback function passed to the
2701 * runnable's constructor.
2703 class MinimizeMemoryUsageRunnable
: public Runnable
{
2705 explicit MinimizeMemoryUsageRunnable(nsIRunnable
* aCallback
)
2706 : mozilla::Runnable("MinimizeMemoryUsageRunnable"),
2707 mCallback(aCallback
),
2708 mRemainingIters(sNumIters
) {}
2710 NS_IMETHOD
Run() override
{
2711 nsCOMPtr
<nsIObserverService
> os
= services::GetObserverService();
2713 return NS_ERROR_FAILURE
;
2716 if (mRemainingIters
== 0) {
2717 os
->NotifyObservers(nullptr, "after-minimize-memory-usage",
2718 u
"MinimizeMemoryUsageRunnable");
2725 os
->NotifyObservers(nullptr, "memory-pressure", u
"heap-minimize");
2727 NS_DispatchToMainThread(this);
2733 // Send sNumIters heap-minimize notifications, spinning the event
2734 // loop after each notification (see bug 610166 comment 12 for an
2735 // explanation), because one notification doesn't cut it.
2736 static const uint32_t sNumIters
= 3;
2738 nsCOMPtr
<nsIRunnable
> mCallback
;
2739 uint32_t mRemainingIters
;
2745 nsMemoryReporterManager::MinimizeMemoryUsage(nsIRunnable
* aCallback
) {
2746 RefPtr
<MinimizeMemoryUsageRunnable
> runnable
=
2747 new MinimizeMemoryUsageRunnable(aCallback
);
2749 return NS_DispatchToMainThread(runnable
);
2753 nsMemoryReporterManager::SizeOfTab(mozIDOMWindowProxy
* aTopWindow
,
2754 int64_t* aJSObjectsSize
,
2755 int64_t* aJSStringsSize
,
2756 int64_t* aJSOtherSize
, int64_t* aDomSize
,
2757 int64_t* aStyleSize
, int64_t* aOtherSize
,
2758 int64_t* aTotalSize
, double* aJSMilliseconds
,
2759 double* aNonJSMilliseconds
) {
2760 nsCOMPtr
<nsIGlobalObject
> global
= do_QueryInterface(aTopWindow
);
2761 auto* piWindow
= nsPIDOMWindowOuter::From(aTopWindow
);
2762 if (NS_WARN_IF(!global
) || NS_WARN_IF(!piWindow
)) {
2763 return NS_ERROR_FAILURE
;
2766 TimeStamp t1
= TimeStamp::Now();
2768 // Measure JS memory consumption (and possibly some non-JS consumption, via
2769 // |jsPrivateSize|).
2770 size_t jsObjectsSize
, jsStringsSize
, jsPrivateSize
, jsOtherSize
;
2771 nsresult rv
= mSizeOfTabFns
.mJS(global
->GetGlobalJSObject(), &jsObjectsSize
,
2772 &jsStringsSize
, &jsPrivateSize
, &jsOtherSize
);
2773 if (NS_WARN_IF(NS_FAILED(rv
))) {
2777 TimeStamp t2
= TimeStamp::Now();
2779 // Measure non-JS memory consumption.
2780 size_t domSize
, styleSize
, otherSize
;
2781 rv
= mSizeOfTabFns
.mNonJS(piWindow
, &domSize
, &styleSize
, &otherSize
);
2782 if (NS_WARN_IF(NS_FAILED(rv
))) {
2786 TimeStamp t3
= TimeStamp::Now();
2792 *aTotalSize += (n); \
2794 DO(aJSObjectsSize
, jsObjectsSize
);
2795 DO(aJSStringsSize
, jsStringsSize
);
2796 DO(aJSOtherSize
, jsOtherSize
);
2797 DO(aDomSize
, jsPrivateSize
+ domSize
);
2798 DO(aStyleSize
, styleSize
);
2799 DO(aOtherSize
, otherSize
);
2802 *aJSMilliseconds
= (t2
- t1
).ToMilliseconds();
2803 *aNonJSMilliseconds
= (t3
- t2
).ToMilliseconds();
// Fetches (or lazily creates) the singleton manager into |mgr|, returning
// NS_ERROR_FAILURE from the enclosing function when unavailable. Used by the
// registration helpers below.
// NOTE(review): the null-check line was lost in a garbled paste and is
// reconstructed here — verify against upstream.
#define GET_MEMORY_REPORTER_MANAGER(mgr)      \
  RefPtr<nsMemoryReporterManager> mgr =       \
      nsMemoryReporterManager::GetOrCreate(); \
  if (!mgr) {                                 \
    return NS_ERROR_FAILURE;                  \
  }
2817 nsresult
RegisterStrongMemoryReporter(nsIMemoryReporter
* aReporter
) {
2818 // Hold a strong reference to the argument to make sure it gets released if
2819 // we return early below.
2820 nsCOMPtr
<nsIMemoryReporter
> reporter
= aReporter
;
2821 GET_MEMORY_REPORTER_MANAGER(mgr
)
2822 return mgr
->RegisterStrongReporter(reporter
);
2825 nsresult
RegisterStrongAsyncMemoryReporter(nsIMemoryReporter
* aReporter
) {
2826 // Hold a strong reference to the argument to make sure it gets released if
2827 // we return early below.
2828 nsCOMPtr
<nsIMemoryReporter
> reporter
= aReporter
;
2829 GET_MEMORY_REPORTER_MANAGER(mgr
)
2830 return mgr
->RegisterStrongAsyncReporter(reporter
);
2833 nsresult
RegisterWeakMemoryReporter(nsIMemoryReporter
* aReporter
) {
2834 GET_MEMORY_REPORTER_MANAGER(mgr
)
2835 return mgr
->RegisterWeakReporter(aReporter
);
2838 nsresult
RegisterWeakAsyncMemoryReporter(nsIMemoryReporter
* aReporter
) {
2839 GET_MEMORY_REPORTER_MANAGER(mgr
)
2840 return mgr
->RegisterWeakAsyncReporter(aReporter
);
2843 nsresult
UnregisterStrongMemoryReporter(nsIMemoryReporter
* aReporter
) {
2844 GET_MEMORY_REPORTER_MANAGER(mgr
)
2845 return mgr
->UnregisterStrongReporter(aReporter
);
2848 nsresult
UnregisterWeakMemoryReporter(nsIMemoryReporter
* aReporter
) {
2849 GET_MEMORY_REPORTER_MANAGER(mgr
)
2850 return mgr
->UnregisterWeakReporter(aReporter
);
2853 // Macro for generating functions that register distinguished amount functions
2854 // with the memory reporter manager.
2855 #define DEFINE_REGISTER_DISTINGUISHED_AMOUNT(kind, name) \
2856 nsresult Register##name##DistinguishedAmount(kind##AmountFn aAmountFn) { \
2857 GET_MEMORY_REPORTER_MANAGER(mgr) \
2858 mgr->mAmountFns.m##name = aAmountFn; \
2862 // Macro for generating functions that unregister distinguished amount
2863 // functions with the memory reporter manager.
2864 #define DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(name) \
2865 nsresult Unregister##name##DistinguishedAmount() { \
2866 GET_MEMORY_REPORTER_MANAGER(mgr) \
2867 mgr->mAmountFns.m##name = nullptr; \
2871 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, JSMainRuntimeGCHeap
)
2872 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, JSMainRuntimeTemporaryPeak
)
2873 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
,
2874 JSMainRuntimeCompartmentsSystem
)
2875 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, JSMainRuntimeCompartmentsUser
)
2876 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, JSMainRuntimeRealmsSystem
)
2877 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, JSMainRuntimeRealmsUser
)
2879 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, ImagesContentUsedUncompressed
)
2880 DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(ImagesContentUsedUncompressed
)
2882 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, StorageSQLite
)
2883 DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(StorageSQLite
)
2885 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, LowMemoryEventsPhysical
)
2887 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, GhostWindows
)
2889 #undef DEFINE_REGISTER_DISTINGUISHED_AMOUNT
2890 #undef DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT
2892 #define DEFINE_REGISTER_SIZE_OF_TAB(name) \
2893 nsresult Register##name##SizeOfTab(name##SizeOfTabFn aSizeOfTabFn) { \
2894 GET_MEMORY_REPORTER_MANAGER(mgr) \
2895 mgr->mSizeOfTabFns.m##name = aSizeOfTabFn; \
2899 DEFINE_REGISTER_SIZE_OF_TAB(JS
);
2900 DEFINE_REGISTER_SIZE_OF_TAB(NonJS
);
2902 #undef DEFINE_REGISTER_SIZE_OF_TAB
2904 #undef GET_MEMORY_REPORTER_MANAGER
2906 } // namespace mozilla