1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "nsMemoryReporterManager.h"
9 #include "nsAtomTable.h"
11 #include "nsCOMArray.h"
12 #include "nsPrintfCString.h"
13 #include "nsProxyRelease.h"
14 #include "nsServiceManagerUtils.h"
16 #include "nsThreadManager.h"
17 #include "nsThreadUtils.h"
18 #include "nsPIDOMWindow.h"
19 #include "nsIObserverService.h"
20 #include "nsIOService.h"
21 #include "nsIGlobalObject.h"
22 #include "nsIXPConnect.h"
23 #ifdef MOZ_GECKO_PROFILER
24 # include "GeckoProfilerReporter.h"
26 #if defined(XP_UNIX) || defined(MOZ_DMD)
27 # include "nsMemoryInfoDumper.h"
31 #include "VRProcessManager.h"
32 #include "mozilla/Attributes.h"
33 #include "mozilla/MemoryReportingProcess.h"
34 #include "mozilla/PodOperations.h"
35 #include "mozilla/Preferences.h"
36 #include "mozilla/RDDProcessManager.h"
37 #include "mozilla/ResultExtensions.h"
38 #include "mozilla/Services.h"
39 #include "mozilla/Telemetry.h"
40 #include "mozilla/UniquePtrExtensions.h"
41 #include "mozilla/dom/MemoryReportTypes.h"
42 #include "mozilla/dom/ContentParent.h"
43 #include "mozilla/gfx/GPUProcessManager.h"
44 #include "mozilla/ipc/UtilityProcessManager.h"
45 #include "mozilla/ipc/FileDescriptorUtils.h"
50 #ifdef MOZ_WIDGET_ANDROID
51 # include "mozilla/java/GeckoAppShellWrappers.h"
52 # include "mozilla/jni/Utils.h"
56 # include "mozilla/MemoryInfo.h"
60 # define getpid _getpid
66 using namespace mozilla
;
67 using namespace mozilla::ipc
;
72 # include "mozilla/MemoryMapping.h"
78 [[nodiscard
]] static nsresult
GetProcSelfStatmField(int aField
, int64_t* aN
) {
79 // There are more than two fields, but we're only interested in the first
81 static const int MAX_FIELD
= 2;
82 size_t fields
[MAX_FIELD
];
83 MOZ_ASSERT(aField
< MAX_FIELD
, "bad field number");
84 FILE* f
= fopen("/proc/self/statm", "r");
86 int nread
= fscanf(f
, "%zu %zu", &fields
[0], &fields
[1]);
88 if (nread
== MAX_FIELD
) {
89 *aN
= fields
[aField
] * getpagesize();
93 return NS_ERROR_FAILURE
;
96 [[nodiscard
]] static nsresult
GetProcSelfSmapsPrivate(int64_t* aN
, pid_t aPid
) {
97 // You might be tempted to calculate USS by subtracting the "shared" value
98 // from the "resident" value in /proc/<pid>/statm. But at least on Linux,
99 // statm's "shared" value actually counts pages backed by files, which has
100 // little to do with whether the pages are actually shared. /proc/self/smaps
101 // on the other hand appears to give us the correct information.
103 nsTArray
<MemoryMapping
> mappings(1024);
104 MOZ_TRY(GetMemoryMappings(mappings
, aPid
));
107 for (auto& mapping
: mappings
) {
108 amount
+= mapping
.Private_Clean();
109 amount
+= mapping
.Private_Dirty();
115 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
116 [[nodiscard
]] static nsresult
VsizeDistinguishedAmount(int64_t* aN
) {
117 return GetProcSelfStatmField(0, aN
);
120 [[nodiscard
]] static nsresult
ResidentDistinguishedAmount(int64_t* aN
) {
121 return GetProcSelfStatmField(1, aN
);
124 [[nodiscard
]] static nsresult
ResidentFastDistinguishedAmount(int64_t* aN
) {
125 return ResidentDistinguishedAmount(aN
);
128 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
129 [[nodiscard
]] static nsresult
ResidentUniqueDistinguishedAmount(
130 int64_t* aN
, pid_t aPid
= 0) {
131 return GetProcSelfSmapsPrivate(aN
, aPid
);
134 # ifdef HAVE_MALLINFO
135 # define HAVE_SYSTEM_HEAP_REPORTER 1
136 [[nodiscard
]] static nsresult
SystemHeapSize(int64_t* aSizeOut
) {
137 struct mallinfo info
= mallinfo();
139 // The documentation in the glibc man page makes it sound like |uordblks|
140 // would suffice, but that only gets the small allocations that are put in
141 // the brk heap. We need |hblkhd| as well to get the larger allocations
144 // The fields in |struct mallinfo| are all |int|, <sigh>, so it is
145 // unreliable if memory usage gets high. However, the system heap size on
146 // Linux should usually be zero (so long as jemalloc is enabled) so that
147 // shouldn't be a problem. Nonetheless, cast the |int|s to |size_t| before
148 // adding them to provide a small amount of extra overflow protection.
149 *aSizeOut
= size_t(info
.hblkhd
) + size_t(info
.uordblks
);
154 #elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
155 defined(__OpenBSD__) || defined(__FreeBSD_kernel__)
157 # include <sys/param.h>
158 # include <sys/sysctl.h>
159 # if defined(__DragonFly__) || defined(__FreeBSD__) || \
160 defined(__FreeBSD_kernel__)
161 # include <sys/user.h>
166 # if defined(__NetBSD__)
168 # define KERN_PROC KERN_PROC2
169 # define KINFO_PROC struct kinfo_proc2
171 # define KINFO_PROC struct kinfo_proc
174 # if defined(__DragonFly__)
175 # define KP_SIZE(kp) (kp.kp_vm_map_size)
176 # define KP_RSS(kp) (kp.kp_vm_rssize * getpagesize())
177 # elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
178 # define KP_SIZE(kp) (kp.ki_size)
179 # define KP_RSS(kp) (kp.ki_rssize * getpagesize())
180 # elif defined(__NetBSD__)
181 # define KP_SIZE(kp) (kp.p_vm_msize * getpagesize())
182 # define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
183 # elif defined(__OpenBSD__)
184 # define KP_SIZE(kp) \
185 ((kp.p_vm_dsize + kp.p_vm_ssize + kp.p_vm_tsize) * getpagesize())
186 # define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
189 [[nodiscard
]] static nsresult
GetKinfoProcSelf(KINFO_PROC
* aProc
) {
190 # if defined(__OpenBSD__) && defined(MOZ_SANDBOX)
191 static LazyLogModule
sPledgeLog("SandboxPledge");
192 MOZ_LOG(sPledgeLog
, LogLevel::Debug
,
193 ("%s called when pledged, returning NS_ERROR_FAILURE\n", __func__
));
194 return NS_ERROR_FAILURE
;
201 # if defined(__NetBSD__) || defined(__OpenBSD__)
206 u_int miblen
= sizeof(mib
) / sizeof(mib
[0]);
207 size_t size
= sizeof(KINFO_PROC
);
208 if (sysctl(mib
, miblen
, aProc
, &size
, nullptr, 0)) {
209 return NS_ERROR_FAILURE
;
214 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
215 [[nodiscard
]] static nsresult
VsizeDistinguishedAmount(int64_t* aN
) {
217 nsresult rv
= GetKinfoProcSelf(&proc
);
218 if (NS_SUCCEEDED(rv
)) {
224 [[nodiscard
]] static nsresult
ResidentDistinguishedAmount(int64_t* aN
) {
226 nsresult rv
= GetKinfoProcSelf(&proc
);
227 if (NS_SUCCEEDED(rv
)) {
233 [[nodiscard
]] static nsresult
ResidentFastDistinguishedAmount(int64_t* aN
) {
234 return ResidentDistinguishedAmount(aN
);
238 # include <libutil.h>
239 # include <algorithm>
241 [[nodiscard
]] static nsresult
GetKinfoVmentrySelf(int64_t* aPrss
,
244 struct kinfo_vmentry
* vmmap
;
245 struct kinfo_vmentry
* kve
;
246 if (!(vmmap
= kinfo_getvmmap(getpid(), &cnt
))) {
247 return NS_ERROR_FAILURE
;
256 for (int i
= 0; i
< cnt
; i
++) {
259 *aPrss
+= kve
->kve_private_resident
;
262 *aMaxreg
= std::max(*aMaxreg
, kve
->kve_end
- kve
->kve_start
);
270 # define HAVE_PRIVATE_REPORTER 1
271 [[nodiscard
]] static nsresult
PrivateDistinguishedAmount(int64_t* aN
) {
273 nsresult rv
= GetKinfoVmentrySelf(&priv
, nullptr);
274 NS_ENSURE_SUCCESS(rv
, rv
);
275 *aN
= priv
* getpagesize();
279 # define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
280 [[nodiscard
]] static nsresult
VsizeMaxContiguousDistinguishedAmount(
282 uint64_t biggestRegion
;
283 nsresult rv
= GetKinfoVmentrySelf(nullptr, &biggestRegion
);
284 if (NS_SUCCEEDED(rv
)) {
291 #elif defined(SOLARIS)
297 static void XMappingIter(int64_t& aVsize
, int64_t& aResident
,
302 int mapfd
= open("/proc/self/xmap", O_RDONLY
);
304 prxmap_t
* prmapp
= nullptr;
306 if (!fstat(mapfd
, &st
)) {
307 int nmap
= st
.st_size
/ sizeof(prxmap_t
);
309 // stat(2) on /proc/<pid>/xmap returns an incorrect value,
310 // prior to the release of Solaris 11.
311 // Here is a workaround for it.
313 prmapp
= (prxmap_t
*)malloc((nmap
+ 1) * sizeof(prxmap_t
));
318 int n
= pread(mapfd
, prmapp
, (nmap
+ 1) * sizeof(prxmap_t
), 0);
322 if (nmap
>= n
/ sizeof(prxmap_t
)) {
326 for (int i
= 0; i
< n
/ sizeof(prxmap_t
); i
++) {
327 aVsize
+= prmapp
[i
].pr_size
;
328 aResident
+= prmapp
[i
].pr_rss
* prmapp
[i
].pr_pagesize
;
329 if (prmapp
[i
].pr_mflags
& MA_SHARED
) {
330 aShared
+= prmapp
[i
].pr_rss
* prmapp
[i
].pr_pagesize
;
343 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
344 [[nodiscard
]] static nsresult
VsizeDistinguishedAmount(int64_t* aN
) {
345 int64_t vsize
, resident
, shared
;
346 XMappingIter(vsize
, resident
, shared
);
348 return NS_ERROR_FAILURE
;
354 [[nodiscard
]] static nsresult
ResidentDistinguishedAmount(int64_t* aN
) {
355 int64_t vsize
, resident
, shared
;
356 XMappingIter(vsize
, resident
, shared
);
357 if (resident
== -1) {
358 return NS_ERROR_FAILURE
;
364 [[nodiscard
]] static nsresult
ResidentFastDistinguishedAmount(int64_t* aN
) {
365 return ResidentDistinguishedAmount(aN
);
368 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
369 [[nodiscard
]] static nsresult
ResidentUniqueDistinguishedAmount(int64_t* aN
) {
370 int64_t vsize
, resident
, shared
;
371 XMappingIter(vsize
, resident
, shared
);
372 if (resident
== -1) {
373 return NS_ERROR_FAILURE
;
375 *aN
= resident
- shared
;
379 #elif defined(XP_MACOSX)
381 # include <mach/mach_init.h>
382 # include <mach/mach_vm.h>
383 # include <mach/shared_region.h>
384 # include <mach/task.h>
385 # include <sys/sysctl.h>
387 [[nodiscard
]] static bool GetTaskBasicInfo(struct task_basic_info
* aTi
) {
388 mach_msg_type_number_t count
= TASK_BASIC_INFO_COUNT
;
390 task_info(mach_task_self(), TASK_BASIC_INFO
, (task_info_t
)aTi
, &count
);
391 return kr
== KERN_SUCCESS
;
394 // The VSIZE figure on Mac includes huge amounts of shared memory and is always
395 // absurdly high, eg. 2GB+ even at start-up. But both 'top' and 'ps' report
396 // it, so we might as well too.
397 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
398 [[nodiscard
]] static nsresult
VsizeDistinguishedAmount(int64_t* aN
) {
400 if (!GetTaskBasicInfo(&ti
)) {
401 return NS_ERROR_FAILURE
;
403 *aN
= ti
.virtual_size
;
407 // If we're using jemalloc on Mac, we need to instruct jemalloc to purge the
408 // pages it has madvise(MADV_FREE)'d before we read our RSS in order to get
409 // an accurate result. The OS will take away MADV_FREE'd pages when there's
410 // memory pressure, so ideally, they shouldn't count against our RSS.
412 // Purging these pages can take a long time for some users (see bug 789975),
413 // so we provide the option to get the RSS without purging first.
414 [[nodiscard
]] static nsresult
ResidentDistinguishedAmountHelper(int64_t* aN
,
416 # ifdef HAVE_JEMALLOC_STATS
418 Telemetry::AutoTimer
<Telemetry::MEMORY_FREE_PURGED_PAGES_MS
> timer
;
419 jemalloc_purge_freed_pages();
424 if (!GetTaskBasicInfo(&ti
)) {
425 return NS_ERROR_FAILURE
;
427 *aN
= ti
.resident_size
;
431 [[nodiscard
]] static nsresult
ResidentFastDistinguishedAmount(int64_t* aN
) {
432 return ResidentDistinguishedAmountHelper(aN
, /* doPurge = */ false);
435 [[nodiscard
]] static nsresult
ResidentDistinguishedAmount(int64_t* aN
) {
436 return ResidentDistinguishedAmountHelper(aN
, /* doPurge = */ true);
439 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
441 static bool InSharedRegion(mach_vm_address_t aAddr
, cpu_type_t aType
) {
442 mach_vm_address_t base
;
443 mach_vm_address_t size
;
447 base
= SHARED_REGION_BASE_ARM
;
448 size
= SHARED_REGION_SIZE_ARM
;
451 base
= SHARED_REGION_BASE_ARM64
;
452 size
= SHARED_REGION_SIZE_ARM64
;
455 base
= SHARED_REGION_BASE_I386
;
456 size
= SHARED_REGION_SIZE_I386
;
458 case CPU_TYPE_X86_64
:
459 base
= SHARED_REGION_BASE_X86_64
;
460 size
= SHARED_REGION_SIZE_X86_64
;
466 return base
<= aAddr
&& aAddr
< (base
+ size
);
469 [[nodiscard
]] static nsresult
ResidentUniqueDistinguishedAmount(
470 int64_t* aN
, mach_port_t aPort
= 0) {
472 return NS_ERROR_FAILURE
;
476 size_t len
= sizeof(cpu_type
);
477 if (sysctlbyname("sysctl.proc_cputype", &cpu_type
, &len
, NULL
, 0) != 0) {
478 return NS_ERROR_FAILURE
;
481 // Roughly based on libtop_update_vm_regions in
482 // http://www.opensource.apple.com/source/top/top-100.1.2/libtop.c
483 size_t privatePages
= 0;
484 mach_vm_size_t topSize
= 0;
485 for (mach_vm_address_t addr
= MACH_VM_MIN_ADDRESS
;; addr
+= topSize
) {
486 vm_region_top_info_data_t topInfo
;
487 mach_msg_type_number_t topInfoCount
= VM_REGION_TOP_INFO_COUNT
;
488 mach_port_t topObjectName
;
490 kern_return_t kr
= mach_vm_region(
491 aPort
? aPort
: mach_task_self(), &addr
, &topSize
, VM_REGION_TOP_INFO
,
492 reinterpret_cast<vm_region_info_t
>(&topInfo
), &topInfoCount
,
494 if (kr
== KERN_INVALID_ADDRESS
) {
495 // Done iterating VM regions.
497 } else if (kr
!= KERN_SUCCESS
) {
498 return NS_ERROR_FAILURE
;
501 if (InSharedRegion(addr
, cpu_type
) && topInfo
.share_mode
!= SM_PRIVATE
) {
505 switch (topInfo
.share_mode
) {
507 // NB: Large pages are not shareable and always resident.
509 privatePages
+= topInfo
.private_pages_resident
;
510 privatePages
+= topInfo
.shared_pages_resident
;
513 privatePages
+= topInfo
.private_pages_resident
;
514 if (topInfo
.ref_count
== 1) {
515 // Treat copy-on-write pages as private if they only have one
517 privatePages
+= topInfo
.shared_pages_resident
;
521 // Using mprotect() or similar to protect a page in the middle of a
522 // mapping can create aliased mappings. They look like shared mappings
523 // to the VM_REGION_TOP_INFO interface, so re-check with
524 // VM_REGION_EXTENDED_INFO.
526 mach_vm_size_t exSize
= 0;
527 vm_region_extended_info_data_t exInfo
;
528 mach_msg_type_number_t exInfoCount
= VM_REGION_EXTENDED_INFO_COUNT
;
529 mach_port_t exObjectName
;
530 kr
= mach_vm_region(aPort
? aPort
: mach_task_self(), &addr
, &exSize
,
531 VM_REGION_EXTENDED_INFO
,
532 reinterpret_cast<vm_region_info_t
>(&exInfo
),
533 &exInfoCount
, &exObjectName
);
534 if (kr
== KERN_INVALID_ADDRESS
) {
535 // Done iterating VM regions.
537 } else if (kr
!= KERN_SUCCESS
) {
538 return NS_ERROR_FAILURE
;
541 if (exInfo
.share_mode
== SM_PRIVATE_ALIASED
) {
542 privatePages
+= exInfo
.pages_resident
;
552 if (host_page_size(aPort
? aPort
: mach_task_self(), &pageSize
) !=
554 pageSize
= PAGE_SIZE
;
557 *aN
= privatePages
* pageSize
;
561 [[nodiscard
]] static nsresult
PhysicalFootprintAmount(int64_t* aN
,
562 mach_port_t aPort
= 0) {
565 // The phys_footprint value (introduced in 10.11) of the TASK_VM_INFO data
566 // matches the value in the 'Memory' column of the Activity Monitor.
567 task_vm_info_data_t task_vm_info
;
568 mach_msg_type_number_t count
= TASK_VM_INFO_COUNT
;
569 kern_return_t kr
= task_info(aPort
? aPort
: mach_task_self(), TASK_VM_INFO
,
570 (task_info_t
)&task_vm_info
, &count
);
571 if (kr
!= KERN_SUCCESS
) {
572 return NS_ERROR_FAILURE
;
575 *aN
= task_vm_info
.phys_footprint
;
579 #elif defined(XP_WIN)
581 # include <windows.h>
583 # include <algorithm>
585 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
586 [[nodiscard
]] static nsresult
VsizeDistinguishedAmount(int64_t* aN
) {
588 s
.dwLength
= sizeof(s
);
590 if (!GlobalMemoryStatusEx(&s
)) {
591 return NS_ERROR_FAILURE
;
594 *aN
= s
.ullTotalVirtual
- s
.ullAvailVirtual
;
598 [[nodiscard
]] static nsresult
ResidentDistinguishedAmount(int64_t* aN
) {
599 PROCESS_MEMORY_COUNTERS pmc
;
600 pmc
.cb
= sizeof(PROCESS_MEMORY_COUNTERS
);
602 if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc
, sizeof(pmc
))) {
603 return NS_ERROR_FAILURE
;
606 *aN
= pmc
.WorkingSetSize
;
610 [[nodiscard
]] static nsresult
ResidentFastDistinguishedAmount(int64_t* aN
) {
611 return ResidentDistinguishedAmount(aN
);
614 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
616 [[nodiscard
]] static nsresult
ResidentUniqueDistinguishedAmount(
617 int64_t* aN
, HANDLE aProcess
= nullptr) {
618 // Determine how many entries we need.
619 PSAPI_WORKING_SET_INFORMATION tmp
;
620 DWORD tmpSize
= sizeof(tmp
);
621 memset(&tmp
, 0, tmpSize
);
623 HANDLE proc
= aProcess
? aProcess
: GetCurrentProcess();
624 QueryWorkingSet(proc
, &tmp
, tmpSize
);
626 // Fudge the size in case new entries are added between calls.
627 size_t entries
= tmp
.NumberOfEntries
* 2;
630 return NS_ERROR_FAILURE
;
633 DWORD infoArraySize
= tmpSize
+ (entries
* sizeof(PSAPI_WORKING_SET_BLOCK
));
634 UniqueFreePtr
<PSAPI_WORKING_SET_INFORMATION
> infoArray(
635 static_cast<PSAPI_WORKING_SET_INFORMATION
*>(malloc(infoArraySize
)));
638 return NS_ERROR_FAILURE
;
641 if (!QueryWorkingSet(proc
, infoArray
.get(), infoArraySize
)) {
642 return NS_ERROR_FAILURE
;
645 entries
= static_cast<size_t>(infoArray
->NumberOfEntries
);
646 size_t privatePages
= 0;
647 for (size_t i
= 0; i
< entries
; i
++) {
648 // Count shared pages that only one process is using as private.
649 if (!infoArray
->WorkingSetInfo
[i
].Shared
||
650 infoArray
->WorkingSetInfo
[i
].ShareCount
<= 1) {
658 *aN
= privatePages
* si
.dwPageSize
;
662 # define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
663 [[nodiscard
]] static nsresult
VsizeMaxContiguousDistinguishedAmount(
665 SIZE_T biggestRegion
= 0;
666 MEMORY_BASIC_INFORMATION vmemInfo
= {0};
667 for (size_t currentAddress
= 0;;) {
668 if (!VirtualQuery((LPCVOID
)currentAddress
, &vmemInfo
, sizeof(vmemInfo
))) {
669 // Something went wrong, just return whatever we've got already.
673 if (vmemInfo
.State
== MEM_FREE
) {
674 biggestRegion
= std::max(biggestRegion
, vmemInfo
.RegionSize
);
677 SIZE_T lastAddress
= currentAddress
;
678 currentAddress
+= vmemInfo
.RegionSize
;
680 // If we overflow, we've examined all of the address space.
681 if (currentAddress
< lastAddress
) {
690 # define HAVE_PRIVATE_REPORTER 1
691 [[nodiscard
]] static nsresult
PrivateDistinguishedAmount(int64_t* aN
) {
692 PROCESS_MEMORY_COUNTERS_EX pmcex
;
693 pmcex
.cb
= sizeof(PROCESS_MEMORY_COUNTERS_EX
);
695 if (!GetProcessMemoryInfo(GetCurrentProcess(),
696 (PPROCESS_MEMORY_COUNTERS
)&pmcex
, sizeof(pmcex
))) {
697 return NS_ERROR_FAILURE
;
700 *aN
= pmcex
.PrivateUsage
;
704 # define HAVE_SYSTEM_HEAP_REPORTER 1
705 // Windows can have multiple separate heaps, but we should not touch non-default
706 // heaps because they may be destroyed at anytime while we hold a handle. So we
707 // count only the default heap.
708 [[nodiscard
]] static nsresult
SystemHeapSize(int64_t* aSizeOut
) {
709 HANDLE heap
= GetProcessHeap();
711 NS_ENSURE_TRUE(HeapLock(heap
), NS_ERROR_FAILURE
);
713 int64_t heapSize
= 0;
714 PROCESS_HEAP_ENTRY entry
;
715 entry
.lpData
= nullptr;
716 while (HeapWalk(heap
, &entry
)) {
717 // We don't count entry.cbOverhead, because we just want to measure the
718 // space available to the program.
719 if (entry
.wFlags
& PROCESS_HEAP_ENTRY_BUSY
) {
720 heapSize
+= entry
.cbData
;
724 // Check this result only after unlocking the heap, so that we don't leave
725 // the heap locked if there was an error.
726 DWORD lastError
= GetLastError();
728 // I have no idea how things would proceed if unlocking this heap failed...
729 NS_ENSURE_TRUE(HeapUnlock(heap
), NS_ERROR_FAILURE
);
731 NS_ENSURE_TRUE(lastError
== ERROR_NO_MORE_ITEMS
, NS_ERROR_FAILURE
);
733 *aSizeOut
= heapSize
;
744 struct SegmentEntry
: public PLDHashEntryHdr
{
745 static PLDHashNumber
HashKey(const void* aKey
) {
746 auto kind
= static_cast<const SegmentKind
*>(aKey
);
747 return mozilla::HashGeneric(kind
->mState
, kind
->mType
, kind
->mProtect
,
751 static bool MatchEntry(const PLDHashEntryHdr
* aEntry
, const void* aKey
) {
752 auto kind
= static_cast<const SegmentKind
*>(aKey
);
753 auto entry
= static_cast<const SegmentEntry
*>(aEntry
);
754 return kind
->mState
== entry
->mKind
.mState
&&
755 kind
->mType
== entry
->mKind
.mType
&&
756 kind
->mProtect
== entry
->mKind
.mProtect
&&
757 kind
->mIsStack
== entry
->mKind
.mIsStack
;
760 static void InitEntry(PLDHashEntryHdr
* aEntry
, const void* aKey
) {
761 auto kind
= static_cast<const SegmentKind
*>(aKey
);
762 auto entry
= static_cast<SegmentEntry
*>(aEntry
);
763 entry
->mKind
= *kind
;
768 static const PLDHashTableOps Ops
;
770 SegmentKind mKind
; // The segment kind.
771 uint32_t mCount
; // The number of segments of this kind.
772 size_t mSize
; // The combined size of segments of this kind.
775 /* static */ const PLDHashTableOps
SegmentEntry::Ops
= {
776 SegmentEntry::HashKey
, SegmentEntry::MatchEntry
,
777 PLDHashTable::MoveEntryStub
, PLDHashTable::ClearEntryStub
,
778 SegmentEntry::InitEntry
};
780 class WindowsAddressSpaceReporter final
: public nsIMemoryReporter
{
781 ~WindowsAddressSpaceReporter() {}
786 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
787 nsISupports
* aData
, bool aAnonymize
) override
{
788 // First iterate over all the segments and record how many of each kind
789 // there were and their aggregate sizes. We use a hash table for this
790 // because there are a couple of dozen different kinds possible.
792 PLDHashTable
table(&SegmentEntry::Ops
, sizeof(SegmentEntry
));
793 MEMORY_BASIC_INFORMATION info
= {0};
794 bool isPrevSegStackGuard
= false;
795 for (size_t currentAddress
= 0;;) {
796 if (!VirtualQuery((LPCVOID
)currentAddress
, &info
, sizeof(info
))) {
797 // Something went wrong, just return whatever we've got already.
801 size_t size
= info
.RegionSize
;
803 // Note that |type| and |protect| are ignored in some cases.
804 DWORD state
= info
.State
;
806 (state
== MEM_RESERVE
|| state
== MEM_COMMIT
) ? info
.Type
: 0;
807 DWORD protect
= (state
== MEM_COMMIT
) ? info
.Protect
: 0;
808 bool isStack
= isPrevSegStackGuard
&& state
== MEM_COMMIT
&&
809 type
== MEM_PRIVATE
&& protect
== PAGE_READWRITE
;
811 SegmentKind kind
= {state
, type
, protect
, isStack
? 1 : 0};
813 static_cast<SegmentEntry
*>(table
.Add(&kind
, mozilla::fallible
));
816 entry
->mSize
+= size
;
819 isPrevSegStackGuard
= info
.State
== MEM_COMMIT
&&
820 info
.Type
== MEM_PRIVATE
&&
821 info
.Protect
== (PAGE_READWRITE
| PAGE_GUARD
);
823 size_t lastAddress
= currentAddress
;
824 currentAddress
+= size
;
826 // If we overflow, we've examined all of the address space.
827 if (currentAddress
< lastAddress
) {
832 // Then iterate over the hash table and report the details for each segment
835 for (auto iter
= table
.Iter(); !iter
.Done(); iter
.Next()) {
836 // For each range of pages, we consider one or more of its State, Type
837 // and Protect values. These are documented at
838 // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx
839 // (for State and Type) and
840 // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx
843 // Not all State values have accompanying Type and Protection values.
845 bool doProtect
= false;
847 auto entry
= static_cast<const SegmentEntry
*>(iter
.Get());
849 nsCString
path("address-space");
851 switch (entry
->mKind
.mState
) {
853 path
.AppendLiteral("/free");
857 path
.AppendLiteral("/reserved");
862 path
.AppendLiteral("/commit");
868 // Should be impossible, but handle it just in case.
869 path
.AppendLiteral("/???");
874 switch (entry
->mKind
.mType
) {
876 path
.AppendLiteral("/image");
880 path
.AppendLiteral("/mapped");
884 path
.AppendLiteral("/private");
888 // Should be impossible, but handle it just in case.
889 path
.AppendLiteral("/???");
895 DWORD protect
= entry
->mKind
.mProtect
;
896 // Basic attributes. Exactly one of these should be set.
897 if (protect
& PAGE_EXECUTE
) {
898 path
.AppendLiteral("/execute");
900 if (protect
& PAGE_EXECUTE_READ
) {
901 path
.AppendLiteral("/execute-read");
903 if (protect
& PAGE_EXECUTE_READWRITE
) {
904 path
.AppendLiteral("/execute-readwrite");
906 if (protect
& PAGE_EXECUTE_WRITECOPY
) {
907 path
.AppendLiteral("/execute-writecopy");
909 if (protect
& PAGE_NOACCESS
) {
910 path
.AppendLiteral("/noaccess");
912 if (protect
& PAGE_READONLY
) {
913 path
.AppendLiteral("/readonly");
915 if (protect
& PAGE_READWRITE
) {
916 path
.AppendLiteral("/readwrite");
918 if (protect
& PAGE_WRITECOPY
) {
919 path
.AppendLiteral("/writecopy");
922 // Modifiers. At most one of these should be set.
923 if (protect
& PAGE_GUARD
) {
924 path
.AppendLiteral("+guard");
926 if (protect
& PAGE_NOCACHE
) {
927 path
.AppendLiteral("+nocache");
929 if (protect
& PAGE_WRITECOMBINE
) {
930 path
.AppendLiteral("+writecombine");
933 // Annotate likely stack segments, too.
934 if (entry
->mKind
.mIsStack
) {
935 path
.AppendLiteral("+stack");
939 // Append the segment count.
940 path
.AppendPrintf("(segments=%u)", entry
->mCount
);
942 aHandleReport
->Callback(""_ns
, path
, KIND_OTHER
, UNITS_BYTES
,
943 entry
->mSize
, "From MEMORY_BASIC_INFORMATION."_ns
,
950 NS_IMPL_ISUPPORTS(WindowsAddressSpaceReporter
, nsIMemoryReporter
)
952 #endif // XP_<PLATFORM>
954 #ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
955 class VsizeMaxContiguousReporter final
: public nsIMemoryReporter
{
956 ~VsizeMaxContiguousReporter() {}
961 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
962 nsISupports
* aData
, bool aAnonymize
) override
{
964 if (NS_SUCCEEDED(VsizeMaxContiguousDistinguishedAmount(&amount
))) {
966 "vsize-max-contiguous", KIND_OTHER
, UNITS_BYTES
, amount
,
967 "Size of the maximum contiguous block of available virtual memory.");
972 NS_IMPL_ISUPPORTS(VsizeMaxContiguousReporter
, nsIMemoryReporter
)
975 #ifdef HAVE_PRIVATE_REPORTER
976 class PrivateReporter final
: public nsIMemoryReporter
{
977 ~PrivateReporter() {}
982 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
983 nsISupports
* aData
, bool aAnonymize
) override
{
985 if (NS_SUCCEEDED(PrivateDistinguishedAmount(&amount
))) {
988 "private", KIND_OTHER
, UNITS_BYTES
, amount
,
989 "Memory that cannot be shared with other processes, including memory that is "
990 "committed and marked MEM_PRIVATE, data that is not mapped, and executable "
991 "pages that have been written to.");
997 NS_IMPL_ISUPPORTS(PrivateReporter
, nsIMemoryReporter
)
1000 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
1001 class VsizeReporter final
: public nsIMemoryReporter
{
1002 ~VsizeReporter() = default;
1007 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1008 nsISupports
* aData
, bool aAnonymize
) override
{
1010 if (NS_SUCCEEDED(VsizeDistinguishedAmount(&amount
))) {
1013 "vsize", KIND_OTHER
, UNITS_BYTES
, amount
,
1014 "Memory mapped by the process, including code and data segments, the heap, "
1015 "thread stacks, memory explicitly mapped by the process via mmap and similar "
1016 "operations, and memory shared with other processes. This is the vsize figure "
1017 "as reported by 'top' and 'ps'. This figure is of limited use on Mac, where "
1018 "processes share huge amounts of memory with one another. But even on other "
1019 "operating systems, 'resident' is a much better measure of the memory "
1020 "resources used by the process.");
1026 NS_IMPL_ISUPPORTS(VsizeReporter
, nsIMemoryReporter
)
1028 class ResidentReporter final
: public nsIMemoryReporter
{
1029 ~ResidentReporter() = default;
1034 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1035 nsISupports
* aData
, bool aAnonymize
) override
{
1037 if (NS_SUCCEEDED(ResidentDistinguishedAmount(&amount
))) {
1040 "resident", KIND_OTHER
, UNITS_BYTES
, amount
,
1041 "Memory mapped by the process that is present in physical memory, also known "
1042 "as the resident set size (RSS). This is the best single figure to use when "
1043 "considering the memory resources used by the process, but it depends both on "
1044 "other processes being run and details of the OS kernel and so is best used "
1045 "for comparing the memory usage of a single process at different points in "
1052 NS_IMPL_ISUPPORTS(ResidentReporter
, nsIMemoryReporter
)
1054 #endif // HAVE_VSIZE_AND_RESIDENT_REPORTERS
1056 #ifdef HAVE_RESIDENT_UNIQUE_REPORTER
1057 class ResidentUniqueReporter final
: public nsIMemoryReporter
{
1058 ~ResidentUniqueReporter() = default;
1063 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1064 nsISupports
* aData
, bool aAnonymize
) override
{
1067 if (NS_SUCCEEDED(ResidentUniqueDistinguishedAmount(&amount
))) {
1069 "resident-unique", KIND_OTHER
, UNITS_BYTES
, amount
,
1070 "Memory mapped by the process that is present in physical memory and not "
1071 "shared with any other processes. This is also known as the process's unique "
1072 "set size (USS). This is the amount of RAM we'd expect to be freed if we "
1073 "closed this process.");
1076 if (NS_SUCCEEDED(PhysicalFootprintAmount(&amount
))) {
1078 "resident-phys-footprint", KIND_OTHER
, UNITS_BYTES
, amount
,
1079 "Memory footprint reported by MacOS's task_info API's phys_footprint field. "
1080 "This matches the memory column in Activity Monitor.");
1087 NS_IMPL_ISUPPORTS(ResidentUniqueReporter
, nsIMemoryReporter
)
1089 #endif // HAVE_RESIDENT_UNIQUE_REPORTER
1091 #ifdef HAVE_SYSTEM_HEAP_REPORTER
1093 class SystemHeapReporter final
: public nsIMemoryReporter
{
1094 ~SystemHeapReporter() = default;
1099 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1100 nsISupports
* aData
, bool aAnonymize
) override
{
1102 if (NS_SUCCEEDED(SystemHeapSize(&amount
))) {
1105 "system-heap-allocated", KIND_OTHER
, UNITS_BYTES
, amount
,
1106 "Memory used by the system allocator that is currently allocated to the "
1107 "application. This is distinct from the jemalloc heap that Firefox uses for "
1108 "most or all of its heap allocations. Ideally this number is zero, but "
1109 "on some platforms we cannot force every heap allocation through jemalloc.");
1115 NS_IMPL_ISUPPORTS(SystemHeapReporter
, nsIMemoryReporter
)
1116 #endif // HAVE_SYSTEM_HEAP_REPORTER
1120 # include <sys/resource.h>
1122 # define HAVE_RESIDENT_PEAK_REPORTER 1
1124 [[nodiscard
]] static nsresult
ResidentPeakDistinguishedAmount(int64_t* aN
) {
1125 struct rusage usage
;
1126 if (0 == getrusage(RUSAGE_SELF
, &usage
)) {
1127 // The units for ru_maxrrs:
1129 // - Solaris: pages? But some sources it actually always returns 0, so
1131 // - Linux, {Net/Open/Free}BSD, DragonFly: KiB
1133 *aN
= usage
.ru_maxrss
;
1134 # elif defined(SOLARIS)
1135 *aN
= usage
.ru_maxrss
* getpagesize();
1137 *aN
= usage
.ru_maxrss
* 1024;
1143 return NS_ERROR_FAILURE
;
1146 class ResidentPeakReporter final
: public nsIMemoryReporter
{
1147 ~ResidentPeakReporter() = default;
1152 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1153 nsISupports
* aData
, bool aAnonymize
) override
{
1155 if (NS_SUCCEEDED(ResidentPeakDistinguishedAmount(&amount
))) {
1157 "resident-peak", KIND_OTHER
, UNITS_BYTES
, amount
,
1158 "The peak 'resident' value for the lifetime of the process.");
1163 NS_IMPL_ISUPPORTS(ResidentPeakReporter
, nsIMemoryReporter
)
1165 # define HAVE_PAGE_FAULT_REPORTERS 1
1167 class PageFaultsSoftReporter final
: public nsIMemoryReporter
{
1168 ~PageFaultsSoftReporter() = default;
1173 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1174 nsISupports
* aData
, bool aAnonymize
) override
{
1175 struct rusage usage
;
1176 int err
= getrusage(RUSAGE_SELF
, &usage
);
1178 int64_t amount
= usage
.ru_minflt
;
1181 "page-faults-soft", KIND_OTHER
, UNITS_COUNT_CUMULATIVE
, amount
,
1182 "The number of soft page faults (also known as 'minor page faults') that "
1183 "have occurred since the process started. A soft page fault occurs when the "
1184 "process tries to access a page which is present in physical memory but is "
1185 "not mapped into the process's address space. For instance, a process might "
1186 "observe soft page faults when it loads a shared library which is already "
1187 "present in physical memory. A process may experience many thousands of soft "
1188 "page faults even when the machine has plenty of available physical memory, "
1189 "and because the OS services a soft page fault without accessing the disk, "
1190 "they impact performance much less than hard page faults.");
1196 NS_IMPL_ISUPPORTS(PageFaultsSoftReporter
, nsIMemoryReporter
)
// Fetches the cumulative major (hard) page-fault count for this process into
// *aAmount, from getrusage(RUSAGE_SELF).ru_majflt. Returns NS_ERROR_FAILURE
// on some path (presumably when getrusage fails -- the guarding condition is
// among the lines missing from this extraction).
// NOTE(review): fragmentary extraction; parameter list and several statements
// are missing.
1198 [[nodiscard
]] static nsresult
PageFaultsHardDistinguishedAmount(
1200 struct rusage usage
;
1201 int err
= getrusage(RUSAGE_SELF
, &usage
);
1203 return NS_ERROR_FAILURE
;
1205 *aAmount
= usage
.ru_majflt
;
// Reporter for "page-faults-hard": cumulative major page faults, via
// PageFaultsHardDistinguishedAmount() above.
// NOTE(review): fragmentary extraction -- interior source lines are missing;
// do not treat as compilable.
1209 class PageFaultsHardReporter final
: public nsIMemoryReporter
{
1210 ~PageFaultsHardReporter() = default;
1215 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1216 nsISupports
* aData
, bool aAnonymize
) override
{
1218 if (NS_SUCCEEDED(PageFaultsHardDistinguishedAmount(&amount
))) {
1221 "page-faults-hard", KIND_OTHER
, UNITS_COUNT_CUMULATIVE
, amount
,
1222 "The number of hard page faults (also known as 'major page faults') that have "
1223 "occurred since the process started. A hard page fault occurs when a process "
1224 "tries to access a page which is not present in physical memory. The "
1225 "operating system must access the disk in order to fulfill a hard page fault. "
1226 "When memory is plentiful, you should see very few hard page faults. But if "
1227 "the process tries to use more memory than your machine has available, you "
1228 "may see many thousands of hard page faults. Because accessing the disk is up "
1229 "to a million times slower than accessing RAM, the program may run very "
1230 "slowly when it is experiencing more than 100 or so hard page faults a "
1237 NS_IMPL_ISUPPORTS(PageFaultsHardReporter
, nsIMemoryReporter
)
1242 ** memory reporter implementation for jemalloc and OSX malloc,
1243 ** to obtain info on total memory in use (that we know about,
1244 ** at least -- on OSX, there are sometimes other zones in use).
1247 #ifdef HAVE_JEMALLOC_STATS
// Sums the jemalloc overhead components (waste + bookkeeping + page_cache +
// at least one more term -- the final addend is among the lines missing from
// this extraction, presumably bin_unused).
1249 static size_t HeapOverhead(const jemalloc_stats_t
& aStats
) {
1250 return aStats
.waste
+ aStats
.bookkeeping
+ aStats
.page_cache
+
1254 // This has UNITS_PERCENTAGE, so it is multiplied by 100x *again* on top of the
1255 // 100x for the percentage.
// Returns the heap overhead as a fraction of committed heap memory, scaled by
// 10000 (this value carries UNITS_PERCENTAGE, which itself multiplies by 100,
// per the comment preceding this function in the original file).
// heapCommitted = allocated + overhead; result = 10000 * overhead/committed.
1258 int64_t nsMemoryReporterManager::HeapOverheadFraction(
1259 const jemalloc_stats_t
& aStats
) {
1260 size_t heapOverhead
= HeapOverhead(aStats
);
1261 size_t heapCommitted
= aStats
.allocated
+ heapOverhead
;
1262 return int64_t(10000 * (heapOverhead
/ (double)heapCommitted
));
// Reporter over jemalloc_stats(): emits heap-committed/allocated,
// heap-allocated, per-bin explicit/heap-overhead/bin-unused entries,
// waste/bookkeeping/page-cache overhead, heap-committed/overhead, heap-mapped,
// heap-chunksize, and (when PHC is active) PHC metadata/fragmentation.
// NOTE(review): fragmentary extraction -- many MOZ_COLLECT_REPORT invocation
// heads, braces, and #ifdef lines are missing; do not treat as compilable.
1265 class JemallocHeapReporter final
: public nsIMemoryReporter
{
1266 ~JemallocHeapReporter() = default;
1271 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1272 nsISupports
* aData
, bool aAnonymize
) override
{
1273 jemalloc_stats_t stats
;
1274 const size_t num_bins
= jemalloc_stats_num_bins();
1275 nsTArray
<jemalloc_bin_stats_t
> bin_stats(num_bins
);
1276 bin_stats
.SetLength(num_bins
);
1277 jemalloc_stats(&stats
, bin_stats
.Elements());
1281 "heap-committed/allocated", KIND_OTHER
, UNITS_BYTES
, stats
.allocated
,
1282 "Memory mapped by the heap allocator that is currently allocated to the "
1283 "application. This may exceed the amount of memory requested by the "
1284 "application because the allocator regularly rounds up request sizes. (The "
1285 "exact amount requested is not recorded.)");
1288 "heap-allocated", KIND_OTHER
, UNITS_BYTES
, stats
.allocated
,
1289 "The same as 'heap-committed/allocated'.");
1291 // We mark this and the other heap-overhead reporters as KIND_NONHEAP
1292 // because KIND_HEAP memory means "counted in heap-allocated", which
1294 for (auto& bin
: bin_stats
) {
1295 MOZ_ASSERT(bin
.size
);
1296 nsPrintfCString
path("explicit/heap-overhead/bin-unused/bin-%zu",
1298 aHandleReport
->Callback(EmptyCString(), path
, KIND_NONHEAP
, UNITS_BYTES
,
1301 "Unused bytes in all runs of all bins for this size class"),
1305 if (stats
.waste
> 0) {
1307 "explicit/heap-overhead/waste", KIND_NONHEAP
, UNITS_BYTES
,
1309 "Committed bytes which do not correspond to an active allocation and which the "
1310 "allocator is not intentionally keeping alive (i.e., not "
1311 "'explicit/heap-overhead/{bookkeeping,page-cache,bin-unused}').");
1315 "explicit/heap-overhead/bookkeeping", KIND_NONHEAP
, UNITS_BYTES
,
1317 "Committed bytes which the heap allocator uses for internal data structures.");
1320 "explicit/heap-overhead/page-cache", KIND_NONHEAP
, UNITS_BYTES
,
1322 "Memory which the allocator could return to the operating system, but hasn't. "
1323 "The allocator keeps this memory around as an optimization, so it doesn't "
1324 "have to ask the OS the next time it needs to fulfill a request. This value "
1325 "is typically not larger than a few megabytes.");
1328 "heap-committed/overhead", KIND_OTHER
, UNITS_BYTES
,
1329 HeapOverhead(stats
),
1330 "The sum of 'explicit/heap-overhead/*'.");
1333 "heap-mapped", KIND_OTHER
, UNITS_BYTES
, stats
.mapped
,
1334 "Amount of memory currently mapped. Includes memory that is uncommitted, i.e. "
1335 "neither in physical memory nor paged to disk.");
1338 "heap-chunksize", KIND_OTHER
, UNITS_BYTES
, stats
.chunksize
,
1342 mozilla::phc::MemoryUsage usage
;
1343 mozilla::phc::PHCMemoryUsage(usage
);
1346 "explicit/heap-overhead/phc/metadata", KIND_NONHEAP
, UNITS_BYTES
,
1347 usage
.mMetadataBytes
,
1348 "Memory used by PHC to store stacks and other metadata for each allocation");
1350 "explicit/heap-overhead/phc/fragmentation", KIND_NONHEAP
, UNITS_BYTES
,
1351 usage
.mFragmentationBytes
,
1352 "The amount of memory lost due to rounding up allocations to the next page "
1354 "This is also known as 'internal fragmentation'. "
1355 "Note that all allocators have some internal fragmentation, there may still "
1356 "be some internal fragmentation without PHC.");
1364 NS_IMPL_ISUPPORTS(JemallocHeapReporter
, nsIMemoryReporter
)
1366 #endif // HAVE_JEMALLOC_STATS
1368 // Why is this here? At first glance, you'd think it could be defined and
1369 // registered with nsMemoryReporterManager entirely within nsAtomTable.cpp.
1370 // However, the obvious time to register it is when the table is initialized,
1371 // and that happens before XPCOM components are initialized, which means the
1372 // RegisterStrongMemoryReporter call fails. So instead we do it here.
// Reporter for the XPCOM atom table (registered here rather than in
// nsAtomTable.cpp -- see the rationale comment preceding this class in the
// original file). Reports explicit/atoms/table and
// explicit/atoms/dynamic-objects-and-chars via NS_AddSizeOfAtoms().
// NOTE(review): fragmentary extraction; interior lines missing.
1373 class AtomTablesReporter final
: public nsIMemoryReporter
{
1374 MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf
)
1376 ~AtomTablesReporter() = default;
1381 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1382 nsISupports
* aData
, bool aAnonymize
) override
{
1384 NS_AddSizeOfAtoms(MallocSizeOf
, sizes
);
1386 MOZ_COLLECT_REPORT("explicit/atoms/table", KIND_HEAP
, UNITS_BYTES
,
1387 sizes
.mTable
, "Memory used by the atom table.");
1390 "explicit/atoms/dynamic-objects-and-chars", KIND_HEAP
, UNITS_BYTES
,
1391 sizes
.mDynamicAtoms
,
1392 "Memory used by dynamic atom objects and chars (which are stored "
1393 "at the end of each atom object).");
1398 NS_IMPL_ISUPPORTS(AtomTablesReporter
, nsIMemoryReporter
)
// Reporter for per-thread memory: stack private sizes (platform-specific via
// MemoryMapping on Linux, MemoryInfo on Windows, StackSize() elsewhere),
// event-queue and wrapper overheads, and an estimated fixed kernel-stack
// overhead per thread. Thread info is gathered under the thread-list mutex
// and reported after release (per the comment at original line 1414-1415).
// NOTE(review): fragmentary extraction -- #else/#endif lines, braces, and
// several statements are missing; do not treat as compilable.
1400 class ThreadsReporter final
: public nsIMemoryReporter
{
1401 MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf
)
1402 ~ThreadsReporter() = default;
1407 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1408 nsISupports
* aData
, bool aAnonymize
) override
{
1410 nsTArray
<MemoryMapping
> mappings(1024);
1411 MOZ_TRY(GetMemoryMappings(mappings
));
1414 // Enumerating over active threads requires holding a lock, so we collect
1415 // info on all threads, and then call our reporter callbacks after releasing
1420 size_t mPrivateSize
;
1422 AutoTArray
<ThreadData
, 32> threads
;
1424 size_t eventQueueSizes
= 0;
1425 size_t wrapperSizes
= 0;
1426 size_t threadCount
= 0;
1429 nsThreadManager
& tm
= nsThreadManager::get();
1430 OffTheBooksMutexAutoLock
lock(tm
.ThreadListMutex());
1431 for (auto* thread
: tm
.ThreadList()) {
1433 eventQueueSizes
+= thread
->SizeOfEventQueues(MallocSizeOf
);
1434 wrapperSizes
+= thread
->ShallowSizeOfIncludingThis(MallocSizeOf
);
1436 if (!thread
->StackBase()) {
1440 #if defined(XP_LINUX)
1441 int idx
= mappings
.BinaryIndexOf(thread
->StackBase());
1445 // Referenced() is the combined size of all pages in the region which
1446 // have ever been touched, and are therefore consuming memory. For stack
1447 // regions, these pages are guaranteed to be un-shared unless we fork
1448 // after creating threads (which we don't).
1449 size_t privateSize
= mappings
[idx
].Referenced();
1451 // On Linux, we have to be very careful matching memory regions to
1454 // To begin with, the kernel only reports VM stats for regions of all
1455 // adjacent pages with the same flags, protection, and backing file.
1456 // There's no way to get finer-grained usage information for a subset of
1459 // Stack segments always have a guard page at the bottom of the stack
1460 // (assuming we only support stacks that grow down), so there's no
1461 // danger of them being merged with other stack regions. At the top,
1462 // there's no protection page, and no way to allocate one without using
1463 // pthreads directly and allocating our own stacks. So we get around the
1464 // problem by adding an extra VM flag (NOHUGEPAGES) to our stack region,
1465 // which we don't expect to be set on any heap regions. But this is not
1468 // A second kink is that different C libraries (and different versions
1469 // thereof) report stack base locations and sizes differently with
1470 // regard to the guard page. For the libraries that include the guard
1471 // page in the stack size base pointer, we need to adjust those values
1472 // to compensate. But it's possible that our logic will get out of sync
1473 // with library changes, or someone will compile with an unexpected
1477 // The upshot of all of this is that there may be configurations that
1478 // our special cases don't cover. And if there are, we want to know
1479 // about it. So assert that total size of the memory region we're
1480 // reporting actually matches the allocated size of the thread stack.
1482 MOZ_ASSERT(mappings
[idx
].Size() == thread
->StackSize(),
1483 "Mapping region size doesn't match stack allocation size");
1485 #elif defined(XP_WIN)
1487 MemoryInfo::Get(thread
->StackBase(), thread
->StackSize());
1488 size_t privateSize
= memInfo
.Committed();
1490 size_t privateSize
= thread
->StackSize();
1491 MOZ_ASSERT_UNREACHABLE(
1492 "Shouldn't have stack base pointer on this "
1496 nsCString threadName
;
1497 thread
->GetThreadName(threadName
);
1498 threads
.AppendElement(ThreadData
{
1499 std::move(threadName
),
1501 // On Linux, it's possible (but unlikely) that our stack region will
1502 // have been merged with adjacent heap regions, in which case we'll
1503 // get combined size information for both. So we take the minimum of
1504 // the reported private size and the requested stack size to avoid
1505 // the possible of majorly over-reporting in that case.
1506 std::min(privateSize
, thread
->StackSize()),
1511 for (auto& thread
: threads
) {
1512 nsPrintfCString
path("explicit/threads/stacks/%s (tid=%u)",
1513 thread
.mName
.get(), thread
.mThreadId
);
1515 aHandleReport
->Callback(
1516 ""_ns
, path
, KIND_NONHEAP
, UNITS_BYTES
, thread
.mPrivateSize
,
1517 nsLiteralCString("The sizes of thread stacks which have been "
1518 "committed to memory."),
1522 MOZ_COLLECT_REPORT("explicit/threads/overhead/event-queues", KIND_HEAP
,
1523 UNITS_BYTES
, eventQueueSizes
,
1524 "The sizes of nsThread event queues and observers.");
1526 MOZ_COLLECT_REPORT("explicit/threads/overhead/wrappers", KIND_HEAP
,
1527 UNITS_BYTES
, wrapperSizes
,
1528 "The sizes of nsThread/PRThread wrappers.");
1531 // Each thread on Windows has a fixed kernel overhead. For 32 bit Windows,
1532 // that's 12K. For 64 bit, it's 24K.
1535 // https://blogs.technet.microsoft.com/markrussinovich/2009/07/05/pushing-the-limits-of-windows-processes-and-threads/
1536 constexpr size_t kKernelSize
= (sizeof(void*) == 8 ? 24 : 12) * 1024;
1537 #elif defined(XP_LINUX)
1538 // On Linux, kernel stacks are usually 8K. However, on x86, they are
1539 // allocated virtually, and start out at 4K. They may grow to 8K, but we
1540 // have no way of knowing which ones do, so all we can do is guess.
1541 # if defined(__x86_64__) || defined(__i386__)
1542 constexpr size_t kKernelSize
= 4 * 1024;
1544 constexpr size_t kKernelSize
= 8 * 1024;
1546 #elif defined(XP_MACOSX)
1547 // On Darwin, kernel stacks are 16K:
1549 // https://books.google.com/books?id=K8vUkpOXhN4C&lpg=PA513&dq=mach%20kernel%20thread%20stack%20size&pg=PA513#v=onepage&q=mach%20kernel%20thread%20stack%20size&f=false
1550 constexpr size_t kKernelSize
= 16 * 1024;
1552 // Elsewhere, just assume that kernel stacks require at least 8K.
1553 constexpr size_t kKernelSize
= 8 * 1024;
1556 MOZ_COLLECT_REPORT("explicit/threads/overhead/kernel", KIND_NONHEAP
,
1557 UNITS_BYTES
, threadCount
* kKernelSize
,
1558 "The total kernel overhead for all active threads.");
1563 NS_IMPL_ISUPPORTS(ThreadsReporter
, nsIMemoryReporter
)
1567 // Ideally, this would be implemented in BlockingResourceBase.cpp.
1568 // However, this ends up breaking the linking step of various unit tests due
1569 // to adding a new dependency to libdmd for a commonly used feature (mutexes)
1570 // in DMD builds. So instead we do it here.
// Reporter for the deadlock detector (lives here instead of
// BlockingResourceBase.cpp for linking reasons -- see the comment preceding
// this class in the original file). Reports explicit/deadlock-detector via
// BlockingResourceBase::SizeOfDeadlockDetector().
// NOTE(review): fragmentary extraction; interior lines missing.
1571 class DeadlockDetectorReporter final
: public nsIMemoryReporter
{
1572 MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf
)
1574 ~DeadlockDetectorReporter() = default;
1579 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1580 nsISupports
* aData
, bool aAnonymize
) override
{
1582 "explicit/deadlock-detector", KIND_HEAP
, UNITS_BYTES
,
1583 BlockingResourceBase::SizeOfDeadlockDetector(MallocSizeOf
),
1584 "Memory used by the deadlock detector.");
1589 NS_IMPL_ISUPPORTS(DeadlockDetectorReporter
, nsIMemoryReporter
)
// Reporter for DMD's own bookkeeping memory: stack-trace storage (used /
// unused / table), the live-block table, and the dead-block list, all sized
// via dmd::SizeOf().
// NOTE(review): fragmentary extraction; interior lines missing.
1598 class DMDReporter final
: public nsIMemoryReporter
{
1602 NS_IMETHOD
CollectReports(nsIHandleReportCallback
* aHandleReport
,
1603 nsISupports
* aData
, bool aAnonymize
) override
{
1605 dmd::SizeOf(&sizes
);
1608 "explicit/dmd/stack-traces/used", KIND_HEAP
, UNITS_BYTES
,
1609 sizes
.mStackTracesUsed
,
1610 "Memory used by stack traces which correspond to at least "
1611 "one heap block DMD is tracking.");
1614 "explicit/dmd/stack-traces/unused", KIND_HEAP
, UNITS_BYTES
,
1615 sizes
.mStackTracesUnused
,
1616 "Memory used by stack traces which don't correspond to any heap "
1617 "blocks DMD is currently tracking.");
1619 MOZ_COLLECT_REPORT("explicit/dmd/stack-traces/table", KIND_HEAP
,
1620 UNITS_BYTES
, sizes
.mStackTraceTable
,
1621 "Memory used by DMD's stack trace table.");
1623 MOZ_COLLECT_REPORT("explicit/dmd/live-block-table", KIND_HEAP
, UNITS_BYTES
,
1624 sizes
.mLiveBlockTable
,
1625 "Memory used by DMD's live block table.");
1627 MOZ_COLLECT_REPORT("explicit/dmd/dead-block-list", KIND_HEAP
, UNITS_BYTES
,
1628 sizes
.mDeadBlockTable
,
1629 "Memory used by DMD's dead block list.");
1635 ~DMDReporter() = default;
1637 NS_IMPL_ISUPPORTS(DMDReporter
, nsIMemoryReporter
)
1640 } // namespace mozilla
1644 #ifdef MOZ_WIDGET_ANDROID
// Android-only reporter (guarded by MOZ_WIDGET_ANDROID above): reports
// "java-heap" from GeckoAppShell::GetMemoryUsage("summary.java-heap"), in KiB
// converted to bytes (* 1024). Bails out when JNI is unavailable or the API
// level is below 23.
// NOTE(review): fragmentary extraction; interior lines missing.
1645 class AndroidMemoryReporter final
: public nsIMemoryReporter
{
1649 AndroidMemoryReporter() = default;
1652 CollectReports(nsIHandleReportCallback
* aHandleReport
, nsISupports
* aData
,
1653 bool aAnonymize
) override
{
1654 if (!jni::IsAvailable() || jni::GetAPIVersion() < 23) {
1658 int32_t heap
= java::GeckoAppShell::GetMemoryUsage("summary.java-heap"_ns
);
1660 MOZ_COLLECT_REPORT("java-heap", KIND_OTHER
, UNITS_BYTES
, heap
* 1024,
1661 "The private Java Heap usage");
1667 ~AndroidMemoryReporter() = default;
1670 NS_IMPL_ISUPPORTS(AndroidMemoryReporter
, nsIMemoryReporter
)
1674 ** nsMemoryReporterManager implementation
1677 NS_IMPL_ISUPPORTS(nsMemoryReporterManager
, nsIMemoryReporterManager
,
// One-time initialization: registers all built-in reporters (conditionally on
// their HAVE_*/platform #ifdefs) plus the manager itself as a weak reporter.
// Guarded by a static `isInited` flag because this is reachable multiple
// times from JS via nsIMemoryReporter.idl (see the comment block below).
// Must run on the main thread.
// NOTE(review): fragmentary extraction -- early returns, braces, and #endif
// lines are missing; do not treat as compilable.
1681 nsMemoryReporterManager::Init() {
1682 if (!NS_IsMainThread()) {
1686 // Under normal circumstances this function is only called once. However,
1687 // we've (infrequently) seen memory report dumps in crash reports that
1688 // suggest that this function is sometimes called multiple times. That in
1689 // turn means that multiple reporters of each kind are registered, which
1690 // leads to duplicated reports of individual measurements such as "resident",
1693 // It's unclear how these multiple calls can occur. The only plausible theory
1694 // so far is badly-written extensions, because this function is callable from
1695 // JS code via nsIMemoryReporter.idl.
1697 // Whatever the cause, it's a bad thing. So we protect against it with the
1699 static bool isInited
= false;
1701 NS_WARNING("nsMemoryReporterManager::Init() has already been called!");
1706 #ifdef HAVE_JEMALLOC_STATS
1707 RegisterStrongReporter(new JemallocHeapReporter());
1710 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
1711 RegisterStrongReporter(new VsizeReporter());
1712 RegisterStrongReporter(new ResidentReporter());
1715 #ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
1716 RegisterStrongReporter(new VsizeMaxContiguousReporter());
1719 #ifdef HAVE_RESIDENT_PEAK_REPORTER
1720 RegisterStrongReporter(new ResidentPeakReporter());
1723 #ifdef HAVE_RESIDENT_UNIQUE_REPORTER
1724 RegisterStrongReporter(new ResidentUniqueReporter());
1727 #ifdef HAVE_PAGE_FAULT_REPORTERS
1728 RegisterStrongReporter(new PageFaultsSoftReporter());
1729 RegisterStrongReporter(new PageFaultsHardReporter());
1732 #ifdef HAVE_PRIVATE_REPORTER
1733 RegisterStrongReporter(new PrivateReporter());
1736 #ifdef HAVE_SYSTEM_HEAP_REPORTER
1737 RegisterStrongReporter(new SystemHeapReporter());
1740 RegisterStrongReporter(new AtomTablesReporter());
1742 RegisterStrongReporter(new ThreadsReporter());
1745 RegisterStrongReporter(new DeadlockDetectorReporter());
1748 #ifdef MOZ_GECKO_PROFILER
1749 // We have to register this here rather than in profiler_init() because
1750 // profiler_init() runs prior to nsMemoryReporterManager's creation.
1751 RegisterStrongReporter(new GeckoProfilerReporter());
1755 RegisterStrongReporter(new mozilla::dmd::DMDReporter());
1759 RegisterStrongReporter(new WindowsAddressSpaceReporter());
1762 #ifdef MOZ_WIDGET_ANDROID
1763 RegisterStrongReporter(new AndroidMemoryReporter());
1767 nsMemoryInfoDumper::Initialize();
1770 // Report our own memory usage as well.
1771 RegisterWeakReporter(this);
// Constructor: heap-allocates the strong/weak reporter tables (deleted in the
// destructor), nulls out the saved-table and pending-state pointers, and
// (under HAVE_JEMALLOC_STATS) grabs the stream-transport-service thread pool.
// NOTE(review): fragmentary extraction; some initializer lines are missing.
1776 nsMemoryReporterManager::nsMemoryReporterManager()
1777 : mMutex("nsMemoryReporterManager::mMutex"),
1778 mIsRegistrationBlocked(false),
1779 mStrongReporters(new StrongReportersTable()),
1780 mWeakReporters(new WeakReportersTable()),
1781 mSavedStrongReporters(nullptr),
1782 mSavedWeakReporters(nullptr),
1784 mPendingProcessesState(nullptr),
1785 mPendingReportersState(nullptr)
1786 #ifdef HAVE_JEMALLOC_STATS
1788 mThreadPool(do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID
))
// Destructor: frees the reporter tables and asserts that any tables saved by
// block-registration-and-hide logic were restored before teardown.
1793 nsMemoryReporterManager::~nsMemoryReporterManager() {
1794 delete mStrongReporters
;
1795 delete mWeakReporters
;
1796 NS_ASSERTION(!mSavedStrongReporters
, "failed to restore strong reporters");
1797 NS_ASSERTION(!mSavedWeakReporters
, "failed to restore weak reporters");
// The manager's own weak-reporter entry point: sums its shallow size plus the
// two reporter tables' shallow sizes (under mMutex) and reports it as
// explicit/memory-reporter-manager.
// NOTE(review): fragmentary extraction; interior lines missing.
1801 nsMemoryReporterManager::CollectReports(nsIHandleReportCallback
* aHandleReport
,
1802 nsISupports
* aData
, bool aAnonymize
) {
1803 size_t n
= MallocSizeOf(this);
1805 mozilla::MutexAutoLock
autoLock(mMutex
);
1806 n
+= mStrongReporters
->ShallowSizeOfIncludingThis(MallocSizeOf
);
1807 n
+= mWeakReporters
->ShallowSizeOfIncludingThis(MallocSizeOf
);
1810 MOZ_COLLECT_REPORT("explicit/memory-reporter-manager", KIND_HEAP
, UNITS_BYTES
,
1811 n
, "Memory used by the memory reporter infrastructure.");
1816 #ifdef DEBUG_CHILD_PROCESS_MEMORY_REPORTING
1817 # define MEMORY_REPORTING_LOG(format, ...) \
1818 printf_stderr("++++ MEMORY REPORTING: " format, ##__VA_ARGS__);
1820 # define MEMORY_REPORTING_LOG(...)
// Convenience wrapper: forwards to GetReportsExtended() with minimize=false
// and an empty DMD dump identifier.
1824 nsMemoryReporterManager::GetReports(
1825 nsIHandleReportCallback
* aHandleReport
, nsISupports
* aHandleReportData
,
1826 nsIFinishReportingCallback
* aFinishReporting
,
1827 nsISupports
* aFinishReportingData
, bool aAnonymize
) {
1828 return GetReportsExtended(aHandleReport
, aHandleReportData
, aFinishReporting
,
1829 aFinishReportingData
, aAnonymize
,
1830 /* minimize = */ false,
1831 /* DMDident = */ u
""_ns
);
// Full entry point for a multi-process memory report. Main-thread only.
// Allocates a new generation number; silently bails if a request is already
// in flight (mPendingProcessesState non-null). Builds the
// PendingProcessesState, then either minimizes memory first (dispatching
// StartGettingReports as a callback) or starts gathering immediately,
// depending on aMinimize.
// NOTE(review): fragmentary extraction -- returns, braces, and some
// statements are missing; do not treat as compilable.
1835 nsMemoryReporterManager::GetReportsExtended(
1836 nsIHandleReportCallback
* aHandleReport
, nsISupports
* aHandleReportData
,
1837 nsIFinishReportingCallback
* aFinishReporting
,
1838 nsISupports
* aFinishReportingData
, bool aAnonymize
, bool aMinimize
,
1839 const nsAString
& aDMDDumpIdent
) {
1842 // Memory reporters are not necessarily threadsafe, so this function must
1843 // be called from the main thread.
1844 if (!NS_IsMainThread()) {
1848 uint32_t generation
= mNextGeneration
++;
1850 if (mPendingProcessesState
) {
1851 // A request is in flight. Don't start another one. And don't report
1852 // an error; just ignore it, and let the in-flight request finish.
1853 MEMORY_REPORTING_LOG("GetReports (gen=%u, s->gen=%u): abort\n", generation
,
1854 mPendingProcessesState
->mGeneration
);
1858 MEMORY_REPORTING_LOG("GetReports (gen=%u)\n", generation
);
1860 uint32_t concurrency
= Preferences::GetUint("memory.report_concurrency", 1);
1861 MOZ_ASSERT(concurrency
>= 1);
1862 if (concurrency
< 1) {
1865 mPendingProcessesState
= new PendingProcessesState(
1866 generation
, aAnonymize
, aMinimize
, concurrency
, aHandleReport
,
1867 aHandleReportData
, aFinishReporting
, aFinishReportingData
, aDMDDumpIdent
);
1870 nsCOMPtr
<nsIRunnable
> callback
=
1871 NewRunnableMethod("nsMemoryReporterManager::StartGettingReports", this,
1872 &nsMemoryReporterManager::StartGettingReports
);
1873 rv
= MinimizeMemoryUsage(callback
);
1875 rv
= StartGettingReports();
// Kicks off the actual gathering for the pending request: opens the parent
// DMD file if requested (degrading gracefully on failure), collects this
// (parent) process's reports first so the main thread is free to consume
// child reports, then queues reporting requests for content children, GPU,
// RDD, VR, socket, and utility processes, and finally arms the one-shot
// timeout timer when any children are pending.
// NOTE(review): fragmentary extraction -- returns, braces, and some argument
// lists are truncated; do not treat as compilable.
1881 nsresult
nsMemoryReporterManager::StartGettingReports() {
1882 PendingProcessesState
* s
= mPendingProcessesState
;
1885 // Get reports for this process.
1886 FILE* parentDMDFile
= nullptr;
1888 if (!s
->mDMDDumpIdent
.IsEmpty()) {
1889 rv
= nsMemoryInfoDumper::OpenDMDFile(s
->mDMDDumpIdent
, getpid(),
1891 if (NS_WARN_IF(NS_FAILED(rv
))) {
1892 // Proceed with the memory report as if DMD were disabled.
1893 parentDMDFile
= nullptr;
1899 GetReportsForThisProcessExtended(
1900 s
->mHandleReport
, s
->mHandleReportData
, s
->mAnonymize
, parentDMDFile
,
1901 s
->mFinishReporting
, s
->mFinishReportingData
);
1903 nsTArray
<dom::ContentParent
*> childWeakRefs
;
1904 dom::ContentParent::GetAll(childWeakRefs
);
1905 if (!childWeakRefs
.IsEmpty()) {
1906 // Request memory reports from child processes. This happens
1907 // after the parent report so that the parent's main thread will
1908 // be free to process the child reports, instead of causing them
1909 // to be buffered and consume (possibly scarce) memory.
1911 for (size_t i
= 0; i
< childWeakRefs
.Length(); ++i
) {
1912 s
->mChildrenPending
.AppendElement(childWeakRefs
[i
]);
1916 if (gfx::GPUProcessManager
* gpu
= gfx::GPUProcessManager::Get()) {
1917 if (RefPtr
<MemoryReportingProcess
> proc
= gpu
->GetProcessMemoryReporter()) {
1918 s
->mChildrenPending
.AppendElement(proc
.forget());
1922 if (RDDProcessManager
* rdd
= RDDProcessManager::Get()) {
1923 if (RefPtr
<MemoryReportingProcess
> proc
= rdd
->GetProcessMemoryReporter()) {
1924 s
->mChildrenPending
.AppendElement(proc
.forget());
1928 if (gfx::VRProcessManager
* vr
= gfx::VRProcessManager::Get()) {
1929 if (RefPtr
<MemoryReportingProcess
> proc
= vr
->GetProcessMemoryReporter()) {
1930 s
->mChildrenPending
.AppendElement(proc
.forget());
1934 if (!IsRegistrationBlocked() && net::gIOService
) {
1935 if (RefPtr
<MemoryReportingProcess
> proc
=
1936 net::gIOService
->GetSocketProcessMemoryReporter()) {
1937 s
->mChildrenPending
.AppendElement(proc
.forget());
1941 if (!IsRegistrationBlocked()) {
1942 if (RefPtr
<UtilityProcessManager
> utility
=
1943 UtilityProcessManager::GetIfExists()) {
1944 for (RefPtr
<UtilityProcessParent
>& parent
:
1945 utility
->GetAllProcessesProcessParent()) {
1946 if (RefPtr
<MemoryReportingProcess
> proc
=
1947 utility
->GetProcessMemoryReporter(parent
)) {
1948 s
->mChildrenPending
.AppendElement(proc
.forget());
1954 if (!s
->mChildrenPending
.IsEmpty()) {
1955 nsCOMPtr
<nsITimer
> timer
;
1956 rv
= NS_NewTimerWithFuncCallback(
1957 getter_AddRefs(timer
), TimeoutCallback
, this, kTimeoutLengthMS
,
1958 nsITimer::TYPE_ONE_SHOT
,
1959 "nsMemoryReporterManager::StartGettingReports");
1960 if (NS_WARN_IF(NS_FAILED(rv
))) {
1965 MOZ_ASSERT(!s
->mTimer
);
1966 s
->mTimer
.swap(timer
);
// Dispatches one reporter's CollectReports() as a main-thread runnable,
// capturing strong refs to the reporter, callback, and data so they outlive
// the dispatch. Increments mPendingReportersState->mReportsPending; the
// matching decrement happens in EndReport().
// NOTE(review): fragmentary extraction -- the parameter list tail and parts
// of the lambda body are missing; the exact async-vs-sync handling of
// aIsAsync is not visible here.
1972 void nsMemoryReporterManager::DispatchReporter(
1973 nsIMemoryReporter
* aReporter
, bool aIsAsync
,
1974 nsIHandleReportCallback
* aHandleReport
, nsISupports
* aHandleReportData
,
1976 MOZ_ASSERT(mPendingReportersState
);
1978 // Grab refs to everything used in the lambda function.
1979 RefPtr
<nsMemoryReporterManager
> self
= this;
1980 nsCOMPtr
<nsIMemoryReporter
> reporter
= aReporter
;
1981 nsCOMPtr
<nsIHandleReportCallback
> handleReport
= aHandleReport
;
1982 nsCOMPtr
<nsISupports
> handleReportData
= aHandleReportData
;
1984 nsCOMPtr
<nsIRunnable
> event
= NS_NewRunnableFunction(
1985 "nsMemoryReporterManager::DispatchReporter",
1986 [self
, reporter
, aIsAsync
, handleReport
, handleReportData
, aAnonymize
]() {
1987 reporter
->CollectReports(handleReport
, handleReportData
, aAnonymize
);
1993 NS_DispatchToMainThread(event
);
1994 mPendingReportersState
->mReportsPending
++;
// Runs all registered reporters for the current process. Main-thread only;
// fails with NS_ERROR_IN_PROGRESS if a per-process report is already running.
// Clears DMD's reported state first (when DMD is in play), creates
// PendingReportersState, then dispatches every strong and weak reporter via
// DispatchReporter() while holding mMutex.
// NOTE(review): fragmentary extraction -- returns and closing braces are
// missing; do not treat as compilable.
1998 nsMemoryReporterManager::GetReportsForThisProcessExtended(
1999 nsIHandleReportCallback
* aHandleReport
, nsISupports
* aHandleReportData
,
2000 bool aAnonymize
, FILE* aDMDFile
,
2001 nsIFinishReportingCallback
* aFinishReporting
,
2002 nsISupports
* aFinishReportingData
) {
2003 // Memory reporters are not necessarily threadsafe, so this function must
2004 // be called from the main thread.
2005 if (!NS_IsMainThread()) {
2009 if (NS_WARN_IF(mPendingReportersState
)) {
2010 // Report is already in progress.
2011 return NS_ERROR_IN_PROGRESS
;
2016 // Clear DMD's reportedness state before running the memory
2017 // reporters, to avoid spurious twice-reported warnings.
2018 dmd::ClearReports();
2021 MOZ_ASSERT(!aDMDFile
);
2024 mPendingReportersState
= new PendingReportersState(
2025 aFinishReporting
, aFinishReportingData
, aDMDFile
);
2028 mozilla::MutexAutoLock
autoLock(mMutex
);
2030 for (const auto& entry
: *mStrongReporters
) {
2031 DispatchReporter(entry
.GetKey(), entry
.GetData(), aHandleReport
,
2032 aHandleReportData
, aAnonymize
);
2035 for (const auto& entry
: *mWeakReporters
) {
2036 nsCOMPtr
<nsIMemoryReporter
> reporter
= entry
.GetKey();
2037 DispatchReporter(reporter
, entry
.GetData(), aHandleReport
,
2038 aHandleReportData
, aAnonymize
);
// Called once per completed reporter dispatch. When the last pending report
// finishes: dumps DMD output if a DMD file was opened, then either notifies
// the parent-process aggregation (EndProcessReport) or invokes the
// finish-reporting callback directly, and tears down mPendingReportersState.
// NOTE(review): fragmentary extraction; braces/returns missing.
2047 nsMemoryReporterManager::EndReport() {
2048 if (--mPendingReportersState
->mReportsPending
== 0) {
2050 if (mPendingReportersState
->mDMDFile
) {
2051 nsMemoryInfoDumper::DumpDMDToFile(mPendingReportersState
->mDMDFile
);
2054 if (mPendingProcessesState
) {
2055 // This is the parent process.
2056 EndProcessReport(mPendingProcessesState
->mGeneration
, true);
2058 mPendingReportersState
->mFinishReporting
->Callback(
2059 mPendingReportersState
->mFinishReportingData
);
2062 delete mPendingReportersState
;
2063 mPendingReportersState
= nullptr;
// Maps a report generation number to the in-flight PendingProcessesState.
// Returns the current state when the generations match; logs and (per the
// comments) ignores stale callbacks from children that reported after their
// request finished or after a newer request superseded it. Main-thread only.
// NOTE(review): fragmentary extraction -- null-check/return lines are
// missing; do not treat as compilable.
2069 nsMemoryReporterManager::PendingProcessesState
*
2070 nsMemoryReporterManager::GetStateForGeneration(uint32_t aGeneration
) {
2071 // Memory reporting only happens on the main thread.
2072 MOZ_RELEASE_ASSERT(NS_IsMainThread());
2074 PendingProcessesState
* s
= mPendingProcessesState
;
2077 // If we reach here, then:
2079 // - A child process reported back too late, and no subsequent request
2082 // So there's nothing to be done. Just ignore it.
2083 MEMORY_REPORTING_LOG("HandleChildReports: no request in flight (aGen=%u)\n",
2088 if (aGeneration
!= s
->mGeneration
) {
2089 // If we reach here, a child process must have reported back, too late,
2090 // while a subsequent (higher-numbered) request is in flight. Again,
2092 MOZ_ASSERT(aGeneration
< s
->mGeneration
);
2093 MEMORY_REPORTING_LOG(
2094 "HandleChildReports: gen mismatch (aGen=%u, s->gen=%u)\n", aGeneration
,
// Forwards a single child-process memory report to the request's handle
// callback; stale-generation reports are filtered by GetStateForGeneration().
// NOTE(review): fragmentary extraction -- the early return when `s` is null
// is among the missing lines; do not treat as compilable.
2102 // This function has no return value. If something goes wrong, there's no
2103 // clear place to report the problem to, but that's ok -- we will end up
2104 // hitting the timeout and executing TimeoutCallback().
2105 void nsMemoryReporterManager::HandleChildReport(
2106 uint32_t aGeneration
, const dom::MemoryReport
& aChildReport
) {
2107 PendingProcessesState
* s
= GetStateForGeneration(aGeneration
);
2112 // Child reports should have a non-empty process.
2113 MOZ_ASSERT(!aChildReport
.process().IsEmpty());
2115 // If the call fails, ignore and continue.
2116 s
->mHandleReport
->Callback(aChildReport
.process(), aChildReport
.path(),
2117 aChildReport
.kind(), aChildReport
.units(),
2118 aChildReport
.amount(), aChildReport
.desc(),
2119 s
->mHandleReportData
);
// Requests a memory report from one child process. Returns false-path when
// the child has already exited (logged); otherwise opens a per-child DMD
// file if a dump identifier is set (degrading gracefully on failure) and
// sends SendRequestMemoryReport with the generation/anonymize/minimize flags
// and the optional DMD file descriptor.
// NOTE(review): fragmentary extraction; returns/braces missing.
2123 bool nsMemoryReporterManager::StartChildReport(
2124 mozilla::MemoryReportingProcess
* aChild
,
2125 const PendingProcessesState
* aState
) {
2126 if (!aChild
->IsAlive()) {
2127 MEMORY_REPORTING_LOG(
2128 "StartChildReports (gen=%u): child exited before"
2129 " its report was started\n",
2130 aState
->mGeneration
);
2134 Maybe
<mozilla::ipc::FileDescriptor
> dmdFileDesc
;
2136 if (!aState
->mDMDDumpIdent
.IsEmpty()) {
2137 FILE* dmdFile
= nullptr;
2138 nsresult rv
= nsMemoryInfoDumper::OpenDMDFile(aState
->mDMDDumpIdent
,
2139 aChild
->Pid(), &dmdFile
);
2140 if (NS_WARN_IF(NS_FAILED(rv
))) {
2141 // Proceed with the memory report as if DMD were disabled.
2145 dmdFileDesc
= Some(mozilla::ipc::FILEToFileDescriptor(dmdFile
));
2150 return aChild
->SendRequestMemoryReport(
2151 aState
->mGeneration
, aState
->mAnonymize
, aState
->mMinimize
, dmdFileDesc
);
// Bookkeeping when one process (parent or child) finishes (or dies during)
// its report: updates running/completed counters, starts queued children up
// to mConcurrencyLimit, and -- once nothing is running -- cancels the timeout
// timer and (per the comment) finishes up.
// NOTE(review): fragmentary extraction -- the aSuccess parameter declaration
// and several braces/statements are missing; do not treat as compilable.
2154 void nsMemoryReporterManager::EndProcessReport(uint32_t aGeneration
,
2156 PendingProcessesState
* s
= GetStateForGeneration(aGeneration
);
2161 MOZ_ASSERT(s
->mNumProcessesRunning
> 0);
2162 s
->mNumProcessesRunning
--;
2163 s
->mNumProcessesCompleted
++;
2164 MEMORY_REPORTING_LOG(
2165 "HandleChildReports (aGen=%u): process %u %s"
2166 " (%u running, %u pending)\n",
2167 aGeneration
, s
->mNumProcessesCompleted
,
2168 aSuccess
? "completed" : "exited during report", s
->mNumProcessesRunning
,
2169 static_cast<unsigned>(s
->mChildrenPending
.Length()));
2171 // Start pending children up to the concurrency limit.
2172 while (s
->mNumProcessesRunning
< s
->mConcurrencyLimit
&&
2173 !s
->mChildrenPending
.IsEmpty()) {
2174 // Pop last element from s->mChildrenPending
2175 const RefPtr
<MemoryReportingProcess
> nextChild
=
2176 s
->mChildrenPending
.PopLastElement();
2177 // Start report (if the child is still alive).
2178 if (StartChildReport(nextChild
, s
)) {
2179 ++s
->mNumProcessesRunning
;
2180 MEMORY_REPORTING_LOG(
2181 "HandleChildReports (aGen=%u): started child report"
2182 " (%u running, %u pending)\n",
2183 aGeneration
, s
->mNumProcessesRunning
,
2184 static_cast<unsigned>(s
->mChildrenPending
.Length()));
2188 // If all the child processes (if any) have reported, we can cancel
2189 // the timer (if started) and finish up. Otherwise, just return.
2190 if (s
->mNumProcessesRunning
== 0) {
2191 MOZ_ASSERT(s
->mChildrenPending
.IsEmpty());
2193 s
->mTimer
->Cancel();
// nsITimer callback fired when child processes fail to report in time:
// recovers the manager from the opaque aData pointer, release-asserts the
// pending state exists, logs, and finishes the report without waiting for
// (or cancelling) the stragglers.
2200 void nsMemoryReporterManager::TimeoutCallback(nsITimer
* aTimer
, void* aData
) {
2201 nsMemoryReporterManager
* mgr
= static_cast<nsMemoryReporterManager
*>(aData
);
2202 PendingProcessesState
* s
= mgr
->mPendingProcessesState
;
2204 // Release assert because: if the pointer is null we're about to
2205 // crash regardless of DEBUG, and this way the compiler doesn't
2206 // complain about unused variables.
2207 MOZ_RELEASE_ASSERT(s
, "mgr->mPendingProcessesState");
2208 MEMORY_REPORTING_LOG("TimeoutCallback (s->gen=%u; %u running, %u pending)\n",
2209 s
->mGeneration
, s
->mNumProcessesRunning
,
2210 static_cast<unsigned>(s
->mChildrenPending
.Length()));
2212 // We don't bother sending any kind of cancellation message to the child
2213 // processes that haven't reported back.
2214 mgr
->FinishReporting();
// Completes a multi-process report: invokes the finish-reporting callback
// (deliberately before deleting the state, so a re-entrant GetReports() call
// silently aborts) and then destroys mPendingProcessesState. Main-thread
// only.
// NOTE(review): fragmentary extraction; returns/braces missing.
2217 nsresult
nsMemoryReporterManager::FinishReporting() {
2218 // Memory reporting only happens on the main thread.
2219 if (!NS_IsMainThread()) {
2223 MOZ_ASSERT(mPendingProcessesState
);
2224 MEMORY_REPORTING_LOG("FinishReporting (s->gen=%u; %u processes reported)\n",
2225 mPendingProcessesState
->mGeneration
,
2226 mPendingProcessesState
->mNumProcessesCompleted
);
2228 // Call this before deleting |mPendingProcessesState|. That way, if
2229 // |mFinishReportData| calls GetReports(), it will silently abort, as
2231 nsresult rv
= mPendingProcessesState
->mFinishReporting
->Callback(
2232 mPendingProcessesState
->mFinishReportingData
);
2234 delete mPendingProcessesState
;
2235 mPendingProcessesState
= nullptr;
2239 nsMemoryReporterManager::PendingProcessesState::PendingProcessesState(
2240 uint32_t aGeneration
, bool aAnonymize
, bool aMinimize
,
2241 uint32_t aConcurrencyLimit
, nsIHandleReportCallback
* aHandleReport
,
2242 nsISupports
* aHandleReportData
,
2243 nsIFinishReportingCallback
* aFinishReporting
,
2244 nsISupports
* aFinishReportingData
, const nsAString
& aDMDDumpIdent
)
2245 : mGeneration(aGeneration
),
2246 mAnonymize(aAnonymize
),
2247 mMinimize(aMinimize
),
2248 mNumProcessesRunning(1), // reporting starts with the parent
2249 mNumProcessesCompleted(0),
2250 mConcurrencyLimit(aConcurrencyLimit
),
2251 mHandleReport(aHandleReport
),
2252 mHandleReportData(aHandleReportData
),
2253 mFinishReporting(aFinishReporting
),
2254 mFinishReportingData(aFinishReportingData
),
2255 mDMDDumpIdent(aDMDDumpIdent
) {}
2257 static void CrashIfRefcountIsZero(nsISupports
* aObj
) {
2258 // This will probably crash if the object's refcount is 0.
2259 uint32_t refcnt
= NS_ADDREF(aObj
);
2261 MOZ_CRASH("CrashIfRefcountIsZero: refcount is zero");
2266 nsresult
nsMemoryReporterManager::RegisterReporterHelper(
2267 nsIMemoryReporter
* aReporter
, bool aForce
, bool aStrong
, bool aIsAsync
) {
2268 // This method is thread-safe.
2269 mozilla::MutexAutoLock
autoLock(mMutex
);
2271 if (mIsRegistrationBlocked
&& !aForce
) {
2272 return NS_ERROR_FAILURE
;
2275 if (mStrongReporters
->Contains(aReporter
) ||
2276 mWeakReporters
->Contains(aReporter
)) {
2277 return NS_ERROR_FAILURE
;
2280 // If |aStrong| is true, |aReporter| may have a refcnt of 0, so we take
2281 // a kung fu death grip before calling PutEntry. Otherwise, if PutEntry
2282 // addref'ed and released |aReporter| before finally addref'ing it for
2283 // good, it would free aReporter! The kung fu death grip could itself be
2284 // problematic if PutEntry didn't addref |aReporter| (because then when the
2285 // death grip goes out of scope, we would delete the reporter). In debug
2286 // mode, we check that this doesn't happen.
2288 // If |aStrong| is false, we require that |aReporter| have a non-zero
2292 nsCOMPtr
<nsIMemoryReporter
> kungFuDeathGrip
= aReporter
;
2293 mStrongReporters
->InsertOrUpdate(aReporter
, aIsAsync
);
2294 CrashIfRefcountIsZero(aReporter
);
2296 CrashIfRefcountIsZero(aReporter
);
2297 nsCOMPtr
<nsIXPConnectWrappedJS
> jsComponent
= do_QueryInterface(aReporter
);
2299 // We cannot allow non-native reporters (WrappedJS), since we'll be
2300 // holding onto a raw pointer, which would point to the wrapper,
2301 // and that wrapper is likely to go away as soon as this register
2302 // call finishes. This would then lead to subsequent crashes in
2303 // CollectReports().
2304 return NS_ERROR_XPC_BAD_CONVERT_JS
;
2306 mWeakReporters
->InsertOrUpdate(aReporter
, aIsAsync
);
2313 nsMemoryReporterManager::RegisterStrongReporter(nsIMemoryReporter
* aReporter
) {
2314 return RegisterReporterHelper(aReporter
, /* force = */ false,
2315 /* strong = */ true,
2316 /* async = */ false);
2320 nsMemoryReporterManager::RegisterStrongAsyncReporter(
2321 nsIMemoryReporter
* aReporter
) {
2322 return RegisterReporterHelper(aReporter
, /* force = */ false,
2323 /* strong = */ true,
2324 /* async = */ true);
2328 nsMemoryReporterManager::RegisterWeakReporter(nsIMemoryReporter
* aReporter
) {
2329 return RegisterReporterHelper(aReporter
, /* force = */ false,
2330 /* strong = */ false,
2331 /* async = */ false);
2335 nsMemoryReporterManager::RegisterWeakAsyncReporter(
2336 nsIMemoryReporter
* aReporter
) {
2337 return RegisterReporterHelper(aReporter
, /* force = */ false,
2338 /* strong = */ false,
2339 /* async = */ true);
2343 nsMemoryReporterManager::RegisterStrongReporterEvenIfBlocked(
2344 nsIMemoryReporter
* aReporter
) {
2345 return RegisterReporterHelper(aReporter
, /* force = */ true,
2346 /* strong = */ true,
2347 /* async = */ false);
2351 nsMemoryReporterManager::UnregisterStrongReporter(
2352 nsIMemoryReporter
* aReporter
) {
2353 // This method is thread-safe.
2354 mozilla::MutexAutoLock
autoLock(mMutex
);
2356 MOZ_ASSERT(!mWeakReporters
->Contains(aReporter
));
2358 if (mStrongReporters
->Contains(aReporter
)) {
2359 mStrongReporters
->Remove(aReporter
);
2363 // We don't register new reporters when the block is in place, but we do
2364 // unregister existing reporters. This is so we don't keep holding strong
2365 // references that these reporters aren't expecting (which can keep them
2366 // alive longer than intended).
2367 if (mSavedStrongReporters
&& mSavedStrongReporters
->Contains(aReporter
)) {
2368 mSavedStrongReporters
->Remove(aReporter
);
2372 return NS_ERROR_FAILURE
;
2376 nsMemoryReporterManager::UnregisterWeakReporter(nsIMemoryReporter
* aReporter
) {
2377 // This method is thread-safe.
2378 mozilla::MutexAutoLock
autoLock(mMutex
);
2380 MOZ_ASSERT(!mStrongReporters
->Contains(aReporter
));
2382 if (mWeakReporters
->Contains(aReporter
)) {
2383 mWeakReporters
->Remove(aReporter
);
2387 // We don't register new reporters when the block is in place, but we do
2388 // unregister existing reporters. This is so we don't keep holding weak
2389 // references that the old reporters aren't expecting (which can end up as
2390 // dangling pointers that lead to use-after-frees).
2391 if (mSavedWeakReporters
&& mSavedWeakReporters
->Contains(aReporter
)) {
2392 mSavedWeakReporters
->Remove(aReporter
);
2396 return NS_ERROR_FAILURE
;
2400 nsMemoryReporterManager::BlockRegistrationAndHideExistingReporters() {
2401 // This method is thread-safe.
2402 mozilla::MutexAutoLock
autoLock(mMutex
);
2403 if (mIsRegistrationBlocked
) {
2404 return NS_ERROR_FAILURE
;
2406 mIsRegistrationBlocked
= true;
2408 // Hide the existing reporters, saving them for later restoration.
2409 MOZ_ASSERT(!mSavedStrongReporters
);
2410 MOZ_ASSERT(!mSavedWeakReporters
);
2411 mSavedStrongReporters
= mStrongReporters
;
2412 mSavedWeakReporters
= mWeakReporters
;
2413 mStrongReporters
= new StrongReportersTable();
2414 mWeakReporters
= new WeakReportersTable();
2420 nsMemoryReporterManager::UnblockRegistrationAndRestoreOriginalReporters() {
2421 // This method is thread-safe.
2422 mozilla::MutexAutoLock
autoLock(mMutex
);
2423 if (!mIsRegistrationBlocked
) {
2424 return NS_ERROR_FAILURE
;
2427 // Banish the current reporters, and restore the hidden ones.
2428 delete mStrongReporters
;
2429 delete mWeakReporters
;
2430 mStrongReporters
= mSavedStrongReporters
;
2431 mWeakReporters
= mSavedWeakReporters
;
2432 mSavedStrongReporters
= nullptr;
2433 mSavedWeakReporters
= nullptr;
2435 mIsRegistrationBlocked
= false;
2440 nsMemoryReporterManager::GetVsize(int64_t* aVsize
) {
2441 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2442 return VsizeDistinguishedAmount(aVsize
);
2445 return NS_ERROR_NOT_AVAILABLE
;
2450 nsMemoryReporterManager::GetVsizeMaxContiguous(int64_t* aAmount
) {
2451 #ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
2452 return VsizeMaxContiguousDistinguishedAmount(aAmount
);
2455 return NS_ERROR_NOT_AVAILABLE
;
2460 nsMemoryReporterManager::GetResident(int64_t* aAmount
) {
2461 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2462 return ResidentDistinguishedAmount(aAmount
);
2465 return NS_ERROR_NOT_AVAILABLE
;
2470 nsMemoryReporterManager::GetResidentFast(int64_t* aAmount
) {
2471 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2472 return ResidentFastDistinguishedAmount(aAmount
);
2475 return NS_ERROR_NOT_AVAILABLE
;
2480 int64_t nsMemoryReporterManager::ResidentFast() {
2481 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2483 nsresult rv
= ResidentFastDistinguishedAmount(&amount
);
2484 NS_ENSURE_SUCCESS(rv
, 0);
2492 nsMemoryReporterManager::GetResidentPeak(int64_t* aAmount
) {
2493 #ifdef HAVE_RESIDENT_PEAK_REPORTER
2494 return ResidentPeakDistinguishedAmount(aAmount
);
2497 return NS_ERROR_NOT_AVAILABLE
;
2502 int64_t nsMemoryReporterManager::ResidentPeak() {
2503 #ifdef HAVE_RESIDENT_PEAK_REPORTER
2505 nsresult rv
= ResidentPeakDistinguishedAmount(&amount
);
2506 NS_ENSURE_SUCCESS(rv
, 0);
2514 nsMemoryReporterManager::GetResidentUnique(int64_t* aAmount
) {
2515 #ifdef HAVE_RESIDENT_UNIQUE_REPORTER
2516 return ResidentUniqueDistinguishedAmount(aAmount
);
2519 return NS_ERROR_NOT_AVAILABLE
;
// macOS-only (mach_port_t parameter): physical footprint of the task behind
// |aPort|, or 0 on failure.
#ifdef XP_MACOSX
/*static*/
int64_t nsMemoryReporterManager::PhysicalFootprint(mach_port_t aPort) {
  int64_t amount = 0;
  nsresult rv = PhysicalFootprintAmount(&amount, aPort);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
}
#endif
2545 #if defined(XP_WIN) || defined(XP_MACOSX) || defined(XP_LINUX)
2548 int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg aProcess
) {
2550 nsresult rv
= ResidentUniqueDistinguishedAmount(&amount
, aProcess
);
2551 NS_ENSURE_SUCCESS(rv
, 0);
2558 int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg
) {
2559 # ifdef HAVE_RESIDENT_UNIQUE_REPORTER
2561 nsresult rv
= ResidentUniqueDistinguishedAmount(&amount
);
2562 NS_ENSURE_SUCCESS(rv
, 0);
2569 #endif // XP_{WIN, MACOSX, LINUX, *}
#ifdef HAVE_JEMALLOC_STATS
// Extracts the total allocated-bytes figure from a jemalloc stats snapshot.
/*static*/
size_t nsMemoryReporterManager::HeapAllocated(const jemalloc_stats_t& aStats) {
  return aStats.allocated;
}
#endif
2579 nsMemoryReporterManager::GetHeapAllocated(int64_t* aAmount
) {
2580 #ifdef HAVE_JEMALLOC_STATS
2581 jemalloc_stats_t stats
;
2582 jemalloc_stats(&stats
);
2583 *aAmount
= HeapAllocated(stats
);
2587 return NS_ERROR_NOT_AVAILABLE
;
2591 // This has UNITS_PERCENTAGE, so it is multiplied by 100x.
2593 nsMemoryReporterManager::GetHeapOverheadFraction(int64_t* aAmount
) {
2594 #ifdef HAVE_JEMALLOC_STATS
2595 jemalloc_stats_t stats
;
2596 jemalloc_stats(&stats
);
2597 *aAmount
= HeapOverheadFraction(stats
);
2601 return NS_ERROR_NOT_AVAILABLE
;
2605 [[nodiscard
]] static nsresult
GetInfallibleAmount(InfallibleAmountFn aAmountFn
,
2608 *aAmount
= aAmountFn();
2612 return NS_ERROR_NOT_AVAILABLE
;
2616 nsMemoryReporterManager::GetJSMainRuntimeGCHeap(int64_t* aAmount
) {
2617 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeGCHeap
, aAmount
);
2621 nsMemoryReporterManager::GetJSMainRuntimeTemporaryPeak(int64_t* aAmount
) {
2622 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeTemporaryPeak
, aAmount
);
2626 nsMemoryReporterManager::GetJSMainRuntimeCompartmentsSystem(int64_t* aAmount
) {
2627 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeCompartmentsSystem
,
2632 nsMemoryReporterManager::GetJSMainRuntimeCompartmentsUser(int64_t* aAmount
) {
2633 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeCompartmentsUser
,
2638 nsMemoryReporterManager::GetJSMainRuntimeRealmsSystem(int64_t* aAmount
) {
2639 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeRealmsSystem
, aAmount
);
2643 nsMemoryReporterManager::GetJSMainRuntimeRealmsUser(int64_t* aAmount
) {
2644 return GetInfallibleAmount(mAmountFns
.mJSMainRuntimeRealmsUser
, aAmount
);
2648 nsMemoryReporterManager::GetImagesContentUsedUncompressed(int64_t* aAmount
) {
2649 return GetInfallibleAmount(mAmountFns
.mImagesContentUsedUncompressed
,
2654 nsMemoryReporterManager::GetStorageSQLite(int64_t* aAmount
) {
2655 return GetInfallibleAmount(mAmountFns
.mStorageSQLite
, aAmount
);
2659 nsMemoryReporterManager::GetLowMemoryEventsPhysical(int64_t* aAmount
) {
2660 return GetInfallibleAmount(mAmountFns
.mLowMemoryEventsPhysical
, aAmount
);
2664 nsMemoryReporterManager::GetGhostWindows(int64_t* aAmount
) {
2665 return GetInfallibleAmount(mAmountFns
.mGhostWindows
, aAmount
);
2669 nsMemoryReporterManager::GetPageFaultsHard(int64_t* aAmount
) {
2670 #ifdef HAVE_PAGE_FAULT_REPORTERS
2671 return PageFaultsHardDistinguishedAmount(aAmount
);
2674 return NS_ERROR_NOT_AVAILABLE
;
2679 nsMemoryReporterManager::GetHasMozMallocUsableSize(bool* aHas
) {
2680 void* p
= malloc(16);
2682 return NS_ERROR_OUT_OF_MEMORY
;
2684 size_t usable
= moz_malloc_usable_size(p
);
2686 *aHas
= !!(usable
> 0);
2691 nsMemoryReporterManager::GetIsDMDEnabled(bool* aIsEnabled
) {
2695 *aIsEnabled
= false;
2701 nsMemoryReporterManager::GetIsDMDRunning(bool* aIsRunning
) {
2703 *aIsRunning
= dmd::IsRunning();
2705 *aIsRunning
= false;
2713 * This runnable lets us implement
2714 * nsIMemoryReporterManager::MinimizeMemoryUsage(). We fire a heap-minimize
2715 * notification, spin the event loop, and repeat this process a few times.
2717 * When this sequence finishes, we invoke the callback function passed to the
2718 * runnable's constructor.
2720 class MinimizeMemoryUsageRunnable
: public Runnable
{
2722 explicit MinimizeMemoryUsageRunnable(nsIRunnable
* aCallback
)
2723 : mozilla::Runnable("MinimizeMemoryUsageRunnable"),
2724 mCallback(aCallback
),
2725 mRemainingIters(sNumIters
) {}
2727 NS_IMETHOD
Run() override
{
2728 nsCOMPtr
<nsIObserverService
> os
= services::GetObserverService();
2730 return NS_ERROR_FAILURE
;
2733 if (mRemainingIters
== 0) {
2734 os
->NotifyObservers(nullptr, "after-minimize-memory-usage",
2735 u
"MinimizeMemoryUsageRunnable");
2742 os
->NotifyObservers(nullptr, "memory-pressure", u
"heap-minimize");
2744 NS_DispatchToMainThread(this);
2750 // Send sNumIters heap-minimize notifications, spinning the event
2751 // loop after each notification (see bug 610166 comment 12 for an
2752 // explanation), because one notification doesn't cut it.
2753 static const uint32_t sNumIters
= 3;
2755 nsCOMPtr
<nsIRunnable
> mCallback
;
2756 uint32_t mRemainingIters
;
2762 nsMemoryReporterManager::MinimizeMemoryUsage(nsIRunnable
* aCallback
) {
2763 RefPtr
<MinimizeMemoryUsageRunnable
> runnable
=
2764 new MinimizeMemoryUsageRunnable(aCallback
);
2766 return NS_DispatchToMainThread(runnable
);
2770 nsMemoryReporterManager::SizeOfTab(mozIDOMWindowProxy
* aTopWindow
,
2771 int64_t* aJSObjectsSize
,
2772 int64_t* aJSStringsSize
,
2773 int64_t* aJSOtherSize
, int64_t* aDomSize
,
2774 int64_t* aStyleSize
, int64_t* aOtherSize
,
2775 int64_t* aTotalSize
, double* aJSMilliseconds
,
2776 double* aNonJSMilliseconds
) {
2777 nsCOMPtr
<nsIGlobalObject
> global
= do_QueryInterface(aTopWindow
);
2778 auto* piWindow
= nsPIDOMWindowOuter::From(aTopWindow
);
2779 if (NS_WARN_IF(!global
) || NS_WARN_IF(!piWindow
)) {
2780 return NS_ERROR_FAILURE
;
2783 TimeStamp t1
= TimeStamp::Now();
2785 // Measure JS memory consumption (and possibly some non-JS consumption, via
2786 // |jsPrivateSize|).
2787 size_t jsObjectsSize
, jsStringsSize
, jsPrivateSize
, jsOtherSize
;
2788 nsresult rv
= mSizeOfTabFns
.mJS(global
->GetGlobalJSObject(), &jsObjectsSize
,
2789 &jsStringsSize
, &jsPrivateSize
, &jsOtherSize
);
2790 if (NS_WARN_IF(NS_FAILED(rv
))) {
2794 TimeStamp t2
= TimeStamp::Now();
2796 // Measure non-JS memory consumption.
2797 size_t domSize
, styleSize
, otherSize
;
2798 rv
= mSizeOfTabFns
.mNonJS(piWindow
, &domSize
, &styleSize
, &otherSize
);
2799 if (NS_WARN_IF(NS_FAILED(rv
))) {
2803 TimeStamp t3
= TimeStamp::Now();
2809 *aTotalSize += (n); \
2811 DO(aJSObjectsSize
, jsObjectsSize
);
2812 DO(aJSStringsSize
, jsStringsSize
);
2813 DO(aJSOtherSize
, jsOtherSize
);
2814 DO(aDomSize
, jsPrivateSize
+ domSize
);
2815 DO(aStyleSize
, styleSize
);
2816 DO(aOtherSize
, otherSize
);
2819 *aJSMilliseconds
= (t2
- t1
).ToMilliseconds();
2820 *aNonJSMilliseconds
= (t3
- t2
).ToMilliseconds();
// Declares a local |mgr| holding the singleton manager; bails out of the
// enclosing function with NS_ERROR_FAILURE if it cannot be created.
#define GET_MEMORY_REPORTER_MANAGER(mgr)      \
  RefPtr<nsMemoryReporterManager> mgr =       \
      nsMemoryReporterManager::GetOrCreate(); \
  if (!mgr) {                                 \
    return NS_ERROR_FAILURE;                  \
  }
2834 nsresult
RegisterStrongMemoryReporter(nsIMemoryReporter
* aReporter
) {
2835 // Hold a strong reference to the argument to make sure it gets released if
2836 // we return early below.
2837 nsCOMPtr
<nsIMemoryReporter
> reporter
= aReporter
;
2838 GET_MEMORY_REPORTER_MANAGER(mgr
)
2839 return mgr
->RegisterStrongReporter(reporter
);
2842 nsresult
RegisterStrongAsyncMemoryReporter(nsIMemoryReporter
* aReporter
) {
2843 // Hold a strong reference to the argument to make sure it gets released if
2844 // we return early below.
2845 nsCOMPtr
<nsIMemoryReporter
> reporter
= aReporter
;
2846 GET_MEMORY_REPORTER_MANAGER(mgr
)
2847 return mgr
->RegisterStrongAsyncReporter(reporter
);
2850 nsresult
RegisterWeakMemoryReporter(nsIMemoryReporter
* aReporter
) {
2851 GET_MEMORY_REPORTER_MANAGER(mgr
)
2852 return mgr
->RegisterWeakReporter(aReporter
);
2855 nsresult
RegisterWeakAsyncMemoryReporter(nsIMemoryReporter
* aReporter
) {
2856 GET_MEMORY_REPORTER_MANAGER(mgr
)
2857 return mgr
->RegisterWeakAsyncReporter(aReporter
);
2860 nsresult
UnregisterStrongMemoryReporter(nsIMemoryReporter
* aReporter
) {
2861 GET_MEMORY_REPORTER_MANAGER(mgr
)
2862 return mgr
->UnregisterStrongReporter(aReporter
);
2865 nsresult
UnregisterWeakMemoryReporter(nsIMemoryReporter
* aReporter
) {
2866 GET_MEMORY_REPORTER_MANAGER(mgr
)
2867 return mgr
->UnregisterWeakReporter(aReporter
);
2870 // Macro for generating functions that register distinguished amount functions
2871 // with the memory reporter manager.
2872 #define DEFINE_REGISTER_DISTINGUISHED_AMOUNT(kind, name) \
2873 nsresult Register##name##DistinguishedAmount(kind##AmountFn aAmountFn) { \
2874 GET_MEMORY_REPORTER_MANAGER(mgr) \
2875 mgr->mAmountFns.m##name = aAmountFn; \
2879 // Macro for generating functions that unregister distinguished amount
2880 // functions with the memory reporter manager.
2881 #define DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(name) \
2882 nsresult Unregister##name##DistinguishedAmount() { \
2883 GET_MEMORY_REPORTER_MANAGER(mgr) \
2884 mgr->mAmountFns.m##name = nullptr; \
2888 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, JSMainRuntimeGCHeap
)
2889 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, JSMainRuntimeTemporaryPeak
)
2890 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
,
2891 JSMainRuntimeCompartmentsSystem
)
2892 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, JSMainRuntimeCompartmentsUser
)
2893 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, JSMainRuntimeRealmsSystem
)
2894 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, JSMainRuntimeRealmsUser
)
2896 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, ImagesContentUsedUncompressed
)
2897 DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(ImagesContentUsedUncompressed
)
2899 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, StorageSQLite
)
2900 DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(StorageSQLite
)
2902 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, LowMemoryEventsPhysical
)
2904 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible
, GhostWindows
)
2906 #undef DEFINE_REGISTER_DISTINGUISHED_AMOUNT
2907 #undef DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT
2909 #define DEFINE_REGISTER_SIZE_OF_TAB(name) \
2910 nsresult Register##name##SizeOfTab(name##SizeOfTabFn aSizeOfTabFn) { \
2911 GET_MEMORY_REPORTER_MANAGER(mgr) \
2912 mgr->mSizeOfTabFns.m##name = aSizeOfTabFn; \
2916 DEFINE_REGISTER_SIZE_OF_TAB(JS
);
2917 DEFINE_REGISTER_SIZE_OF_TAB(NonJS
);
2919 #undef DEFINE_REGISTER_SIZE_OF_TAB
2921 #undef GET_MEMORY_REPORTER_MANAGER
2923 } // namespace mozilla