/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsMemoryReporterManager.h"

#include "nsAtomTable.h"
#include "nsCOMPtr.h"
#include "nsCOMArray.h"
#include "nsPrintfCString.h"
#include "nsProxyRelease.h"
#include "nsServiceManagerUtils.h"
#include "nsITimer.h"
#include "nsThreadUtils.h"
#include "nsPIDOMWindow.h"
#include "nsIObserverService.h"
#include "nsIOService.h"
#include "nsIGlobalObject.h"
#include "nsIXPConnect.h"
#ifdef MOZ_GECKO_PROFILER
#  include "GeckoProfilerReporter.h"
#endif
#if defined(XP_UNIX) || defined(MOZ_DMD)
#  include "nsMemoryInfoDumper.h"
#endif
#include "nsNetCID.h"
#include "nsThread.h"
#include "VRProcessManager.h"
#include "mozilla/Attributes.h"
#include "mozilla/MemoryReportingProcess.h"
#include "mozilla/PodOperations.h"
#include "mozilla/Preferences.h"
#include "mozilla/RDDProcessManager.h"
#include "mozilla/ResultExtensions.h"
#include "mozilla/Services.h"
#include "mozilla/Telemetry.h"
#include "mozilla/UniquePtrExtensions.h"
#include "mozilla/dom/MemoryReportTypes.h"
#include "mozilla/dom/ContentParent.h"
#include "mozilla/gfx/GPUProcessManager.h"
#include "mozilla/ipc/UtilityProcessManager.h"
#include "mozilla/ipc/FileDescriptorUtils.h"

#ifdef MOZ_WIDGET_ANDROID
#  include "mozilla/java/GeckoAppShellWrappers.h"
#  include "mozilla/jni/Utils.h"
#endif

#ifdef XP_WIN
#  include "mozilla/MemoryInfo.h"

#  include <process.h>
#  ifndef getpid
#    define getpid _getpid
#  endif
#else
#  include <unistd.h>
#endif

using namespace mozilla;
using namespace mozilla::ipc;
using namespace dom;

#if defined(MOZ_MEMORY)
#  define HAVE_JEMALLOC_STATS 1
#  include "mozmemory.h"
#endif  // MOZ_MEMORY

#if defined(XP_LINUX)

#  include "mozilla/MemoryMapping.h"

#  include <malloc.h>
#  include <string.h>
#  include <stdlib.h>

[[nodiscard]] static nsresult GetProcSelfStatmField(int aField, int64_t* aN) {
  // There are more than two fields, but we're only interested in the first
  // two.
  static const int MAX_FIELD = 2;
  size_t fields[MAX_FIELD];
  MOZ_ASSERT(aField < MAX_FIELD, "bad field number");
  FILE* f = fopen("/proc/self/statm", "r");
  if (f) {
    int nread = fscanf(f, "%zu %zu", &fields[0], &fields[1]);
    fclose(f);
    if (nread == MAX_FIELD) {
      *aN = fields[aField] * getpagesize();
      return NS_OK;
    }
  }
  return NS_ERROR_FAILURE;
}

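// For reference, /proc/self/statm is a single line of (at least) seven
// page-count fields -- size, resident, shared, text, lib, data, dt --
// e.g. (illustrative numbers):
//
//   297123 45678 12345 1762 0 81234 0
//
// The reader above consumes only the first two fields, "size" and
// "resident", and scales them by getpagesize() to get bytes.
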
[[nodiscard]] static nsresult GetProcSelfSmapsPrivate(int64_t* aN, pid_t aPid) {
  // You might be tempted to calculate USS by subtracting the "shared" value
  // from the "resident" value in /proc/<pid>/statm. But at least on Linux,
  // statm's "shared" value actually counts pages backed by files, which has
  // little to do with whether the pages are actually shared. /proc/self/smaps
  // on the other hand appears to give us the correct information.
  nsTArray<MemoryMapping> mappings(1024);
  MOZ_TRY(GetMemoryMappings(mappings, aPid));

  int64_t amount = 0;
  for (auto& mapping : mappings) {
    amount += mapping.Private_Clean();
    amount += mapping.Private_Dirty();
  }
  *aN = amount;
  return NS_OK;
}

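// For reference, each mapping in /proc/<pid>/smaps carries a block of
// per-mapping stats like the following (illustrative values, sizes in kB):
//
//   Private_Clean:        12 kB
//   Private_Dirty:       348 kB
//   Shared_Clean:        120 kB
//   Shared_Dirty:          0 kB
//
// Summing Private_Clean + Private_Dirty across all mappings, as above,
// yields the USS (unique set size).
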
#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  return GetProcSelfStatmField(0, aN);
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  return GetProcSelfStatmField(1, aN);
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, pid_t aPid = 0) {
  return GetProcSelfSmapsPrivate(aN, aPid);
}

#  ifdef HAVE_MALLINFO
#    define HAVE_SYSTEM_HEAP_REPORTER 1
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
  struct mallinfo info = mallinfo();

  // The documentation in the glibc man page makes it sound like |uordblks|
  // would suffice, but that only gets the small allocations that are put in
  // the brk heap. We need |hblkhd| as well to get the larger allocations
  // that are mmapped.
  //
  // The fields in |struct mallinfo| are all |int|, <sigh>, so it is
  // unreliable if memory usage gets high. However, the system heap size on
  // Linux should usually be zero (so long as jemalloc is enabled) so that
  // shouldn't be a problem. Nonetheless, cast the |int|s to |size_t| before
  // adding them to provide a small amount of extra overflow protection.
  *aSizeOut = size_t(info.hblkhd) + size_t(info.uordblks);
  return NS_OK;
}

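// Note: glibc 2.33 added mallinfo2(), which has the same shape but with
// size_t fields, avoiding the |int| truncation described above. A minimal
// sketch of what using it might look like (an assumption about build
// configuration, not what this file currently does):
//
//   #if defined(__GLIBC__) && __GLIBC_PREREQ(2, 33)
//     struct mallinfo2 info2 = mallinfo2();
//     *aSizeOut = int64_t(info2.hblkhd) + int64_t(info2.uordblks);
//   #endif
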
#  endif

#elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
    defined(__OpenBSD__) || defined(__FreeBSD_kernel__)

#  include <sys/param.h>
#  include <sys/sysctl.h>
#  if defined(__DragonFly__) || defined(__FreeBSD__) || \
      defined(__FreeBSD_kernel__)
#    include <sys/user.h>
#  endif

#  include <unistd.h>

#  if defined(__NetBSD__)
#    undef KERN_PROC
#    define KERN_PROC KERN_PROC2
#    define KINFO_PROC struct kinfo_proc2
#  else
#    define KINFO_PROC struct kinfo_proc
#  endif

#  if defined(__DragonFly__)
#    define KP_SIZE(kp) (kp.kp_vm_map_size)
#    define KP_RSS(kp) (kp.kp_vm_rssize * getpagesize())
#  elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#    define KP_SIZE(kp) (kp.ki_size)
#    define KP_RSS(kp) (kp.ki_rssize * getpagesize())
#  elif defined(__NetBSD__)
#    define KP_SIZE(kp) (kp.p_vm_msize * getpagesize())
#    define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
#  elif defined(__OpenBSD__)
#    define KP_SIZE(kp) \
      ((kp.p_vm_dsize + kp.p_vm_ssize + kp.p_vm_tsize) * getpagesize())
#    define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
#  endif

[[nodiscard]] static nsresult GetKinfoProcSelf(KINFO_PROC* aProc) {
#  if defined(__OpenBSD__) && defined(MOZ_SANDBOX)
  static LazyLogModule sPledgeLog("SandboxPledge");
  MOZ_LOG(sPledgeLog, LogLevel::Debug,
          ("%s called when pledged, returning NS_ERROR_FAILURE\n", __func__));
  return NS_ERROR_FAILURE;
#  endif
  int mib[] = {
      CTL_KERN,
      KERN_PROC,
      KERN_PROC_PID,
      getpid(),
#  if defined(__NetBSD__) || defined(__OpenBSD__)
      sizeof(KINFO_PROC),
      1,
#  endif
  };
  u_int miblen = sizeof(mib) / sizeof(mib[0]);
  size_t size = sizeof(KINFO_PROC);
  if (sysctl(mib, miblen, aProc, &size, nullptr, 0)) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  KINFO_PROC proc;
  nsresult rv = GetKinfoProcSelf(&proc);
  if (NS_SUCCEEDED(rv)) {
    *aN = KP_SIZE(proc);
  }
  return rv;
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  KINFO_PROC proc;
  nsresult rv = GetKinfoProcSelf(&proc);
  if (NS_SUCCEEDED(rv)) {
    *aN = KP_RSS(proc);
  }
  return rv;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}

#  ifdef __FreeBSD__
#    include <libutil.h>
#    include <algorithm>

[[nodiscard]] static nsresult GetKinfoVmentrySelf(int64_t* aPrss,
                                                  uint64_t* aMaxreg) {
  int cnt;
  struct kinfo_vmentry* vmmap;
  struct kinfo_vmentry* kve;
  if (!(vmmap = kinfo_getvmmap(getpid(), &cnt))) {
    return NS_ERROR_FAILURE;
  }
  if (aPrss) {
    *aPrss = 0;
  }
  if (aMaxreg) {
    *aMaxreg = 0;
  }

  for (int i = 0; i < cnt; i++) {
    kve = &vmmap[i];
    if (aPrss) {
      *aPrss += kve->kve_private_resident;
    }
    if (aMaxreg) {
      *aMaxreg = std::max(*aMaxreg, kve->kve_end - kve->kve_start);
    }
  }

  free(vmmap);
  return NS_OK;
}

#    define HAVE_PRIVATE_REPORTER 1
[[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
  int64_t priv;
  nsresult rv = GetKinfoVmentrySelf(&priv, nullptr);
  NS_ENSURE_SUCCESS(rv, rv);
  *aN = priv * getpagesize();
  return NS_OK;
}

#    define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
[[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
    int64_t* aN) {
  uint64_t biggestRegion;
  nsresult rv = GetKinfoVmentrySelf(nullptr, &biggestRegion);
  if (NS_SUCCEEDED(rv)) {
    *aN = biggestRegion;
  }
  return NS_OK;
}
#  endif  // FreeBSD
#elif defined(SOLARIS)

#  include <procfs.h>
#  include <fcntl.h>
#  include <unistd.h>

static void XMappingIter(int64_t& aVsize, int64_t& aResident,
                         int64_t& aShared) {
  aVsize = -1;
  aResident = -1;
  aShared = -1;
  int mapfd = open("/proc/self/xmap", O_RDONLY);
  struct stat st;
  prxmap_t* prmapp = nullptr;
  if (mapfd >= 0) {
    if (!fstat(mapfd, &st)) {
      int nmap = st.st_size / sizeof(prxmap_t);
      while (1) {
        // stat(2) on /proc/<pid>/xmap returns an incorrect value,
        // prior to the release of Solaris 11.
        // Here is a workaround for it.
        nmap *= 2;
        prmapp = (prxmap_t*)malloc((nmap + 1) * sizeof(prxmap_t));
        if (!prmapp) {
          // out of memory
          break;
        }

        int n = pread(mapfd, prmapp, (nmap + 1) * sizeof(prxmap_t), 0);
        if (n < 0) {
          break;
        }

        if (nmap >= n / sizeof(prxmap_t)) {
          aVsize = 0;
          aResident = 0;
          aShared = 0;
          for (int i = 0; i < n / sizeof(prxmap_t); i++) {
            aVsize += prmapp[i].pr_size;
            aResident += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
            if (prmapp[i].pr_mflags & MA_SHARED) {
              aShared += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
            }
          }
          break;
        }
        free(prmapp);
      }
      free(prmapp);
    }
    close(mapfd);
  }
}

#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (vsize == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = vsize;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (resident == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = resident;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (resident == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = resident - shared;
  return NS_OK;
}
#elif defined(XP_MACOSX)

#  include <mach/mach_init.h>
#  include <mach/mach_vm.h>
#  include <mach/shared_region.h>
#  include <mach/task.h>
#  include <sys/sysctl.h>

[[nodiscard]] static bool GetTaskBasicInfo(struct task_basic_info* aTi) {
  mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
  kern_return_t kr =
      task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)aTi, &count);
  return kr == KERN_SUCCESS;
}

// The VSIZE figure on Mac includes huge amounts of shared memory and is always
// absurdly high, e.g. 2GB+ even at start-up. But both 'top' and 'ps' report
// it, so we might as well too.
#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  task_basic_info ti;
  if (!GetTaskBasicInfo(&ti)) {
    return NS_ERROR_FAILURE;
  }
  *aN = ti.virtual_size;
  return NS_OK;
}
// If we're using jemalloc on Mac, we need to instruct jemalloc to purge the
// pages it has madvise(MADV_FREE)'d before we read our RSS in order to get
// an accurate result. The OS will take away MADV_FREE'd pages when there's
// memory pressure, so ideally, they shouldn't count against our RSS.
//
// Purging these pages can take a long time for some users (see bug 789975),
// so we provide the option to get the RSS without purging first.
[[nodiscard]] static nsresult ResidentDistinguishedAmountHelper(int64_t* aN,
                                                                bool aDoPurge) {
#  ifdef HAVE_JEMALLOC_STATS
  if (aDoPurge) {
    Telemetry::AutoTimer<Telemetry::MEMORY_FREE_PURGED_PAGES_MS> timer;
    jemalloc_purge_freed_pages();
  }
#  endif

  task_basic_info ti;
  if (!GetTaskBasicInfo(&ti)) {
    return NS_ERROR_FAILURE;
  }
  *aN = ti.resident_size;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ false);
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ true);
}

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1

static bool InSharedRegion(mach_vm_address_t aAddr, cpu_type_t aType) {
  mach_vm_address_t base;
  mach_vm_address_t size;

  switch (aType) {
    case CPU_TYPE_ARM:
      base = SHARED_REGION_BASE_ARM;
      size = SHARED_REGION_SIZE_ARM;
      break;
    case CPU_TYPE_ARM64:
      base = SHARED_REGION_BASE_ARM64;
      size = SHARED_REGION_SIZE_ARM64;
      break;
    case CPU_TYPE_I386:
      base = SHARED_REGION_BASE_I386;
      size = SHARED_REGION_SIZE_I386;
      break;
    case CPU_TYPE_X86_64:
      base = SHARED_REGION_BASE_X86_64;
      size = SHARED_REGION_SIZE_X86_64;
      break;
    default:
      return false;
  }

  return base <= aAddr && aAddr < (base + size);
}
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, mach_port_t aPort = 0) {
  if (!aN) {
    return NS_ERROR_FAILURE;
  }

  cpu_type_t cpu_type;
  size_t len = sizeof(cpu_type);
  if (sysctlbyname("sysctl.proc_cputype", &cpu_type, &len, NULL, 0) != 0) {
    return NS_ERROR_FAILURE;
  }

  // Roughly based on libtop_update_vm_regions in
  // http://www.opensource.apple.com/source/top/top-100.1.2/libtop.c
  size_t privatePages = 0;
  mach_vm_size_t topSize = 0;
  for (mach_vm_address_t addr = MACH_VM_MIN_ADDRESS;; addr += topSize) {
    vm_region_top_info_data_t topInfo;
    mach_msg_type_number_t topInfoCount = VM_REGION_TOP_INFO_COUNT;
    mach_port_t topObjectName;

    kern_return_t kr = mach_vm_region(
        aPort ? aPort : mach_task_self(), &addr, &topSize, VM_REGION_TOP_INFO,
        reinterpret_cast<vm_region_info_t>(&topInfo), &topInfoCount,
        &topObjectName);
    if (kr == KERN_INVALID_ADDRESS) {
      // Done iterating VM regions.
      break;
    } else if (kr != KERN_SUCCESS) {
      return NS_ERROR_FAILURE;
    }

    if (InSharedRegion(addr, cpu_type) && topInfo.share_mode != SM_PRIVATE) {
      continue;
    }

    switch (topInfo.share_mode) {
      case SM_LARGE_PAGE:
        // NB: Large pages are not shareable and always resident.
      case SM_PRIVATE:
        privatePages += topInfo.private_pages_resident;
        privatePages += topInfo.shared_pages_resident;
        break;
      case SM_COW:
        privatePages += topInfo.private_pages_resident;
        if (topInfo.ref_count == 1) {
          // Treat copy-on-write pages as private if they only have one
          // reference.
          privatePages += topInfo.shared_pages_resident;
        }
        break;
      case SM_SHARED: {
        // Using mprotect() or similar to protect a page in the middle of a
        // mapping can create aliased mappings. They look like shared mappings
        // to the VM_REGION_TOP_INFO interface, so re-check with
        // VM_REGION_EXTENDED_INFO.

        mach_vm_size_t exSize = 0;
        vm_region_extended_info_data_t exInfo;
        mach_msg_type_number_t exInfoCount = VM_REGION_EXTENDED_INFO_COUNT;
        mach_port_t exObjectName;
        kr = mach_vm_region(aPort ? aPort : mach_task_self(), &addr, &exSize,
                            VM_REGION_EXTENDED_INFO,
                            reinterpret_cast<vm_region_info_t>(&exInfo),
                            &exInfoCount, &exObjectName);
        if (kr == KERN_INVALID_ADDRESS) {
          // Done iterating VM regions.
          break;
        } else if (kr != KERN_SUCCESS) {
          return NS_ERROR_FAILURE;
        }

        if (exInfo.share_mode == SM_PRIVATE_ALIASED) {
          privatePages += exInfo.pages_resident;
        }
        break;
      }
      default:
        break;
    }
  }

  vm_size_t pageSize;
  if (host_page_size(aPort ? aPort : mach_task_self(), &pageSize) !=
      KERN_SUCCESS) {
    pageSize = PAGE_SIZE;
  }

  *aN = privatePages * pageSize;
  return NS_OK;
}

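// Note: the USS computed above is a sum of resident private pages, so it can
// never exceed the RSS from ResidentDistinguishedAmount(). For example, a
// process with a 500 MiB RSS that shares 150 MiB of that with other processes
// would report a USS of roughly 350 MiB (illustrative numbers).
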
[[nodiscard]] static nsresult PhysicalFootprintAmount(int64_t* aN,
                                                      mach_port_t aPort = 0) {
  MOZ_ASSERT(aN);

  // The phys_footprint value (introduced in 10.11) of the TASK_VM_INFO data
  // matches the value in the 'Memory' column of the Activity Monitor.
  task_vm_info_data_t task_vm_info;
  mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
  kern_return_t kr = task_info(aPort ? aPort : mach_task_self(), TASK_VM_INFO,
                               (task_info_t)&task_vm_info, &count);
  if (kr != KERN_SUCCESS) {
    return NS_ERROR_FAILURE;
  }

  *aN = task_vm_info.phys_footprint;
  return NS_OK;
}

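// Note: unlike resident_size above, phys_footprint also charges the task for
// (at least) pages held in the memory compressor on its behalf, so the two
// figures can legitimately disagree, especially under memory pressure.
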
#elif defined(XP_WIN)

#  include <windows.h>
#  include <psapi.h>
#  include <algorithm>

#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  MEMORYSTATUSEX s;
  s.dwLength = sizeof(s);

  if (!GlobalMemoryStatusEx(&s)) {
    return NS_ERROR_FAILURE;
  }

  *aN = s.ullTotalVirtual - s.ullAvailVirtual;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  PROCESS_MEMORY_COUNTERS pmc;
  pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS);

  if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) {
    return NS_ERROR_FAILURE;
  }

  *aN = pmc.WorkingSetSize;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1

[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, HANDLE aProcess = nullptr) {
  // Determine how many entries we need.
  PSAPI_WORKING_SET_INFORMATION tmp;
  DWORD tmpSize = sizeof(tmp);
  memset(&tmp, 0, tmpSize);

  HANDLE proc = aProcess ? aProcess : GetCurrentProcess();
  QueryWorkingSet(proc, &tmp, tmpSize);

  // Fudge the size in case new entries are added between calls.
  size_t entries = tmp.NumberOfEntries * 2;

  if (!entries) {
    return NS_ERROR_FAILURE;
  }

  DWORD infoArraySize = tmpSize + (entries * sizeof(PSAPI_WORKING_SET_BLOCK));
  UniqueFreePtr<PSAPI_WORKING_SET_INFORMATION> infoArray(
      static_cast<PSAPI_WORKING_SET_INFORMATION*>(malloc(infoArraySize)));

  if (!infoArray) {
    return NS_ERROR_FAILURE;
  }

  if (!QueryWorkingSet(proc, infoArray.get(), infoArraySize)) {
    return NS_ERROR_FAILURE;
  }

  entries = static_cast<size_t>(infoArray->NumberOfEntries);
  size_t privatePages = 0;
  for (size_t i = 0; i < entries; i++) {
    // Count shared pages that only one process is using as private.
    if (!infoArray->WorkingSetInfo[i].Shared ||
        infoArray->WorkingSetInfo[i].ShareCount <= 1) {
      privatePages++;
    }
  }

  SYSTEM_INFO si;
  GetSystemInfo(&si);

  *aN = privatePages * si.dwPageSize;
  return NS_OK;
}

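// Worked example: with the common 4 KiB page size, a working set in which
// 25,000 pages are private (or shared with ShareCount <= 1) is reported as
// 25000 * 4096 = 102,400,000 bytes, i.e. roughly 97.7 MiB.
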
#  define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
[[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
    int64_t* aN) {
  SIZE_T biggestRegion = 0;
  MEMORY_BASIC_INFORMATION vmemInfo = {0};
  for (size_t currentAddress = 0;;) {
    if (!VirtualQuery((LPCVOID)currentAddress, &vmemInfo, sizeof(vmemInfo))) {
      // Something went wrong, just return whatever we've got already.
      break;
    }

    if (vmemInfo.State == MEM_FREE) {
      biggestRegion = std::max(biggestRegion, vmemInfo.RegionSize);
    }

    SIZE_T lastAddress = currentAddress;
    currentAddress += vmemInfo.RegionSize;

    // If we overflow, we've examined all of the address space.
    if (currentAddress < lastAddress) {
      break;
    }
  }

  *aN = biggestRegion;
  return NS_OK;
}

#  define HAVE_PRIVATE_REPORTER 1
[[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
  PROCESS_MEMORY_COUNTERS_EX pmcex;
  pmcex.cb = sizeof(PROCESS_MEMORY_COUNTERS_EX);

  if (!GetProcessMemoryInfo(GetCurrentProcess(),
                            (PPROCESS_MEMORY_COUNTERS)&pmcex, sizeof(pmcex))) {
    return NS_ERROR_FAILURE;
  }

  *aN = pmcex.PrivateUsage;
  return NS_OK;
}

#  define HAVE_SYSTEM_HEAP_REPORTER 1
// Windows can have multiple separate heaps, but we should not touch
// non-default heaps because they may be destroyed at any time while we hold a
// handle. So we count only the default heap.
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
  HANDLE heap = GetProcessHeap();

  NS_ENSURE_TRUE(HeapLock(heap), NS_ERROR_FAILURE);

  int64_t heapSize = 0;
  PROCESS_HEAP_ENTRY entry;
  entry.lpData = nullptr;
  while (HeapWalk(heap, &entry)) {
    // We don't count entry.cbOverhead, because we just want to measure the
    // space available to the program.
    if (entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) {
      heapSize += entry.cbData;
    }
  }

  // Check this result only after unlocking the heap, so that we don't leave
  // the heap locked if there was an error.
  DWORD lastError = GetLastError();

  // I have no idea how things would proceed if unlocking this heap failed...
  NS_ENSURE_TRUE(HeapUnlock(heap), NS_ERROR_FAILURE);

  NS_ENSURE_TRUE(lastError == ERROR_NO_MORE_ITEMS, NS_ERROR_FAILURE);

  *aSizeOut = heapSize;
  return NS_OK;
}
struct SegmentKind {
  DWORD mState;
  DWORD mType;
  DWORD mProtect;
  int mIsStack;
};

struct SegmentEntry : public PLDHashEntryHdr {
  static PLDHashNumber HashKey(const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    return mozilla::HashGeneric(kind->mState, kind->mType, kind->mProtect,
                                kind->mIsStack);
  }

  static bool MatchEntry(const PLDHashEntryHdr* aEntry, const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    auto entry = static_cast<const SegmentEntry*>(aEntry);
    return kind->mState == entry->mKind.mState &&
           kind->mType == entry->mKind.mType &&
           kind->mProtect == entry->mKind.mProtect &&
           kind->mIsStack == entry->mKind.mIsStack;
  }

  static void InitEntry(PLDHashEntryHdr* aEntry, const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    auto entry = static_cast<SegmentEntry*>(aEntry);
    entry->mKind = *kind;
    entry->mCount = 0;
    entry->mSize = 0;
  }

  static const PLDHashTableOps Ops;

  SegmentKind mKind;  // The segment kind.
  uint32_t mCount;    // The number of segments of this kind.
  size_t mSize;       // The combined size of segments of this kind.
};

/* static */ const PLDHashTableOps SegmentEntry::Ops = {
    SegmentEntry::HashKey, SegmentEntry::MatchEntry,
    PLDHashTable::MoveEntryStub, PLDHashTable::ClearEntryStub,
    SegmentEntry::InitEntry};

class WindowsAddressSpaceReporter final : public nsIMemoryReporter {
  ~WindowsAddressSpaceReporter() {}

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    // First iterate over all the segments and record how many of each kind
    // there were and their aggregate sizes. We use a hash table for this
    // because there are a couple of dozen different kinds possible.

    PLDHashTable table(&SegmentEntry::Ops, sizeof(SegmentEntry));
    MEMORY_BASIC_INFORMATION info = {0};
    bool isPrevSegStackGuard = false;
    for (size_t currentAddress = 0;;) {
      if (!VirtualQuery((LPCVOID)currentAddress, &info, sizeof(info))) {
        // Something went wrong, just return whatever we've got already.
        break;
      }

      size_t size = info.RegionSize;

      // Note that |type| and |protect| are ignored in some cases.
      DWORD state = info.State;
      DWORD type =
          (state == MEM_RESERVE || state == MEM_COMMIT) ? info.Type : 0;
      DWORD protect = (state == MEM_COMMIT) ? info.Protect : 0;
      bool isStack = isPrevSegStackGuard && state == MEM_COMMIT &&
                     type == MEM_PRIVATE && protect == PAGE_READWRITE;

      SegmentKind kind = {state, type, protect, isStack ? 1 : 0};
      auto entry =
          static_cast<SegmentEntry*>(table.Add(&kind, mozilla::fallible));
      if (entry) {
        entry->mCount += 1;
        entry->mSize += size;
      }

      isPrevSegStackGuard = info.State == MEM_COMMIT &&
                            info.Type == MEM_PRIVATE &&
                            info.Protect == (PAGE_READWRITE | PAGE_GUARD);

      size_t lastAddress = currentAddress;
      currentAddress += size;

      // If we overflow, we've examined all of the address space.
      if (currentAddress < lastAddress) {
        break;
      }
    }

    // Then iterate over the hash table and report the details for each segment
    // kind.

    for (auto iter = table.Iter(); !iter.Done(); iter.Next()) {
      // For each range of pages, we consider one or more of its State, Type
      // and Protect values. These are documented at
      // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx
      // (for State and Type) and
      // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx
      // (for Protect).
      //
      // Not all State values have accompanying Type and Protection values.
      bool doType = false;
      bool doProtect = false;

      auto entry = static_cast<const SegmentEntry*>(iter.Get());

      nsCString path("address-space");

      switch (entry->mKind.mState) {
        case MEM_FREE:
          path.AppendLiteral("/free");
          break;

        case MEM_RESERVE:
          path.AppendLiteral("/reserved");
          doType = true;
          break;

        case MEM_COMMIT:
          path.AppendLiteral("/commit");
          doType = true;
          doProtect = true;
          break;

        default:
          // Should be impossible, but handle it just in case.
          path.AppendLiteral("/???");
          break;
      }

      if (doType) {
        switch (entry->mKind.mType) {
          case MEM_IMAGE:
            path.AppendLiteral("/image");
            break;

          case MEM_MAPPED:
            path.AppendLiteral("/mapped");
            break;

          case MEM_PRIVATE:
            path.AppendLiteral("/private");
            break;

          default:
            // Should be impossible, but handle it just in case.
            path.AppendLiteral("/???");
            break;
        }
      }

      if (doProtect) {
        DWORD protect = entry->mKind.mProtect;
        // Basic attributes. Exactly one of these should be set.
        if (protect & PAGE_EXECUTE) {
          path.AppendLiteral("/execute");
        }
        if (protect & PAGE_EXECUTE_READ) {
          path.AppendLiteral("/execute-read");
        }
        if (protect & PAGE_EXECUTE_READWRITE) {
          path.AppendLiteral("/execute-readwrite");
        }
        if (protect & PAGE_EXECUTE_WRITECOPY) {
          path.AppendLiteral("/execute-writecopy");
        }
        if (protect & PAGE_NOACCESS) {
          path.AppendLiteral("/noaccess");
        }
        if (protect & PAGE_READONLY) {
          path.AppendLiteral("/readonly");
        }
        if (protect & PAGE_READWRITE) {
          path.AppendLiteral("/readwrite");
        }
        if (protect & PAGE_WRITECOPY) {
          path.AppendLiteral("/writecopy");
        }

        // Modifiers. At most one of these should be set.
        if (protect & PAGE_GUARD) {
          path.AppendLiteral("+guard");
        }
        if (protect & PAGE_NOCACHE) {
          path.AppendLiteral("+nocache");
        }
        if (protect & PAGE_WRITECOMBINE) {
          path.AppendLiteral("+writecombine");
        }

        // Annotate likely stack segments, too.
        if (entry->mKind.mIsStack) {
          path.AppendLiteral("+stack");
        }
      }

      // Append the segment count.
      path.AppendPrintf("(segments=%u)", entry->mCount);

      aHandleReport->Callback(""_ns, path, KIND_OTHER, UNITS_BYTES,
                              entry->mSize, "From MEMORY_BASIC_INFORMATION."_ns,
                              aData);
    }

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(WindowsAddressSpaceReporter, nsIMemoryReporter)

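// For illustration, the paths emitted above look like the following
// (hypothetical segment counts; the path shapes follow from the code):
//
//   address-space/free(segments=317)
//   address-space/reserved/private(segments=58)
//   address-space/commit/image/execute-read(segments=214)
//   address-space/commit/private/readwrite+stack(segments=42)
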
#endif  // XP_<PLATFORM>

#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
class VsizeMaxContiguousReporter final : public nsIMemoryReporter {
  ~VsizeMaxContiguousReporter() {}

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(VsizeMaxContiguousDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "vsize-max-contiguous", KIND_OTHER, UNITS_BYTES, amount,
          "Size of the maximum contiguous block of available virtual memory.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(VsizeMaxContiguousReporter, nsIMemoryReporter)
#endif

#ifdef HAVE_PRIVATE_REPORTER
class PrivateReporter final : public nsIMemoryReporter {
  ~PrivateReporter() {}

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(PrivateDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "private", KIND_OTHER, UNITS_BYTES, amount,
"Memory that cannot be shared with other processes, including memory that is "
"committed and marked MEM_PRIVATE, data that is not mapped, and executable "
"pages that have been written to.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PrivateReporter, nsIMemoryReporter)
#endif

#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
class VsizeReporter final : public nsIMemoryReporter {
  ~VsizeReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(VsizeDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "vsize", KIND_OTHER, UNITS_BYTES, amount,
"Memory mapped by the process, including code and data segments, the heap, "
"thread stacks, memory explicitly mapped by the process via mmap and similar "
"operations, and memory shared with other processes. This is the vsize figure "
"as reported by 'top' and 'ps'. This figure is of limited use on Mac, where "
"processes share huge amounts of memory with one another. But even on other "
"operating systems, 'resident' is a much better measure of the memory "
"resources used by the process.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(VsizeReporter, nsIMemoryReporter)

class ResidentReporter final : public nsIMemoryReporter {
  ~ResidentReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(ResidentDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "resident", KIND_OTHER, UNITS_BYTES, amount,
"Memory mapped by the process that is present in physical memory, also known "
"as the resident set size (RSS). This is the best single figure to use when "
"considering the memory resources used by the process, but it depends both on "
"other processes being run and details of the OS kernel and so is best used "
"for comparing the memory usage of a single process at different points in "
"time.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentReporter, nsIMemoryReporter)
#endif  // HAVE_VSIZE_AND_RESIDENT_REPORTERS

#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
class ResidentUniqueReporter final : public nsIMemoryReporter {
  ~ResidentUniqueReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount = 0;
    // clang-format off
    if (NS_SUCCEEDED(ResidentUniqueDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "resident-unique", KIND_OTHER, UNITS_BYTES, amount,
"Memory mapped by the process that is present in physical memory and not "
"shared with any other processes. This is also known as the process's unique "
"set size (USS). This is the amount of RAM we'd expect to be freed if we "
"closed this process.");
    }
#ifdef XP_MACOSX
    if (NS_SUCCEEDED(PhysicalFootprintAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "resident-phys-footprint", KIND_OTHER, UNITS_BYTES, amount,
"Memory footprint reported by MacOS's task_info API's phys_footprint field. "
"This matches the memory column in Activity Monitor.");
    }
#endif
    // clang-format on
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentUniqueReporter, nsIMemoryReporter)

#endif  // HAVE_RESIDENT_UNIQUE_REPORTER

#ifdef HAVE_SYSTEM_HEAP_REPORTER

class SystemHeapReporter final : public nsIMemoryReporter {
  ~SystemHeapReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(SystemHeapSize(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "system-heap-allocated", KIND_OTHER, UNITS_BYTES, amount,
"Memory used by the system allocator that is currently allocated to the "
"application. This is distinct from the jemalloc heap that Firefox uses for "
"most or all of its heap allocations. Ideally this number is zero, but "
"on some platforms we cannot force every heap allocation through jemalloc.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(SystemHeapReporter, nsIMemoryReporter)
#endif  // HAVE_SYSTEM_HEAP_REPORTER
#ifdef XP_UNIX

#  include <sys/resource.h>

#  define HAVE_RESIDENT_PEAK_REPORTER 1

[[nodiscard]] static nsresult ResidentPeakDistinguishedAmount(int64_t* aN) {
  struct rusage usage;
  if (0 == getrusage(RUSAGE_SELF, &usage)) {
    // The units for ru_maxrss:
    //  - Mac: bytes
    //  - Solaris: pages? But some sources say it actually always returns 0,
    //    so check for that
    //  - Linux, {Net/Open/Free}BSD, DragonFly: KiB
#  ifdef XP_MACOSX
    *aN = usage.ru_maxrss;
#  elif defined(SOLARIS)
    *aN = usage.ru_maxrss * getpagesize();
#  else
    *aN = usage.ru_maxrss * 1024;
#  endif
    if (*aN > 0) {
      return NS_OK;
    }
  }
  return NS_ERROR_FAILURE;
}
class ResidentPeakReporter final : public nsIMemoryReporter {
  ~ResidentPeakReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount = 0;
    if (NS_SUCCEEDED(ResidentPeakDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "resident-peak", KIND_OTHER, UNITS_BYTES, amount,
          "The peak 'resident' value for the lifetime of the process.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentPeakReporter, nsIMemoryReporter)

#  define HAVE_PAGE_FAULT_REPORTERS 1

class PageFaultsSoftReporter final : public nsIMemoryReporter {
  ~PageFaultsSoftReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    struct rusage usage;
    int err = getrusage(RUSAGE_SELF, &usage);
    if (err == 0) {
      int64_t amount = usage.ru_minflt;
      // clang-format off
      MOZ_COLLECT_REPORT(
          "page-faults-soft", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
"The number of soft page faults (also known as 'minor page faults') that "
"have occurred since the process started. A soft page fault occurs when the "
"process tries to access a page which is present in physical memory but is "
"not mapped into the process's address space. For instance, a process might "
"observe soft page faults when it loads a shared library which is already "
"present in physical memory. A process may experience many thousands of soft "
"page faults even when the machine has plenty of available physical memory, "
"and because the OS services a soft page fault without accessing the disk, "
"they impact performance much less than hard page faults.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PageFaultsSoftReporter, nsIMemoryReporter)

[[nodiscard]] static nsresult PageFaultsHardDistinguishedAmount(
    int64_t* aAmount) {
  struct rusage usage;
  int err = getrusage(RUSAGE_SELF, &usage);
  if (err != 0) {
    return NS_ERROR_FAILURE;
  }
  *aAmount = usage.ru_majflt;
  return NS_OK;
}

class PageFaultsHardReporter final : public nsIMemoryReporter {
  ~PageFaultsHardReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount = 0;
    if (NS_SUCCEEDED(PageFaultsHardDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "page-faults-hard", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
"The number of hard page faults (also known as 'major page faults') that have "
"occurred since the process started. A hard page fault occurs when a process "
"tries to access a page which is not present in physical memory. The "
"operating system must access the disk in order to fulfill a hard page fault. "
"When memory is plentiful, you should see very few hard page faults. But if "
"the process tries to use more memory than your machine has available, you "
"may see many thousands of hard page faults. Because accessing the disk is up "
"to a million times slower than accessing RAM, the program may run very "
"slowly when it is experiencing more than 100 or so hard page faults a "
"second.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PageFaultsHardReporter, nsIMemoryReporter)
#endif  // XP_UNIX

/*
** memory reporter implementation for jemalloc and OSX malloc,
** to obtain info on total memory in use (that we know about,
** at least -- on OSX, there are sometimes other zones in use).
*/
#ifdef HAVE_JEMALLOC_STATS

static size_t HeapOverhead(jemalloc_stats_t* aStats) {
  return aStats->waste + aStats->bookkeeping + aStats->page_cache +
         aStats->bin_unused;
}

// This has UNITS_PERCENTAGE, so it is multiplied by 100x *again* on top of the
// 100x for the percentage.
static int64_t HeapOverheadFraction(jemalloc_stats_t* aStats) {
  size_t heapOverhead = HeapOverhead(aStats);
  size_t heapCommitted = aStats->allocated + heapOverhead;
  return int64_t(10000 * (heapOverhead / (double)heapCommitted));
}

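// Worked example: with 400 MiB allocated and 100 MiB of overhead,
// heapCommitted is 500 MiB, the overhead fraction is 100 / 500 = 0.2, and
// this function returns 10000 * 0.2 = 2000, which a UNITS_PERCENTAGE
// consumer divides by 100 to display as "20.00%".
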
class JemallocHeapReporter final : public nsIMemoryReporter {
  ~JemallocHeapReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    jemalloc_stats_t stats;
    const size_t num_bins = jemalloc_stats_num_bins();
    nsTArray<jemalloc_bin_stats_t> bin_stats(num_bins);
    bin_stats.SetLength(num_bins);
    jemalloc_stats(&stats, bin_stats.Elements());

    // clang-format off
    MOZ_COLLECT_REPORT(
        "heap-committed/allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
"Memory mapped by the heap allocator that is currently allocated to the "
"application. This may exceed the amount of memory requested by the "
"application because the allocator regularly rounds up request sizes. (The "
"exact amount requested is not recorded.)");

    MOZ_COLLECT_REPORT(
        "heap-allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
"The same as 'heap-committed/allocated'.");

    // We mark this and the other heap-overhead reporters as KIND_NONHEAP
    // because KIND_HEAP memory means "counted in heap-allocated", which
    // this is not.
    for (auto& bin : bin_stats) {
      MOZ_ASSERT(bin.size);
      nsPrintfCString path("explicit/heap-overhead/bin-unused/bin-%zu",
                           bin.size);
      aHandleReport->Callback(EmptyCString(), path, KIND_NONHEAP, UNITS_BYTES,
                              bin.bytes_unused,
                              nsLiteralCString(
"Unused bytes in all runs of all bins for this size class"),
                              aData);
    }

    if (stats.waste > 0) {
      MOZ_COLLECT_REPORT(
          "explicit/heap-overhead/waste", KIND_NONHEAP, UNITS_BYTES,
          stats.waste,
"Committed bytes which do not correspond to an active allocation and which the "
"allocator is not intentionally keeping alive (i.e., not "
"'explicit/heap-overhead/{bookkeeping,page-cache,bin-unused}').");
    }

    MOZ_COLLECT_REPORT(
        "explicit/heap-overhead/bookkeeping", KIND_NONHEAP, UNITS_BYTES,
        stats.bookkeeping,
"Committed bytes which the heap allocator uses for internal data structures.");

    MOZ_COLLECT_REPORT(
        "explicit/heap-overhead/page-cache", KIND_NONHEAP, UNITS_BYTES,
        stats.page_cache,
"Memory which the allocator could return to the operating system, but hasn't. "
"The allocator keeps this memory around as an optimization, so it doesn't "
"have to ask the OS the next time it needs to fulfill a request. This value "
"is typically not larger than a few megabytes.");

    MOZ_COLLECT_REPORT(
        "heap-committed/overhead", KIND_OTHER, UNITS_BYTES,
        HeapOverhead(&stats),
"The sum of 'explicit/heap-overhead/*'.");

    MOZ_COLLECT_REPORT(
        "heap-mapped", KIND_OTHER, UNITS_BYTES, stats.mapped,
"Amount of memory currently mapped. Includes memory that is uncommitted, i.e. "
"neither in physical memory nor paged to disk.");

    MOZ_COLLECT_REPORT(
        "heap-chunksize", KIND_OTHER, UNITS_BYTES, stats.chunksize,
"Size of chunks.");

#ifdef MOZ_PHC
    mozilla::phc::MemoryUsage usage;
    ReplaceMalloc::PHCMemoryUsage(usage);

    MOZ_COLLECT_REPORT(
        "explicit/heap-overhead/phc/metadata", KIND_NONHEAP, UNITS_BYTES,
        usage.mMetadataBytes,
"Memory used by PHC to store stacks and other metadata for each allocation");

    MOZ_COLLECT_REPORT(
        "explicit/heap-overhead/phc/fragmentation", KIND_NONHEAP, UNITS_BYTES,
        usage.mFragmentationBytes,
"The amount of memory lost due to rounding up allocations to the next page "
"size. This is also known as 'internal fragmentation'. Note that all "
"allocators have some internal fragmentation; there would still be some "
"internal fragmentation without PHC.");
#endif

    // clang-format on

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(JemallocHeapReporter, nsIMemoryReporter)

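// Illustration of the bin-unused reports above: if the runs for the 48-byte
// size class total 64 KiB but only 50 KiB of that is currently handed out,
// the remaining 14 KiB is reported under
// "explicit/heap-overhead/bin-unused/bin-48" (illustrative numbers).
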
#endif  // HAVE_JEMALLOC_STATS

// Why is this here? At first glance, you'd think it could be defined and
// registered with nsMemoryReporterManager entirely within nsAtomTable.cpp.
// However, the obvious time to register it is when the table is initialized,
// and that happens before XPCOM components are initialized, which means the
// RegisterStrongMemoryReporter call fails. So instead we do it here.
class AtomTablesReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  ~AtomTablesReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    AtomsSizes sizes;
    NS_AddSizeOfAtoms(MallocSizeOf, sizes);

    MOZ_COLLECT_REPORT("explicit/atoms/table", KIND_HEAP, UNITS_BYTES,
                       sizes.mTable, "Memory used by the atom table.");

    MOZ_COLLECT_REPORT(
        "explicit/atoms/dynamic-objects-and-chars", KIND_HEAP, UNITS_BYTES,
        sizes.mDynamicAtoms,
        "Memory used by dynamic atom objects and chars (which are stored "
        "at the end of each atom object).");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(AtomTablesReporter, nsIMemoryReporter)
class ThreadsReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
  ~ThreadsReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
#ifdef XP_LINUX
    nsTArray<MemoryMapping> mappings(1024);
    MOZ_TRY(GetMemoryMappings(mappings));
#endif

    // Enumerating over active threads requires holding a lock, so we collect
    // info on all threads, and then call our reporter callbacks after
    // releasing the lock.
    struct ThreadData {
      nsCString mName;
      uint32_t mThreadId;
      size_t mPrivateSize;
    };
    AutoTArray<ThreadData, 32> threads;

    size_t eventQueueSizes = 0;
    size_t wrapperSizes = 0;
    size_t threadCount = 0;

    for (auto* thread : nsThread::Enumerate()) {
      threadCount++;
      eventQueueSizes += thread->SizeOfEventQueues(MallocSizeOf);
      wrapperSizes += thread->ShallowSizeOfIncludingThis(MallocSizeOf);

      if (!thread->StackBase()) {
        continue;
      }

#if defined(XP_LINUX)
      int idx = mappings.BinaryIndexOf(thread->StackBase());
      if (idx < 0) {
        continue;
      }
      // Referenced() is the combined size of all pages in the region which
      // have ever been touched, and are therefore consuming memory. For stack
      // regions, these pages are guaranteed to be un-shared unless we fork
      // after creating threads (which we don't).
      size_t privateSize = mappings[idx].Referenced();

      // On Linux, we have to be very careful matching memory regions to
      // thread stacks.
      //
      // To begin with, the kernel only reports VM stats for regions of all
      // adjacent pages with the same flags, protection, and backing file.
      // There's no way to get finer-grained usage information for a subset of
      // those pages.
      //
      // Stack segments always have a guard page at the bottom of the stack
      // (assuming we only support stacks that grow down), so there's no
      // danger of them being merged with other stack regions. At the top,
      // there's no protection page, and no way to allocate one without using
      // pthreads directly and allocating our own stacks. So we get around the
      // problem by adding an extra VM flag (NOHUGEPAGES) to our stack region,
      // which we don't expect to be set on any heap regions. But this is not
      // fool-proof.
      //
      // A second kink is that different C libraries (and different versions
      // thereof) report stack base locations and sizes differently with
      // regard to the guard page. For the libraries that include the guard
      // page in the stack size base pointer, we need to adjust those values
      // to compensate. But it's possible that our logic will get out of sync
      // with library changes, or someone will compile with an unexpected
      // library.
      //
      // The upshot of all of this is that there may be configurations that
      // our special cases don't cover. And if there are, we want to know
      // about it. So assert that the total size of the memory region we're
      // reporting actually matches the allocated size of the thread stack.
#  ifndef ANDROID
      MOZ_ASSERT(mappings[idx].Size() == thread->StackSize(),
                 "Mapping region size doesn't match stack allocation size");
#  endif
#elif defined(XP_WIN)
      auto memInfo = MemoryInfo::Get(thread->StackBase(), thread->StackSize());
      size_t privateSize = memInfo.Committed();
#else
      size_t privateSize = thread->StackSize();
      MOZ_ASSERT_UNREACHABLE(
          "Shouldn't have stack base pointer on this platform");
#endif

      threads.AppendElement(ThreadData{
          nsCString(PR_GetThreadName(thread->GetPRThread())),
          thread->ThreadId(),
          // On Linux, it's possible (but unlikely) that our stack region will
          // have been merged with adjacent heap regions, in which case we'll
          // get combined size information for both. So we take the minimum of
          // the reported private size and the requested stack size to avoid
          // the possibility of majorly over-reporting in that case.
          std::min(privateSize, thread->StackSize()),
      });
    }

    for (auto& thread : threads) {
      nsPrintfCString path("explicit/threads/stacks/%s (tid=%u)",
                           thread.mName.get(), thread.mThreadId);

      aHandleReport->Callback(
          ""_ns, path, KIND_NONHEAP, UNITS_BYTES, thread.mPrivateSize,
          nsLiteralCString("The sizes of thread stacks which have been "
                           "committed to memory."),
          aData);
    }

    MOZ_COLLECT_REPORT("explicit/threads/overhead/event-queues", KIND_HEAP,
                       UNITS_BYTES, eventQueueSizes,
                       "The sizes of nsThread event queues and observers.");

    MOZ_COLLECT_REPORT("explicit/threads/overhead/wrappers", KIND_HEAP,
                       UNITS_BYTES, wrapperSizes,
                       "The sizes of nsThread/PRThread wrappers.");

#if defined(XP_WIN)
    // Each thread on Windows has a fixed kernel overhead. For 32-bit Windows,
    // that's 12K. For 64-bit, it's 24K.
    //
    // See
    // https://blogs.technet.microsoft.com/markrussinovich/2009/07/05/pushing-the-limits-of-windows-processes-and-threads/
    constexpr size_t kKernelSize = (sizeof(void*) == 8 ? 24 : 12) * 1024;
#elif defined(XP_LINUX)
    // On Linux, kernel stacks are usually 8K. However, on x86, they are
    // allocated virtually, and start out at 4K. They may grow to 8K, but we
    // have no way of knowing which ones do, so all we can do is guess.
#  if defined(__x86_64__) || defined(__i386__)
    constexpr size_t kKernelSize = 4 * 1024;
#  else
    constexpr size_t kKernelSize = 8 * 1024;
#  endif
#elif defined(XP_MACOSX)
    // On Darwin, kernel stacks are 16K:
    //
    // https://books.google.com/books?id=K8vUkpOXhN4C&lpg=PA513&dq=mach%20kernel%20thread%20stack%20size&pg=PA513#v=onepage&q=mach%20kernel%20thread%20stack%20size&f=false
    constexpr size_t kKernelSize = 16 * 1024;
#else
    // Elsewhere, just assume that kernel stacks require at least 8K.
    constexpr size_t kKernelSize = 8 * 1024;
#endif

    MOZ_COLLECT_REPORT("explicit/threads/overhead/kernel", KIND_NONHEAP,
                       UNITS_BYTES, threadCount * kKernelSize,
                       "The total kernel overhead for all active threads.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ThreadsReporter, nsIMemoryReporter)
#ifdef DEBUG

// Ideally, this would be implemented in BlockingResourceBase.cpp.
// However, this ends up breaking the linking step of various unit tests due
// to adding a new dependency to libdmd for a commonly used feature (mutexes)
// in DMD builds. So instead we do it here.
class DeadlockDetectorReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  ~DeadlockDetectorReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    MOZ_COLLECT_REPORT(
        "explicit/deadlock-detector", KIND_HEAP, UNITS_BYTES,
        BlockingResourceBase::SizeOfDeadlockDetector(MallocSizeOf),
        "Memory used by the deadlock detector.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(DeadlockDetectorReporter, nsIMemoryReporter)

#endif

#ifdef MOZ_DMD

namespace mozilla {
namespace dmd {

class DMDReporter final : public nsIMemoryReporter {
 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    dmd::Sizes sizes;
    dmd::SizeOf(&sizes);

    MOZ_COLLECT_REPORT(
        "explicit/dmd/stack-traces/used", KIND_HEAP, UNITS_BYTES,
        sizes.mStackTracesUsed,
        "Memory used by stack traces which correspond to at least "
        "one heap block DMD is tracking.");

    MOZ_COLLECT_REPORT(
        "explicit/dmd/stack-traces/unused", KIND_HEAP, UNITS_BYTES,
        sizes.mStackTracesUnused,
        "Memory used by stack traces which don't correspond to any heap "
        "blocks DMD is currently tracking.");

    MOZ_COLLECT_REPORT("explicit/dmd/stack-traces/table", KIND_HEAP,
                       UNITS_BYTES, sizes.mStackTraceTable,
                       "Memory used by DMD's stack trace table.");

    MOZ_COLLECT_REPORT("explicit/dmd/live-block-table", KIND_HEAP, UNITS_BYTES,
                       sizes.mLiveBlockTable,
                       "Memory used by DMD's live block table.");

    MOZ_COLLECT_REPORT("explicit/dmd/dead-block-list", KIND_HEAP, UNITS_BYTES,
                       sizes.mDeadBlockTable,
                       "Memory used by DMD's dead block list.");

    return NS_OK;
  }

 private:
  ~DMDReporter() = default;
};
NS_IMPL_ISUPPORTS(DMDReporter, nsIMemoryReporter)

}  // namespace dmd
}  // namespace mozilla

#endif  // MOZ_DMD
#ifdef MOZ_WIDGET_ANDROID
class AndroidMemoryReporter final : public nsIMemoryReporter {
 public:
  NS_DECL_ISUPPORTS

  AndroidMemoryReporter() = default;

  NS_IMETHOD
  CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData,
                 bool aAnonymize) override {
    if (!jni::IsAvailable() || jni::GetAPIVersion() < 23) {
      return NS_OK;
    }

    int32_t heap = java::GeckoAppShell::GetMemoryUsage("summary.java-heap"_ns);
    if (heap > 0) {
      MOZ_COLLECT_REPORT("java-heap", KIND_OTHER, UNITS_BYTES, heap * 1024,
                         "The private Java Heap usage");
    }
    return NS_OK;
  }

 private:
  ~AndroidMemoryReporter() = default;
};

NS_IMPL_ISUPPORTS(AndroidMemoryReporter, nsIMemoryReporter)
#endif
/*
** nsMemoryReporterManager implementation
*/

NS_IMPL_ISUPPORTS(nsMemoryReporterManager, nsIMemoryReporterManager,
                  nsIMemoryReporter)
NS_IMETHODIMP
nsMemoryReporterManager::Init() {
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  // Under normal circumstances this function is only called once. However,
  // we've (infrequently) seen memory report dumps in crash reports that
  // suggest that this function is sometimes called multiple times. That in
  // turn means that multiple reporters of each kind are registered, which
  // leads to duplicated reports of individual measurements such as "resident",
  // "vsize", etc.
  //
  // It's unclear how these multiple calls can occur. The only plausible theory
  // so far is badly-written extensions, because this function is callable from
  // JS code via nsIMemoryReporter.idl.
  //
  // Whatever the cause, it's a bad thing. So we protect against it with the
  // following check.
  static bool isInited = false;
  if (isInited) {
    NS_WARNING("nsMemoryReporterManager::Init() has already been called!");
    return NS_OK;
  }
  isInited = true;

#ifdef HAVE_JEMALLOC_STATS
  RegisterStrongReporter(new JemallocHeapReporter());
#endif

#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  RegisterStrongReporter(new VsizeReporter());
  RegisterStrongReporter(new ResidentReporter());
#endif

#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
  RegisterStrongReporter(new VsizeMaxContiguousReporter());
#endif

#ifdef HAVE_RESIDENT_PEAK_REPORTER
  RegisterStrongReporter(new ResidentPeakReporter());
#endif

#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
  RegisterStrongReporter(new ResidentUniqueReporter());
#endif

#ifdef HAVE_PAGE_FAULT_REPORTERS
  RegisterStrongReporter(new PageFaultsSoftReporter());
  RegisterStrongReporter(new PageFaultsHardReporter());
#endif

#ifdef HAVE_PRIVATE_REPORTER
  RegisterStrongReporter(new PrivateReporter());
#endif

#ifdef HAVE_SYSTEM_HEAP_REPORTER
  RegisterStrongReporter(new SystemHeapReporter());
#endif

  RegisterStrongReporter(new AtomTablesReporter());

  RegisterStrongReporter(new ThreadsReporter());

#ifdef DEBUG
  RegisterStrongReporter(new DeadlockDetectorReporter());
#endif

#ifdef MOZ_GECKO_PROFILER
  // We have to register this here rather than in profiler_init() because
  // profiler_init() runs prior to nsMemoryReporterManager's creation.
  RegisterStrongReporter(new GeckoProfilerReporter());
#endif

#ifdef MOZ_DMD
  RegisterStrongReporter(new mozilla::dmd::DMDReporter());
#endif

#ifdef XP_WIN
  RegisterStrongReporter(new WindowsAddressSpaceReporter());
#endif

#ifdef MOZ_WIDGET_ANDROID
  RegisterStrongReporter(new AndroidMemoryReporter());
#endif

#ifdef XP_UNIX
  nsMemoryInfoDumper::Initialize();
#endif

  // Report our own memory usage as well.
  RegisterWeakReporter(this);

  return NS_OK;
}

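// For reference, a reporter defined outside this file follows the same shape
// as those registered above. A minimal sketch (MyReporter and
// ComputeMyThingSize() are hypothetical):
//
//   class MyReporter final : public nsIMemoryReporter {
//     ~MyReporter() = default;
//
//    public:
//     NS_DECL_ISUPPORTS
//     NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
//                               nsISupports* aData,
//                               bool aAnonymize) override {
//       MOZ_COLLECT_REPORT("explicit/my-thing", KIND_HEAP, UNITS_BYTES,
//                          ComputeMyThingSize(),
//                          "Memory used by my thing.");
//       return NS_OK;
//     }
//   };
//   NS_IMPL_ISUPPORTS(MyReporter, nsIMemoryReporter)
//
//   // Then, once XPCOM is up:
//   RegisterStrongMemoryReporter(new MyReporter());
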
nsMemoryReporterManager::nsMemoryReporterManager()
    : mMutex("nsMemoryReporterManager::mMutex"),
      mIsRegistrationBlocked(false),
      mStrongReporters(new StrongReportersTable()),
      mWeakReporters(new WeakReportersTable()),
      mSavedStrongReporters(nullptr),
      mSavedWeakReporters(nullptr),
      mNextGeneration(1),
      mPendingProcessesState(nullptr),
      mPendingReportersState(nullptr)
#ifdef HAVE_JEMALLOC_STATS
      ,
      mThreadPool(do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID))
#endif
{
}

nsMemoryReporterManager::~nsMemoryReporterManager() {
  delete mStrongReporters;
  delete mWeakReporters;
  NS_ASSERTION(!mSavedStrongReporters, "failed to restore strong reporters");
  NS_ASSERTION(!mSavedWeakReporters, "failed to restore weak reporters");
}
NS_IMETHODIMP
nsMemoryReporterManager::CollectReports(nsIHandleReportCallback* aHandleReport,
                                        nsISupports* aData, bool aAnonymize) {
  size_t n = MallocSizeOf(this);
  {
    mozilla::MutexAutoLock autoLock(mMutex);
    n += mStrongReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
    n += mWeakReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
  }

  MOZ_COLLECT_REPORT("explicit/memory-reporter-manager", KIND_HEAP, UNITS_BYTES,
                     n, "Memory used by the memory reporter infrastructure.");

  return NS_OK;
}

#ifdef DEBUG_CHILD_PROCESS_MEMORY_REPORTING
# define MEMORY_REPORTING_LOG(format, ...) \
    printf_stderr("++++ MEMORY REPORTING: " format, ##__VA_ARGS__);
#else
# define MEMORY_REPORTING_LOG(...)
#endif
NS_IMETHODIMP
nsMemoryReporterManager::GetReports(
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData, bool aAnonymize) {
  return GetReportsExtended(aHandleReport, aHandleReportData, aFinishReporting,
                            aFinishReportingData, aAnonymize,
                            /* minimize = */ false,
                            /* DMDident = */ u""_ns);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetReportsExtended(
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData, bool aAnonymize, bool aMinimize,
    const nsAString& aDMDDumpIdent) {
  nsresult rv;

  // Memory reporters are not necessarily threadsafe, so this function must
  // be called from the main thread.
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  uint32_t generation = mNextGeneration++;

  if (mPendingProcessesState) {
    // A request is in flight. Don't start another one. And don't report
    // an error; just ignore it, and let the in-flight request finish.
    MEMORY_REPORTING_LOG("GetReports (gen=%u, s->gen=%u): abort\n", generation,
                         mPendingProcessesState->mGeneration);
    return NS_OK;
  }

  MEMORY_REPORTING_LOG("GetReports (gen=%u)\n", generation);

  uint32_t concurrency = Preferences::GetUint("memory.report_concurrency", 1);
  MOZ_ASSERT(concurrency >= 1);
  if (concurrency < 1) {
    concurrency = 1;
  }
  mPendingProcessesState = new PendingProcessesState(
      generation, aAnonymize, aMinimize, concurrency, aHandleReport,
      aHandleReportData, aFinishReporting, aFinishReportingData, aDMDDumpIdent);

  if (aMinimize) {
    nsCOMPtr<nsIRunnable> callback =
        NewRunnableMethod("nsMemoryReporterManager::StartGettingReports", this,
                          &nsMemoryReporterManager::StartGettingReports);
    rv = MinimizeMemoryUsage(callback);
  } else {
    rv = StartGettingReports();
  }
  return rv;
}
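// Overview of a full report request, added for clarity (this summary is
// editorial, not from the original file):
//
//   GetReports()/GetReportsExtended()      (main thread, parent process)
//     -> StartGettingReports()
//          - collects this process's reports asynchronously
//          - queues content/GPU/RDD/VR/socket/utility processes in
//            mChildrenPending and arms a one-shot timeout timer
//     -> EndReport()           called once per in-process reporter
//     -> EndProcessReport()    called once per responding child process;
//                              starts queued children, up to
//                              memory.report_concurrency at a time
//     -> FinishReporting()     when every process has reported, or when
//                              the timeout fires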
// MainThread only
nsresult nsMemoryReporterManager::StartGettingReports() {
  PendingProcessesState* s = mPendingProcessesState;
  nsresult rv;

  // Get reports for this process.
  FILE* parentDMDFile = nullptr;
#ifdef MOZ_DMD
  if (!s->mDMDDumpIdent.IsEmpty()) {
    rv = nsMemoryInfoDumper::OpenDMDFile(s->mDMDDumpIdent, getpid(),
                                         &parentDMDFile);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      // Proceed with the memory report as if DMD were disabled.
      parentDMDFile = nullptr;
    }
  }
#endif

  // This is async.
  GetReportsForThisProcessExtended(
      s->mHandleReport, s->mHandleReportData, s->mAnonymize, parentDMDFile,
      s->mFinishReporting, s->mFinishReportingData);

  nsTArray<dom::ContentParent*> childWeakRefs;
  dom::ContentParent::GetAll(childWeakRefs);
  if (!childWeakRefs.IsEmpty()) {
    // Request memory reports from child processes. This happens
    // after the parent report so that the parent's main thread will
    // be free to process the child reports, instead of causing them
    // to be buffered and consume (possibly scarce) memory.

    for (size_t i = 0; i < childWeakRefs.Length(); ++i) {
      s->mChildrenPending.AppendElement(childWeakRefs[i]);
    }
  }

  if (gfx::GPUProcessManager* gpu = gfx::GPUProcessManager::Get()) {
    if (RefPtr<MemoryReportingProcess> proc = gpu->GetProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (RDDProcessManager* rdd = RDDProcessManager::Get()) {
    if (RefPtr<MemoryReportingProcess> proc = rdd->GetProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (gfx::VRProcessManager* vr = gfx::VRProcessManager::Get()) {
    if (RefPtr<MemoryReportingProcess> proc = vr->GetProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (!IsRegistrationBlocked() && net::gIOService) {
    if (RefPtr<MemoryReportingProcess> proc =
            net::gIOService->GetSocketProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (!IsRegistrationBlocked()) {
    if (RefPtr<UtilityProcessManager> utility =
            UtilityProcessManager::GetIfExists()) {
      for (RefPtr<UtilityProcessParent>& parent :
           utility->GetAllProcessesProcessParent()) {
        if (RefPtr<MemoryReportingProcess> proc =
                utility->GetProcessMemoryReporter(parent)) {
          s->mChildrenPending.AppendElement(proc.forget());
        }
      }
    }
  }

  if (!s->mChildrenPending.IsEmpty()) {
    nsCOMPtr<nsITimer> timer;
    rv = NS_NewTimerWithFuncCallback(
        getter_AddRefs(timer), TimeoutCallback, this, kTimeoutLengthMS,
        nsITimer::TYPE_ONE_SHOT,
        "nsMemoryReporterManager::StartGettingReports");
    if (NS_WARN_IF(NS_FAILED(rv))) {
      FinishReporting();
      return rv;
    }

    MOZ_ASSERT(!s->mTimer);
    s->mTimer.swap(timer);
  }

  return NS_OK;
}
void nsMemoryReporterManager::DispatchReporter(
    nsIMemoryReporter* aReporter, bool aIsAsync,
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    bool aAnonymize) {
  MOZ_ASSERT(mPendingReportersState);

  // Grab refs to everything used in the lambda function.
  RefPtr<nsMemoryReporterManager> self = this;
  nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
  nsCOMPtr<nsIHandleReportCallback> handleReport = aHandleReport;
  nsCOMPtr<nsISupports> handleReportData = aHandleReportData;

  nsCOMPtr<nsIRunnable> event = NS_NewRunnableFunction(
      "nsMemoryReporterManager::DispatchReporter",
      [self, reporter, aIsAsync, handleReport, handleReportData, aAnonymize]() {
        reporter->CollectReports(handleReport, handleReportData, aAnonymize);
        if (!aIsAsync) {
          self->EndReport();
        }
      });

  NS_DispatchToMainThread(event);
  mPendingReportersState->mReportsPending++;
}
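// Editorial note on the sync/async split above: for a synchronous reporter,
// the lambda itself balances the mReportsPending++ by calling EndReport()
// right after CollectReports() returns. An asynchronous reporter is expected
// to call nsIMemoryReporterManager::EndReport() itself once all of its
// measurements have been delivered. A hypothetical async reporter would end
// like so:
//
//   // ... deliver the last measurement via aHandleReport, then:
//   nsCOMPtr<nsIMemoryReporterManager> mgr =
//       do_GetService("@mozilla.org/memory-reporter-manager;1");
//   if (mgr) {
//     mgr->EndReport();
//   }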
NS_IMETHODIMP
nsMemoryReporterManager::GetReportsForThisProcessExtended(
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    bool aAnonymize, FILE* aDMDFile,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData) {
  // Memory reporters are not necessarily threadsafe, so this function must
  // be called from the main thread.
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  if (NS_WARN_IF(mPendingReportersState)) {
    // Report is already in progress.
    return NS_ERROR_IN_PROGRESS;
  }

#ifdef MOZ_DMD
  if (aDMDFile) {
    // Clear DMD's reportedness state before running the memory
    // reporters, to avoid spurious twice-reported warnings.
    dmd::ClearReports();
  }
#else
  MOZ_ASSERT(!aDMDFile);
#endif

  mPendingReportersState = new PendingReportersState(
      aFinishReporting, aFinishReportingData, aDMDFile);

  {
    mozilla::MutexAutoLock autoLock(mMutex);

    for (const auto& entry : *mStrongReporters) {
      DispatchReporter(entry.GetKey(), entry.GetData(), aHandleReport,
                       aHandleReportData, aAnonymize);
    }

    for (const auto& entry : *mWeakReporters) {
      nsCOMPtr<nsIMemoryReporter> reporter = entry.GetKey();
      DispatchReporter(reporter, entry.GetData(), aHandleReport,
                       aHandleReportData, aAnonymize);
    }
  }

  return NS_OK;
}

// MainThread only
NS_IMETHODIMP
nsMemoryReporterManager::EndReport() {
  if (--mPendingReportersState->mReportsPending == 0) {
#ifdef MOZ_DMD
    if (mPendingReportersState->mDMDFile) {
      nsMemoryInfoDumper::DumpDMDToFile(mPendingReportersState->mDMDFile);
    }
#endif
    if (mPendingProcessesState) {
      // This is the parent process.
      EndProcessReport(mPendingProcessesState->mGeneration, true);
    } else {
      mPendingReportersState->mFinishReporting->Callback(
          mPendingReportersState->mFinishReportingData);
    }

    delete mPendingReportersState;
    mPendingReportersState = nullptr;
  }

  return NS_OK;
}
nsMemoryReporterManager::PendingProcessesState*
nsMemoryReporterManager::GetStateForGeneration(uint32_t aGeneration) {
  // Memory reporting only happens on the main thread.
  MOZ_RELEASE_ASSERT(NS_IsMainThread());

  PendingProcessesState* s = mPendingProcessesState;

  if (!s) {
    // If we reach here, then:
    //
    // - A child process reported back too late, and no subsequent request
    //   is in flight.
    //
    // So there's nothing to be done. Just ignore it.
    MEMORY_REPORTING_LOG("HandleChildReports: no request in flight (aGen=%u)\n",
                         aGeneration);
    return nullptr;
  }

  if (aGeneration != s->mGeneration) {
    // If we reach here, a child process must have reported back, too late,
    // while a subsequent (higher-numbered) request is in flight. Again,
    // ignore it.
    MOZ_ASSERT(aGeneration < s->mGeneration);
    MEMORY_REPORTING_LOG(
        "HandleChildReports: gen mismatch (aGen=%u, s->gen=%u)\n", aGeneration,
        s->mGeneration);
    return nullptr;
  }

  return s;
}

// This function has no return value. If something goes wrong, there's no
// clear place to report the problem to, but that's ok -- we will end up
// hitting the timeout and executing TimeoutCallback().
void nsMemoryReporterManager::HandleChildReport(
    uint32_t aGeneration, const dom::MemoryReport& aChildReport) {
  PendingProcessesState* s = GetStateForGeneration(aGeneration);
  if (!s) {
    return;
  }

  // Child reports should have a non-empty process.
  MOZ_ASSERT(!aChildReport.process().IsEmpty());

  // If the call fails, ignore and continue.
  s->mHandleReport->Callback(aChildReport.process(), aChildReport.path(),
                             aChildReport.kind(), aChildReport.units(),
                             aChildReport.amount(), aChildReport.desc(),
                             s->mHandleReportData);
}
/* static */
bool nsMemoryReporterManager::StartChildReport(
    mozilla::MemoryReportingProcess* aChild,
    const PendingProcessesState* aState) {
  if (!aChild->IsAlive()) {
    MEMORY_REPORTING_LOG(
        "StartChildReports (gen=%u): child exited before"
        " its report was started\n",
        aState->mGeneration);
    return false;
  }

  Maybe<mozilla::ipc::FileDescriptor> dmdFileDesc;
#ifdef MOZ_DMD
  if (!aState->mDMDDumpIdent.IsEmpty()) {
    FILE* dmdFile = nullptr;
    nsresult rv = nsMemoryInfoDumper::OpenDMDFile(aState->mDMDDumpIdent,
                                                  aChild->Pid(), &dmdFile);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      // Proceed with the memory report as if DMD were disabled.
      dmdFile = nullptr;
    }
    if (dmdFile) {
      dmdFileDesc = Some(mozilla::ipc::FILEToFileDescriptor(dmdFile));
      fclose(dmdFile);
    }
  }
#endif
  return aChild->SendRequestMemoryReport(
      aState->mGeneration, aState->mAnonymize, aState->mMinimize, dmdFileDesc);
}

void nsMemoryReporterManager::EndProcessReport(uint32_t aGeneration,
                                               bool aSuccess) {
  PendingProcessesState* s = GetStateForGeneration(aGeneration);
  if (!s) {
    return;
  }

  MOZ_ASSERT(s->mNumProcessesRunning > 0);
  s->mNumProcessesRunning--;
  s->mNumProcessesCompleted++;
  MEMORY_REPORTING_LOG(
      "HandleChildReports (aGen=%u): process %u %s"
      " (%u running, %u pending)\n",
      aGeneration, s->mNumProcessesCompleted,
      aSuccess ? "completed" : "exited during report", s->mNumProcessesRunning,
      static_cast<unsigned>(s->mChildrenPending.Length()));

  // Start pending children up to the concurrency limit.
  while (s->mNumProcessesRunning < s->mConcurrencyLimit &&
         !s->mChildrenPending.IsEmpty()) {
    // Pop last element from s->mChildrenPending.
    const RefPtr<MemoryReportingProcess> nextChild =
        s->mChildrenPending.PopLastElement();
    // Start report (if the child is still alive).
    if (StartChildReport(nextChild, s)) {
      ++s->mNumProcessesRunning;
      MEMORY_REPORTING_LOG(
          "HandleChildReports (aGen=%u): started child report"
          " (%u running, %u pending)\n",
          aGeneration, s->mNumProcessesRunning,
          static_cast<unsigned>(s->mChildrenPending.Length()));
    }
  }

  // If all the child processes (if any) have reported, we can cancel
  // the timer (if started) and finish up. Otherwise, just return.
  if (s->mNumProcessesRunning == 0) {
    MOZ_ASSERT(s->mChildrenPending.IsEmpty());
    if (s->mTimer) {
      s->mTimer->Cancel();
    }
    FinishReporting();
  }
}
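// Worked example of the scheduling above (editorial, not from the original
// file): with memory.report_concurrency = 2 and four children queued, only
// the parent runs at first (mNumProcessesRunning == 1). When the parent's
// own report completes, EndProcessReport() drops the running count to zero
// and then starts children up to the limit, so two children report at once;
// each child that completes (or dies) lets the next pending one start. Only
// when the queue is empty and the running count reaches zero does
// FinishReporting() run and the timeout timer get cancelled.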
/* static */
void nsMemoryReporterManager::TimeoutCallback(nsITimer* aTimer, void* aData) {
  nsMemoryReporterManager* mgr = static_cast<nsMemoryReporterManager*>(aData);
  PendingProcessesState* s = mgr->mPendingProcessesState;

  // Release assert because: if the pointer is null we're about to
  // crash regardless of DEBUG, and this way the compiler doesn't
  // complain about unused variables.
  MOZ_RELEASE_ASSERT(s, "mgr->mPendingProcessesState");
  MEMORY_REPORTING_LOG("TimeoutCallback (s->gen=%u; %u running, %u pending)\n",
                       s->mGeneration, s->mNumProcessesRunning,
                       static_cast<unsigned>(s->mChildrenPending.Length()));

  // We don't bother sending any kind of cancellation message to the child
  // processes that haven't reported back.
  mgr->FinishReporting();
}

nsresult nsMemoryReporterManager::FinishReporting() {
  // Memory reporting only happens on the main thread.
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  MOZ_ASSERT(mPendingProcessesState);
  MEMORY_REPORTING_LOG("FinishReporting (s->gen=%u; %u processes reported)\n",
                       mPendingProcessesState->mGeneration,
                       mPendingProcessesState->mNumProcessesCompleted);

  // Call this before deleting |mPendingProcessesState|. That way, if the
  // |mFinishReporting| callback calls GetReports(), it will silently abort,
  // as required.
  nsresult rv = mPendingProcessesState->mFinishReporting->Callback(
      mPendingProcessesState->mFinishReportingData);

  delete mPendingProcessesState;
  mPendingProcessesState = nullptr;
  return rv;
}
nsMemoryReporterManager::PendingProcessesState::PendingProcessesState(
    uint32_t aGeneration, bool aAnonymize, bool aMinimize,
    uint32_t aConcurrencyLimit, nsIHandleReportCallback* aHandleReport,
    nsISupports* aHandleReportData,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData, const nsAString& aDMDDumpIdent)
    : mGeneration(aGeneration),
      mAnonymize(aAnonymize),
      mMinimize(aMinimize),
      mChildrenPending(),
      mNumProcessesRunning(1),  // reporting starts with the parent
      mNumProcessesCompleted(0),
      mConcurrencyLimit(aConcurrencyLimit),
      mHandleReport(aHandleReport),
      mHandleReportData(aHandleReportData),
      mFinishReporting(aFinishReporting),
      mFinishReportingData(aFinishReportingData),
      mDMDDumpIdent(aDMDDumpIdent) {}

static void CrashIfRefcountIsZero(nsISupports* aObj) {
  // This will probably crash if the object's refcount is 0.
  // (NS_ADDREF returns the post-increment refcount, so a live object with at
  // least one existing reference yields a value of 2 or more here.)
  uint32_t refcnt = NS_ADDREF(aObj);
  if (refcnt <= 1) {
    MOZ_CRASH("CrashIfRefcountIsZero: refcount is zero");
  }
  NS_RELEASE(aObj);
}
nsresult nsMemoryReporterManager::RegisterReporterHelper(
    nsIMemoryReporter* aReporter, bool aForce, bool aStrong, bool aIsAsync) {
  // This method is thread-safe.
  mozilla::MutexAutoLock autoLock(mMutex);

  if (mIsRegistrationBlocked && !aForce) {
    return NS_ERROR_FAILURE;
  }

  if (mStrongReporters->Contains(aReporter) ||
      mWeakReporters->Contains(aReporter)) {
    return NS_ERROR_FAILURE;
  }

  // If |aStrong| is true, |aReporter| may have a refcnt of 0, so we take
  // a kung fu death grip before calling InsertOrUpdate. Otherwise, if
  // InsertOrUpdate addref'ed and released |aReporter| before finally
  // addref'ing it for good, it would free |aReporter|! The kung fu death
  // grip could itself be problematic if InsertOrUpdate didn't addref
  // |aReporter| (because then when the death grip goes out of scope, we
  // would delete the reporter). In debug mode, we check that this doesn't
  // happen.
  //
  // If |aStrong| is false, we require that |aReporter| have a non-zero
  // refcnt.
  //
  if (aStrong) {
    nsCOMPtr<nsIMemoryReporter> kungFuDeathGrip = aReporter;
    mStrongReporters->InsertOrUpdate(aReporter, aIsAsync);
    CrashIfRefcountIsZero(aReporter);
  } else {
    CrashIfRefcountIsZero(aReporter);
    nsCOMPtr<nsIXPConnectWrappedJS> jsComponent = do_QueryInterface(aReporter);
    if (jsComponent) {
      // We cannot allow non-native reporters (WrappedJS), since we'll be
      // holding onto a raw pointer, which would point to the wrapper,
      // and that wrapper is likely to go away as soon as this register
      // call finishes. This would then lead to subsequent crashes in
      // CollectReports().
      return NS_ERROR_XPC_BAD_CONVERT_JS;
    }
    mWeakReporters->InsertOrUpdate(aReporter, aIsAsync);
  }

  return NS_OK;
}
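// Editorial illustration of the death-grip hazard described above: a caller
// may hand in a freshly allocated reporter whose refcount is still zero,
//
//   mgr->RegisterStrongReporter(new ExampleReporter());
//
// (ExampleReporter standing in for any nsIMemoryReporter implementation).
// Without the kungFuDeathGrip, a hash-table insertion that temporarily
// addref'ed and released the object would take the refcount 0 -> 1 -> 0 and
// destroy it before the table ever held its owning reference.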
NS_IMETHODIMP
nsMemoryReporterManager::RegisterStrongReporter(nsIMemoryReporter* aReporter) {
  return RegisterReporterHelper(aReporter, /* force = */ false,
                                /* strong = */ true,
                                /* async = */ false);
}

NS_IMETHODIMP
nsMemoryReporterManager::RegisterStrongAsyncReporter(
    nsIMemoryReporter* aReporter) {
  return RegisterReporterHelper(aReporter, /* force = */ false,
                                /* strong = */ true,
                                /* async = */ true);
}

NS_IMETHODIMP
nsMemoryReporterManager::RegisterWeakReporter(nsIMemoryReporter* aReporter) {
  return RegisterReporterHelper(aReporter, /* force = */ false,
                                /* strong = */ false,
                                /* async = */ false);
}

NS_IMETHODIMP
nsMemoryReporterManager::RegisterWeakAsyncReporter(
    nsIMemoryReporter* aReporter) {
  return RegisterReporterHelper(aReporter, /* force = */ false,
                                /* strong = */ false,
                                /* async = */ true);
}

NS_IMETHODIMP
nsMemoryReporterManager::RegisterStrongReporterEvenIfBlocked(
    nsIMemoryReporter* aReporter) {
  return RegisterReporterHelper(aReporter, /* force = */ true,
                                /* strong = */ true,
                                /* async = */ false);
}
NS_IMETHODIMP
nsMemoryReporterManager::UnregisterStrongReporter(
    nsIMemoryReporter* aReporter) {
  // This method is thread-safe.
  mozilla::MutexAutoLock autoLock(mMutex);

  MOZ_ASSERT(!mWeakReporters->Contains(aReporter));

  if (mStrongReporters->Contains(aReporter)) {
    mStrongReporters->Remove(aReporter);
    return NS_OK;
  }

  // We don't register new reporters when the block is in place, but we do
  // unregister existing reporters. This is so we don't keep holding strong
  // references that these reporters aren't expecting (which can keep them
  // alive longer than intended).
  if (mSavedStrongReporters && mSavedStrongReporters->Contains(aReporter)) {
    mSavedStrongReporters->Remove(aReporter);
    return NS_OK;
  }

  return NS_ERROR_FAILURE;
}

NS_IMETHODIMP
nsMemoryReporterManager::UnregisterWeakReporter(nsIMemoryReporter* aReporter) {
  // This method is thread-safe.
  mozilla::MutexAutoLock autoLock(mMutex);

  MOZ_ASSERT(!mStrongReporters->Contains(aReporter));

  if (mWeakReporters->Contains(aReporter)) {
    mWeakReporters->Remove(aReporter);
    return NS_OK;
  }

  // We don't register new reporters when the block is in place, but we do
  // unregister existing reporters. This is so we don't keep holding weak
  // references that the old reporters aren't expecting (which can end up as
  // dangling pointers that lead to use-after-frees).
  if (mSavedWeakReporters && mSavedWeakReporters->Contains(aReporter)) {
    mSavedWeakReporters->Remove(aReporter);
    return NS_OK;
  }

  return NS_ERROR_FAILURE;
}

NS_IMETHODIMP
nsMemoryReporterManager::BlockRegistrationAndHideExistingReporters() {
  // This method is thread-safe.
  mozilla::MutexAutoLock autoLock(mMutex);
  if (mIsRegistrationBlocked) {
    return NS_ERROR_FAILURE;
  }
  mIsRegistrationBlocked = true;

  // Hide the existing reporters, saving them for later restoration.
  MOZ_ASSERT(!mSavedStrongReporters);
  MOZ_ASSERT(!mSavedWeakReporters);
  mSavedStrongReporters = mStrongReporters;
  mSavedWeakReporters = mWeakReporters;
  mStrongReporters = new StrongReportersTable();
  mWeakReporters = new WeakReportersTable();

  return NS_OK;
}

NS_IMETHODIMP
nsMemoryReporterManager::UnblockRegistrationAndRestoreOriginalReporters() {
  // This method is thread-safe.
  mozilla::MutexAutoLock autoLock(mMutex);
  if (!mIsRegistrationBlocked) {
    return NS_ERROR_FAILURE;
  }

  // Banish the current reporters, and restore the hidden ones.
  delete mStrongReporters;
  delete mWeakReporters;
  mStrongReporters = mSavedStrongReporters;
  mWeakReporters = mSavedWeakReporters;
  mSavedStrongReporters = nullptr;
  mSavedWeakReporters = nullptr;

  mIsRegistrationBlocked = false;
  return NS_OK;
}
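// Editorial sketch of how the block/unblock pair above can be used, e.g. by
// a test that wants a deterministic set of reporters (the reporter class
// name here is invented):
//
//   mgr->BlockRegistrationAndHideExistingReporters();
//   mgr->RegisterStrongReporterEvenIfBlocked(new FakeReporter());
//   // ... run a memory report and assert on its output ...
//   mgr->UnblockRegistrationAndRestoreOriginalReporters();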
NS_IMETHODIMP
nsMemoryReporterManager::GetVsize(int64_t* aVsize) {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  return VsizeDistinguishedAmount(aVsize);
#else
  *aVsize = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetVsizeMaxContiguous(int64_t* aAmount) {
#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
  return VsizeMaxContiguousDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResident(int64_t* aAmount) {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  return ResidentDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResidentFast(int64_t* aAmount) {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  return ResidentFastDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

/*static*/
int64_t nsMemoryReporterManager::ResidentFast() {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  int64_t amount;
  nsresult rv = ResidentFastDistinguishedAmount(&amount);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
#else
  return 0;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResidentPeak(int64_t* aAmount) {
#ifdef HAVE_RESIDENT_PEAK_REPORTER
  return ResidentPeakDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

/*static*/
int64_t nsMemoryReporterManager::ResidentPeak() {
#ifdef HAVE_RESIDENT_PEAK_REPORTER
  int64_t amount = 0;
  nsresult rv = ResidentPeakDistinguishedAmount(&amount);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
#else
  return 0;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResidentUnique(int64_t* aAmount) {
#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
  return ResidentUniqueDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

#ifdef XP_MACOSX
/*static*/
int64_t nsMemoryReporterManager::PhysicalFootprint(mach_port_t aPort) {
  int64_t amount = 0;
  nsresult rv = PhysicalFootprintAmount(&amount, aPort);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
}
#endif

typedef
#ifdef XP_WIN
    HANDLE
#elif XP_MACOSX
    mach_port_t
#elif XP_LINUX
    pid_t
#else
    int /* dummy type */
#endif
        ResidentUniqueArg;
#if defined(XP_WIN) || defined(XP_MACOSX) || defined(XP_LINUX)

/*static*/
int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg aProcess) {
  int64_t amount = 0;
  nsresult rv = ResidentUniqueDistinguishedAmount(&amount, aProcess);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
}

#else

/*static*/
int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg) {
# ifdef HAVE_RESIDENT_UNIQUE_REPORTER
  int64_t amount = 0;
  nsresult rv = ResidentUniqueDistinguishedAmount(&amount);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
# else
  return 0;
# endif
}

#endif  // XP_{WIN, MACOSX, LINUX, *}
NS_IMETHODIMP
nsMemoryReporterManager::GetHeapAllocated(int64_t* aAmount) {
#ifdef HAVE_JEMALLOC_STATS
  jemalloc_stats_t stats;
  jemalloc_stats(&stats);
  *aAmount = stats.allocated;
  return NS_OK;
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

// This has UNITS_PERCENTAGE, so it is multiplied by 100x.
NS_IMETHODIMP
nsMemoryReporterManager::GetHeapOverheadFraction(int64_t* aAmount) {
#ifdef HAVE_JEMALLOC_STATS
  jemalloc_stats_t stats;
  jemalloc_stats(&stats);
  *aAmount = HeapOverheadFraction(&stats);
  return NS_OK;
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}
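// Editorial note on the UNITS_PERCENTAGE convention mentioned above: the
// returned amount is the percentage scaled by 100, which preserves two
// decimal places in an int64_t. For example, a heap overhead of 5.25% would
// be reported as 525, and a consumer would convert it back with:
//
//   double percentage = static_cast<double>(amount) / 100.0;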
[[nodiscard]] static nsresult GetInfallibleAmount(InfallibleAmountFn aAmountFn,
                                                  int64_t* aAmount) {
  if (aAmountFn) {
    *aAmount = aAmountFn();
    return NS_OK;
  }
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeGCHeap(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeGCHeap, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeTemporaryPeak(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeTemporaryPeak, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeCompartmentsSystem(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeCompartmentsSystem,
                             aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeCompartmentsUser(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeCompartmentsUser,
                             aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeRealmsSystem(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsSystem, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeRealmsUser(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsUser, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetImagesContentUsedUncompressed(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mImagesContentUsedUncompressed,
                             aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetStorageSQLite(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mStorageSQLite, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetLowMemoryEventsPhysical(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mLowMemoryEventsPhysical, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetGhostWindows(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mGhostWindows, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetPageFaultsHard(int64_t* aAmount) {
#ifdef HAVE_PAGE_FAULT_REPORTERS
  return PageFaultsHardDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}
NS_IMETHODIMP
nsMemoryReporterManager::GetHasMozMallocUsableSize(bool* aHas) {
  void* p = malloc(16);
  if (!p) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  size_t usable = moz_malloc_usable_size(p);
  free(p);
  *aHas = !!(usable > 0);
  return NS_OK;
}

NS_IMETHODIMP
nsMemoryReporterManager::GetIsDMDEnabled(bool* aIsEnabled) {
#ifdef MOZ_DMD
  *aIsEnabled = true;
#else
  *aIsEnabled = false;
#endif
  return NS_OK;
}

NS_IMETHODIMP
nsMemoryReporterManager::GetIsDMDRunning(bool* aIsRunning) {
#ifdef MOZ_DMD
  *aIsRunning = dmd::IsRunning();
#else
  *aIsRunning = false;
#endif
  return NS_OK;
}
namespace {

/**
 * This runnable lets us implement
 * nsIMemoryReporterManager::MinimizeMemoryUsage(). We fire a heap-minimize
 * notification, spin the event loop, and repeat this process a few times.
 *
 * When this sequence finishes, we invoke the callback function passed to the
 * runnable's constructor.
 */
class MinimizeMemoryUsageRunnable : public Runnable {
 public:
  explicit MinimizeMemoryUsageRunnable(nsIRunnable* aCallback)
      : mozilla::Runnable("MinimizeMemoryUsageRunnable"),
        mCallback(aCallback),
        mRemainingIters(sNumIters) {}

  NS_IMETHOD Run() override {
    nsCOMPtr<nsIObserverService> os = services::GetObserverService();
    if (!os) {
      return NS_ERROR_FAILURE;
    }

    if (mRemainingIters == 0) {
      os->NotifyObservers(nullptr, "after-minimize-memory-usage",
                          u"MinimizeMemoryUsageRunnable");
      if (mCallback) {
        mCallback->Run();
      }
      return NS_OK;
    }

    os->NotifyObservers(nullptr, "memory-pressure", u"heap-minimize");
    mRemainingIters--;
    NS_DispatchToMainThread(this);

    return NS_OK;
  }

 private:
  // Send sNumIters heap-minimize notifications, spinning the event
  // loop after each notification (see bug 610166 comment 12 for an
  // explanation), because one notification doesn't cut it.
  static const uint32_t sNumIters = 3;

  nsCOMPtr<nsIRunnable> mCallback;
  uint32_t mRemainingIters;
};

} // namespace

NS_IMETHODIMP
nsMemoryReporterManager::MinimizeMemoryUsage(nsIRunnable* aCallback) {
  RefPtr<MinimizeMemoryUsageRunnable> runnable =
      new MinimizeMemoryUsageRunnable(aCallback);

  return NS_DispatchToMainThread(runnable);
}
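// Editorial usage sketch (hypothetical caller, not part of this file):
//
//   RefPtr<nsMemoryReporterManager> mgr =
//       nsMemoryReporterManager::GetOrCreate();
//   nsCOMPtr<nsIRunnable> done = NS_NewRunnableFunction(
//       "MinimizeDone", [] { /* heap is now as trimmed as we can make it */ });
//   if (mgr) {
//     mgr->MinimizeMemoryUsage(done);
//   }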
NS_IMETHODIMP
nsMemoryReporterManager::SizeOfTab(mozIDOMWindowProxy* aTopWindow,
                                   int64_t* aJSObjectsSize,
                                   int64_t* aJSStringsSize,
                                   int64_t* aJSOtherSize, int64_t* aDomSize,
                                   int64_t* aStyleSize, int64_t* aOtherSize,
                                   int64_t* aTotalSize, double* aJSMilliseconds,
                                   double* aNonJSMilliseconds) {
  nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aTopWindow);
  auto* piWindow = nsPIDOMWindowOuter::From(aTopWindow);
  if (NS_WARN_IF(!global) || NS_WARN_IF(!piWindow)) {
    return NS_ERROR_FAILURE;
  }

  TimeStamp t1 = TimeStamp::Now();

  // Measure JS memory consumption (and possibly some non-JS consumption, via
  // |jsPrivateSize|).
  size_t jsObjectsSize, jsStringsSize, jsPrivateSize, jsOtherSize;
  nsresult rv = mSizeOfTabFns.mJS(global->GetGlobalJSObject(), &jsObjectsSize,
                                  &jsStringsSize, &jsPrivateSize, &jsOtherSize);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  TimeStamp t2 = TimeStamp::Now();

  // Measure non-JS memory consumption.
  size_t domSize, styleSize, otherSize;
  rv = mSizeOfTabFns.mNonJS(piWindow, &domSize, &styleSize, &otherSize);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  TimeStamp t3 = TimeStamp::Now();

  *aTotalSize = 0;
#define DO(aN, n)       \
  {                     \
    *aN = (n);          \
    *aTotalSize += (n); \
  }
  DO(aJSObjectsSize, jsObjectsSize);
  DO(aJSStringsSize, jsStringsSize);
  DO(aJSOtherSize, jsOtherSize);
  DO(aDomSize, jsPrivateSize + domSize);
  DO(aStyleSize, styleSize);
  DO(aOtherSize, otherSize);
#undef DO

  *aJSMilliseconds = (t2 - t1).ToMilliseconds();
  *aNonJSMilliseconds = (t3 - t2).ToMilliseconds();

  return NS_OK;
}
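// Editorial note: |jsPrivateSize| is memory that the JS measurement observes
// but that belongs to DOM-side data hanging off JS objects, which is
// presumably why it is folded into *aDomSize rather than into the JS totals
// above. The timing pair (aJSMilliseconds, aNonJSMilliseconds) simply
// brackets the two measurement phases with TimeStamp::Now().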
namespace mozilla {

#define GET_MEMORY_REPORTER_MANAGER(mgr)      \
  RefPtr<nsMemoryReporterManager> mgr =       \
      nsMemoryReporterManager::GetOrCreate(); \
  if (!mgr) {                                 \
    return NS_ERROR_FAILURE;                  \
  }
nsresult RegisterStrongMemoryReporter(nsIMemoryReporter* aReporter) {
  // Hold a strong reference to the argument to make sure it gets released if
  // we return early below.
  nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterStrongReporter(reporter);
}

nsresult RegisterStrongAsyncMemoryReporter(nsIMemoryReporter* aReporter) {
  // Hold a strong reference to the argument to make sure it gets released if
  // we return early below.
  nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterStrongAsyncReporter(reporter);
}

nsresult RegisterWeakMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterWeakReporter(aReporter);
}

nsresult RegisterWeakAsyncMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterWeakAsyncReporter(aReporter);
}

nsresult UnregisterStrongMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->UnregisterStrongReporter(aReporter);
}

nsresult UnregisterWeakMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->UnregisterWeakReporter(aReporter);
}
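// Editorial sketch of the usual weak-reporter lifecycle built on the helpers
// above (class name invented): a long-lived singleton registers itself
// weakly so the manager never extends its lifetime, and it must unregister
// before it is destroyed to avoid leaving a dangling pointer behind:
//
//   MySubsystem::MySubsystem() { RegisterWeakMemoryReporter(this); }
//   MySubsystem::~MySubsystem() { UnregisterWeakMemoryReporter(this); }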
// Macro for generating functions that register distinguished amount functions
// with the memory reporter manager.
#define DEFINE_REGISTER_DISTINGUISHED_AMOUNT(kind, name)                   \
  nsresult Register##name##DistinguishedAmount(kind##AmountFn aAmountFn) { \
    GET_MEMORY_REPORTER_MANAGER(mgr)                                       \
    mgr->mAmountFns.m##name = aAmountFn;                                   \
    return NS_OK;                                                          \
  }

// Macro for generating functions that unregister distinguished amount
// functions with the memory reporter manager.
#define DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(name) \
  nsresult Unregister##name##DistinguishedAmount() { \
    GET_MEMORY_REPORTER_MANAGER(mgr)                 \
    mgr->mAmountFns.m##name = nullptr;               \
    return NS_OK;                                    \
  }

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeGCHeap)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeTemporaryPeak)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible,
                                     JSMainRuntimeCompartmentsSystem)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeCompartmentsUser)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsSystem)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsUser)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, ImagesContentUsedUncompressed)
DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(ImagesContentUsedUncompressed)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, StorageSQLite)
DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(StorageSQLite)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, LowMemoryEventsPhysical)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, GhostWindows)

#undef DEFINE_REGISTER_DISTINGUISHED_AMOUNT
#undef DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT
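// For clarity (editorial): expanding, e.g.,
// DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, GhostWindows) yields
//
//   nsresult RegisterGhostWindowsDistinguishedAmount(
//       InfallibleAmountFn aAmountFn) {
//     RefPtr<nsMemoryReporterManager> mgr =
//         nsMemoryReporterManager::GetOrCreate();
//     if (!mgr) {
//       return NS_ERROR_FAILURE;
//     }
//     mgr->mAmountFns.mGhostWindows = aAmountFn;
//     return NS_OK;
//   }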
#define DEFINE_REGISTER_SIZE_OF_TAB(name)                              \
  nsresult Register##name##SizeOfTab(name##SizeOfTabFn aSizeOfTabFn) { \
    GET_MEMORY_REPORTER_MANAGER(mgr)                                   \
    mgr->mSizeOfTabFns.m##name = aSizeOfTabFn;                         \
    return NS_OK;                                                      \
  }

DEFINE_REGISTER_SIZE_OF_TAB(JS);
DEFINE_REGISTER_SIZE_OF_TAB(NonJS);

#undef DEFINE_REGISTER_SIZE_OF_TAB

#undef GET_MEMORY_REPORTER_MANAGER

} // namespace mozilla