/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsMemoryReporterManager.h"

#include "nsAtomTable.h"
#include "nsCOMPtr.h"
#include "nsCOMArray.h"
#include "nsPrintfCString.h"
#include "nsProxyRelease.h"
#include "nsServiceManagerUtils.h"
#include "nsITimer.h"
#include "nsThreadUtils.h"
#include "nsPIDOMWindow.h"
#include "nsIObserverService.h"
#include "nsIOService.h"
#include "nsIGlobalObject.h"
#include "nsIXPConnect.h"
#ifdef MOZ_GECKO_PROFILER
#  include "GeckoProfilerReporter.h"
#endif
#if defined(XP_UNIX) || defined(MOZ_DMD)
#  include "nsMemoryInfoDumper.h"
#endif
#include "nsNetCID.h"
#include "nsThread.h"
#include "VRProcessManager.h"
#include "mozilla/Attributes.h"
#include "mozilla/MemoryReportingProcess.h"
#include "mozilla/PodOperations.h"
#include "mozilla/Preferences.h"
#include "mozilla/RDDProcessManager.h"
#include "mozilla/ResultExtensions.h"
#include "mozilla/Services.h"
#include "mozilla/Telemetry.h"
#include "mozilla/UniquePtrExtensions.h"
#include "mozilla/dom/MemoryReportTypes.h"
#include "mozilla/dom/ContentParent.h"
#include "mozilla/gfx/GPUProcessManager.h"
#include "mozilla/ipc/UtilityProcessManager.h"
#include "mozilla/ipc/FileDescriptorUtils.h"

#ifdef XP_WIN
#  include "mozilla/MemoryInfo.h"

#  include <process.h>
#  ifndef getpid
#    define getpid _getpid
#  endif
#else
#  include <unistd.h>
#endif

using namespace mozilla;
using namespace mozilla::ipc;
using namespace dom;

#if defined(MOZ_MEMORY)
#  define HAVE_JEMALLOC_STATS 1
#  include "mozmemory.h"
#endif  // MOZ_MEMORY

#if defined(XP_LINUX)

#  include "mozilla/MemoryMapping.h"

#  include <malloc.h>
#  include <string.h>
#  include <stdlib.h>

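// For reference, /proc/self/statm is a single line of space-separated page
// counts ("size resident shared text lib data dt"), e.g.
// "34060 7324 2156 1837 0 14208 0". Field 0 is the total program size and
// field 1 the resident set size, which is why the values read below are
// scaled by getpagesize().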
[[nodiscard]] static nsresult GetProcSelfStatmField(int aField, int64_t* aN) {
  // There are more than two fields, but we're only interested in the first
  // two.
  static const int MAX_FIELD = 2;
  size_t fields[MAX_FIELD];
  MOZ_ASSERT(aField < MAX_FIELD, "bad field number");
  FILE* f = fopen("/proc/self/statm", "r");
  if (f) {
    int nread = fscanf(f, "%zu %zu", &fields[0], &fields[1]);
    fclose(f);
    if (nread == MAX_FIELD) {
      *aN = fields[aField] * getpagesize();
      return NS_OK;
    }
  }
  return NS_ERROR_FAILURE;
}
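
// Each /proc/<pid>/smaps entry breaks a mapping's RSS down into lines such as
// "Private_Clean: 4 kB" and "Private_Dirty: 120 kB" (values illustrative);
// summing those two fields across all mappings, as below, yields the USS.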
[[nodiscard]] static nsresult GetProcSelfSmapsPrivate(int64_t* aN, pid_t aPid) {
  // You might be tempted to calculate USS by subtracting the "shared" value
  // from the "resident" value in /proc/<pid>/statm. But at least on Linux,
  // statm's "shared" value actually counts pages backed by files, which has
  // little to do with whether the pages are actually shared. /proc/self/smaps
  // on the other hand appears to give us the correct information.

  nsTArray<MemoryMapping> mappings(1024);
  MOZ_TRY(GetMemoryMappings(mappings, aPid));

  int64_t amount = 0;
  for (auto& mapping : mappings) {
    amount += mapping.Private_Clean();
    amount += mapping.Private_Dirty();
  }
  *aN = amount;
  return NS_OK;
}

#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  return GetProcSelfStatmField(0, aN);
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  return GetProcSelfStatmField(1, aN);
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, pid_t aPid = 0) {
  return GetProcSelfSmapsPrivate(aN, aPid);
}

#  ifdef HAVE_MALLINFO
#    define HAVE_SYSTEM_HEAP_REPORTER 1
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
  struct mallinfo info = mallinfo();

  // The documentation in the glibc man page makes it sound like |uordblks|
  // would suffice, but that only gets the small allocations that are put in
  // the brk heap. We need |hblkhd| as well to get the larger allocations
  // that are mmapped.
  //
  // The fields in |struct mallinfo| are all |int|, <sigh>, so it is
  // unreliable if memory usage gets high. However, the system heap size on
  // Linux should usually be zero (so long as jemalloc is enabled) so that
  // shouldn't be a problem. Nonetheless, cast the |int|s to |size_t| before
  // adding them to provide a small amount of extra overflow protection.
  *aSizeOut = size_t(info.hblkhd) + size_t(info.uordblks);
  return NS_OK;
}
#  endif
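
// Note: glibc 2.33 and later also provide mallinfo2(), which reports the same
// fields as size_t and so avoids the |int| overflow caveat described above;
// the code above still uses the classic interface.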

#elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
    defined(__OpenBSD__) || defined(__FreeBSD_kernel__)

#  include <sys/param.h>
#  include <sys/sysctl.h>
#  if defined(__DragonFly__) || defined(__FreeBSD__) || \
      defined(__FreeBSD_kernel__)
#    include <sys/user.h>
#  endif

#  include <unistd.h>

#  if defined(__NetBSD__)
#    undef KERN_PROC
#    define KERN_PROC KERN_PROC2
#    define KINFO_PROC struct kinfo_proc2
#  else
#    define KINFO_PROC struct kinfo_proc
#  endif

#  if defined(__DragonFly__)
#    define KP_SIZE(kp) (kp.kp_vm_map_size)
#    define KP_RSS(kp) (kp.kp_vm_rssize * getpagesize())
#  elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#    define KP_SIZE(kp) (kp.ki_size)
#    define KP_RSS(kp) (kp.ki_rssize * getpagesize())
#  elif defined(__NetBSD__)
#    define KP_SIZE(kp) (kp.p_vm_msize * getpagesize())
#    define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
#  elif defined(__OpenBSD__)
#    define KP_SIZE(kp) \
      ((kp.p_vm_dsize + kp.p_vm_ssize + kp.p_vm_tsize) * getpagesize())
#    define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
#  endif
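
// Whatever the per-OS kinfo_proc layout, the KP_SIZE and KP_RSS macros above
// normalize to byte counts: DragonFly's kp_vm_map_size and FreeBSD's ki_size
// are already in bytes, while every field multiplied by getpagesize() is a
// page count.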

[[nodiscard]] static nsresult GetKinfoProcSelf(KINFO_PROC* aProc) {
#  if defined(__OpenBSD__) && defined(MOZ_SANDBOX)
  static LazyLogModule sPledgeLog("SandboxPledge");
  MOZ_LOG(sPledgeLog, LogLevel::Debug,
          ("%s called when pledged, returning NS_ERROR_FAILURE\n", __func__));
  return NS_ERROR_FAILURE;
#  endif
  int mib[] = {
      CTL_KERN,
      KERN_PROC,
      KERN_PROC_PID,
      getpid(),
#  if defined(__NetBSD__) || defined(__OpenBSD__)
      sizeof(KINFO_PROC),
      1,
#  endif
  };
  u_int miblen = sizeof(mib) / sizeof(mib[0]);
  size_t size = sizeof(KINFO_PROC);
  if (sysctl(mib, miblen, aProc, &size, nullptr, 0)) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}

#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  KINFO_PROC proc;
  nsresult rv = GetKinfoProcSelf(&proc);
  if (NS_SUCCEEDED(rv)) {
    *aN = KP_SIZE(proc);
  }
  return rv;
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  KINFO_PROC proc;
  nsresult rv = GetKinfoProcSelf(&proc);
  if (NS_SUCCEEDED(rv)) {
    *aN = KP_RSS(proc);
  }
  return rv;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}

#  ifdef __FreeBSD__
#    include <libutil.h>
#    include <algorithm>

[[nodiscard]] static nsresult GetKinfoVmentrySelf(int64_t* aPrss,
                                                  uint64_t* aMaxreg) {
  int cnt;
  struct kinfo_vmentry* vmmap;
  struct kinfo_vmentry* kve;
  if (!(vmmap = kinfo_getvmmap(getpid(), &cnt))) {
    return NS_ERROR_FAILURE;
  }
  if (aPrss) {
    *aPrss = 0;
  }
  if (aMaxreg) {
    *aMaxreg = 0;
  }

  for (int i = 0; i < cnt; i++) {
    kve = &vmmap[i];
    if (aPrss) {
      *aPrss += kve->kve_private_resident;
    }
    if (aMaxreg) {
      *aMaxreg = std::max(*aMaxreg, kve->kve_end - kve->kve_start);
    }
  }

  free(vmmap);
  return NS_OK;
}

#    define HAVE_PRIVATE_REPORTER 1
[[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
  int64_t priv;
  nsresult rv = GetKinfoVmentrySelf(&priv, nullptr);
  NS_ENSURE_SUCCESS(rv, rv);
  *aN = priv * getpagesize();
  return NS_OK;
}

#    define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
[[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
    int64_t* aN) {
  uint64_t biggestRegion;
  nsresult rv = GetKinfoVmentrySelf(nullptr, &biggestRegion);
  if (NS_SUCCEEDED(rv)) {
    *aN = biggestRegion;
  }
  return NS_OK;
}
#  endif  // FreeBSD

#elif defined(SOLARIS)

#  include <procfs.h>
#  include <fcntl.h>
#  include <unistd.h>

static void XMappingIter(int64_t& aVsize, int64_t& aResident,
                         int64_t& aShared) {
  aVsize = -1;
  aResident = -1;
  aShared = -1;
  int mapfd = open("/proc/self/xmap", O_RDONLY);
  struct stat st;
  prxmap_t* prmapp = nullptr;
  if (mapfd >= 0) {
    if (!fstat(mapfd, &st)) {
      int nmap = st.st_size / sizeof(prxmap_t);
      while (1) {
        // stat(2) on /proc/<pid>/xmap returns an incorrect value,
        // prior to the release of Solaris 11.
        // Here is a workaround for it.
        nmap *= 2;
        prmapp = (prxmap_t*)malloc((nmap + 1) * sizeof(prxmap_t));
        if (!prmapp) {
          // out of memory
          break;
        }

        int n = pread(mapfd, prmapp, (nmap + 1) * sizeof(prxmap_t), 0);
        if (n < 0) {
          break;
        }

        if (nmap >= n / sizeof(prxmap_t)) {
          aVsize = 0;
          aResident = 0;
          aShared = 0;
          for (int i = 0; i < n / sizeof(prxmap_t); i++) {
            aVsize += prmapp[i].pr_size;
            aResident += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
            if (prmapp[i].pr_mflags & MA_SHARED) {
              aShared += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
            }
          }
          break;
        }
        free(prmapp);
      }
      free(prmapp);
    }
    close(mapfd);
  }
}

#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (vsize == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = vsize;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (resident == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = resident;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (resident == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = resident - shared;
  return NS_OK;
}

#elif defined(XP_MACOSX)

#  include <mach/mach_init.h>
#  include <mach/mach_vm.h>
#  include <mach/shared_region.h>
#  include <mach/task.h>
#  include <sys/sysctl.h>

[[nodiscard]] static bool GetTaskBasicInfo(struct task_basic_info* aTi) {
  mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
  kern_return_t kr =
      task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)aTi, &count);
  return kr == KERN_SUCCESS;
}

// The VSIZE figure on Mac includes huge amounts of shared memory and is always
// absurdly high, e.g. 2GB+ even at start-up. But both 'top' and 'ps' report
// it, so we might as well too.
#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  task_basic_info ti;
  if (!GetTaskBasicInfo(&ti)) {
    return NS_ERROR_FAILURE;
  }
  *aN = ti.virtual_size;
  return NS_OK;
}

// If we're using jemalloc on Mac, we need to instruct jemalloc to purge the
// pages it has madvise(MADV_FREE)'d before we read our RSS in order to get
// an accurate result. The OS will take away MADV_FREE'd pages when there's
// memory pressure, so ideally, they shouldn't count against our RSS.
//
// Purging these pages can take a long time for some users (see bug 789975),
// so we provide the option to get the RSS without purging first.
[[nodiscard]] static nsresult ResidentDistinguishedAmountHelper(int64_t* aN,
                                                                bool aDoPurge) {
#  ifdef HAVE_JEMALLOC_STATS
  if (aDoPurge) {
    Telemetry::AutoTimer<Telemetry::MEMORY_FREE_PURGED_PAGES_MS> timer;
    jemalloc_purge_freed_pages();
  }
#  endif

  task_basic_info ti;
  if (!GetTaskBasicInfo(&ti)) {
    return NS_ERROR_FAILURE;
  }
  *aN = ti.resident_size;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ false);
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ true);
}

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1

static bool InSharedRegion(mach_vm_address_t aAddr, cpu_type_t aType) {
  mach_vm_address_t base;
  mach_vm_address_t size;

  switch (aType) {
    case CPU_TYPE_ARM:
      base = SHARED_REGION_BASE_ARM;
      size = SHARED_REGION_SIZE_ARM;
      break;
    case CPU_TYPE_ARM64:
      base = SHARED_REGION_BASE_ARM64;
      size = SHARED_REGION_SIZE_ARM64;
      break;
    case CPU_TYPE_I386:
      base = SHARED_REGION_BASE_I386;
      size = SHARED_REGION_SIZE_I386;
      break;
    case CPU_TYPE_X86_64:
      base = SHARED_REGION_BASE_X86_64;
      size = SHARED_REGION_SIZE_X86_64;
      break;
    default:
      return false;
  }

  return base <= aAddr && aAddr < (base + size);
}
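
// The shared region is where the OS maps the dyld shared cache of system
// libraries into every process, so the loop below skips pages there unless a
// region is SM_PRIVATE, keeping them out of the unique set size.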
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, mach_port_t aPort = 0) {
  if (!aN) {
    return NS_ERROR_FAILURE;
  }

  cpu_type_t cpu_type;
  size_t len = sizeof(cpu_type);
  if (sysctlbyname("sysctl.proc_cputype", &cpu_type, &len, NULL, 0) != 0) {
    return NS_ERROR_FAILURE;
  }

  // Roughly based on libtop_update_vm_regions in
  // http://www.opensource.apple.com/source/top/top-100.1.2/libtop.c
  size_t privatePages = 0;
  mach_vm_size_t topSize = 0;
  for (mach_vm_address_t addr = MACH_VM_MIN_ADDRESS;; addr += topSize) {
    vm_region_top_info_data_t topInfo;
    mach_msg_type_number_t topInfoCount = VM_REGION_TOP_INFO_COUNT;
    mach_port_t topObjectName;

    kern_return_t kr = mach_vm_region(
        aPort ? aPort : mach_task_self(), &addr, &topSize, VM_REGION_TOP_INFO,
        reinterpret_cast<vm_region_info_t>(&topInfo), &topInfoCount,
        &topObjectName);
    if (kr == KERN_INVALID_ADDRESS) {
      // Done iterating VM regions.
      break;
    } else if (kr != KERN_SUCCESS) {
      return NS_ERROR_FAILURE;
    }

    if (InSharedRegion(addr, cpu_type) && topInfo.share_mode != SM_PRIVATE) {
      continue;
    }

    switch (topInfo.share_mode) {
      case SM_LARGE_PAGE:
        // NB: Large pages are not shareable and always resident.
      case SM_PRIVATE:
        privatePages += topInfo.private_pages_resident;
        privatePages += topInfo.shared_pages_resident;
        break;
      case SM_COW:
        privatePages += topInfo.private_pages_resident;
        if (topInfo.ref_count == 1) {
          // Treat copy-on-write pages as private if they only have one
          // reference.
          privatePages += topInfo.shared_pages_resident;
        }
        break;
      case SM_SHARED: {
        // Using mprotect() or similar to protect a page in the middle of a
        // mapping can create aliased mappings. They look like shared mappings
        // to the VM_REGION_TOP_INFO interface, so re-check with
        // VM_REGION_EXTENDED_INFO.

        mach_vm_size_t exSize = 0;
        vm_region_extended_info_data_t exInfo;
        mach_msg_type_number_t exInfoCount = VM_REGION_EXTENDED_INFO_COUNT;
        mach_port_t exObjectName;
        kr = mach_vm_region(aPort ? aPort : mach_task_self(), &addr, &exSize,
                            VM_REGION_EXTENDED_INFO,
                            reinterpret_cast<vm_region_info_t>(&exInfo),
                            &exInfoCount, &exObjectName);
        if (kr == KERN_INVALID_ADDRESS) {
          // Done iterating VM regions.
          break;
        } else if (kr != KERN_SUCCESS) {
          return NS_ERROR_FAILURE;
        }

        if (exInfo.share_mode == SM_PRIVATE_ALIASED) {
          privatePages += exInfo.pages_resident;
        }
        break;
      }
      default:
        break;
    }
  }

  vm_size_t pageSize;
  if (host_page_size(aPort ? aPort : mach_task_self(), &pageSize) !=
      KERN_SUCCESS) {
    pageSize = PAGE_SIZE;
  }

  *aN = privatePages * pageSize;
  return NS_OK;
}

[[nodiscard]] static nsresult PhysicalFootprintAmount(int64_t* aN,
                                                      mach_port_t aPort = 0) {
  MOZ_ASSERT(aN);

  // The phys_footprint value (introduced in 10.11) of the TASK_VM_INFO data
  // matches the value in the 'Memory' column of the Activity Monitor.
  task_vm_info_data_t task_vm_info;
  mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
  kern_return_t kr = task_info(aPort ? aPort : mach_task_self(), TASK_VM_INFO,
                               (task_info_t)&task_vm_info, &count);
  if (kr != KERN_SUCCESS) {
    return NS_ERROR_FAILURE;
  }

  *aN = task_vm_info.phys_footprint;
  return NS_OK;
}

#elif defined(XP_WIN)

#  include <windows.h>
#  include <psapi.h>
#  include <algorithm>

#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  MEMORYSTATUSEX s;
  s.dwLength = sizeof(s);

  if (!GlobalMemoryStatusEx(&s)) {
    return NS_ERROR_FAILURE;
  }

  *aN = s.ullTotalVirtual - s.ullAvailVirtual;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  PROCESS_MEMORY_COUNTERS pmc;
  pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS);

  if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) {
    return NS_ERROR_FAILURE;
  }

  *aN = pmc.WorkingSetSize;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1

[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, HANDLE aProcess = nullptr) {
  // Determine how many entries we need.
  PSAPI_WORKING_SET_INFORMATION tmp;
  DWORD tmpSize = sizeof(tmp);
  memset(&tmp, 0, tmpSize);

  HANDLE proc = aProcess ? aProcess : GetCurrentProcess();
  QueryWorkingSet(proc, &tmp, tmpSize);

  // Fudge the size in case new entries are added between calls.
  size_t entries = tmp.NumberOfEntries * 2;

  if (!entries) {
    return NS_ERROR_FAILURE;
  }

  DWORD infoArraySize = tmpSize + (entries * sizeof(PSAPI_WORKING_SET_BLOCK));
  UniqueFreePtr<PSAPI_WORKING_SET_INFORMATION> infoArray(
      static_cast<PSAPI_WORKING_SET_INFORMATION*>(malloc(infoArraySize)));

  if (!infoArray) {
    return NS_ERROR_FAILURE;
  }

  if (!QueryWorkingSet(proc, infoArray.get(), infoArraySize)) {
    return NS_ERROR_FAILURE;
  }

  entries = static_cast<size_t>(infoArray->NumberOfEntries);
  size_t privatePages = 0;
  for (size_t i = 0; i < entries; i++) {
    // Count shared pages that only one process is using as private.
    if (!infoArray->WorkingSetInfo[i].Shared ||
        infoArray->WorkingSetInfo[i].ShareCount <= 1) {
      privatePages++;
    }
  }

  SYSTEM_INFO si;
  GetSystemInfo(&si);

  *aN = privatePages * si.dwPageSize;
  return NS_OK;
}

#  define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
[[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
    int64_t* aN) {
  SIZE_T biggestRegion = 0;
  MEMORY_BASIC_INFORMATION vmemInfo = {0};
  for (size_t currentAddress = 0;;) {
    if (!VirtualQuery((LPCVOID)currentAddress, &vmemInfo, sizeof(vmemInfo))) {
      // Something went wrong, just return whatever we've got already.
      break;
    }

    if (vmemInfo.State == MEM_FREE) {
      biggestRegion = std::max(biggestRegion, vmemInfo.RegionSize);
    }

    SIZE_T lastAddress = currentAddress;
    currentAddress += vmemInfo.RegionSize;

    // If we overflow, we've examined all of the address space.
    if (currentAddress < lastAddress) {
      break;
    }
  }

  *aN = biggestRegion;
  return NS_OK;
}

#  define HAVE_PRIVATE_REPORTER 1
[[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
  PROCESS_MEMORY_COUNTERS_EX pmcex;
  pmcex.cb = sizeof(PROCESS_MEMORY_COUNTERS_EX);

  if (!GetProcessMemoryInfo(GetCurrentProcess(),
                            (PPROCESS_MEMORY_COUNTERS)&pmcex, sizeof(pmcex))) {
    return NS_ERROR_FAILURE;
  }

  *aN = pmcex.PrivateUsage;
  return NS_OK;
}

#  define HAVE_SYSTEM_HEAP_REPORTER 1
// Windows can have multiple separate heaps, but we should not touch non-default
// heaps because they may be destroyed at any time while we hold a handle. So we
// count only the default heap.
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
  HANDLE heap = GetProcessHeap();

  NS_ENSURE_TRUE(HeapLock(heap), NS_ERROR_FAILURE);

  int64_t heapSize = 0;
  PROCESS_HEAP_ENTRY entry;
  entry.lpData = nullptr;
  while (HeapWalk(heap, &entry)) {
    // We don't count entry.cbOverhead, because we just want to measure the
    // space available to the program.
    if (entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) {
      heapSize += entry.cbData;
    }
  }

  // Check this result only after unlocking the heap, so that we don't leave
  // the heap locked if there was an error.
  DWORD lastError = GetLastError();

  // I have no idea how things would proceed if unlocking this heap failed...
  NS_ENSURE_TRUE(HeapUnlock(heap), NS_ERROR_FAILURE);

  NS_ENSURE_TRUE(lastError == ERROR_NO_MORE_ITEMS, NS_ERROR_FAILURE);

  *aSizeOut = heapSize;
  return NS_OK;
}

struct SegmentKind {
  DWORD mState;
  DWORD mType;
  DWORD mProtect;
  int mIsStack;
};

struct SegmentEntry : public PLDHashEntryHdr {
  static PLDHashNumber HashKey(const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    return mozilla::HashGeneric(kind->mState, kind->mType, kind->mProtect,
                                kind->mIsStack);
  }

  static bool MatchEntry(const PLDHashEntryHdr* aEntry, const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    auto entry = static_cast<const SegmentEntry*>(aEntry);
    return kind->mState == entry->mKind.mState &&
           kind->mType == entry->mKind.mType &&
           kind->mProtect == entry->mKind.mProtect &&
           kind->mIsStack == entry->mKind.mIsStack;
  }

  static void InitEntry(PLDHashEntryHdr* aEntry, const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    auto entry = static_cast<SegmentEntry*>(aEntry);
    entry->mKind = *kind;
    entry->mCount = 0;
    entry->mSize = 0;
  }

  static const PLDHashTableOps Ops;

  SegmentKind mKind;  // The segment kind.
  uint32_t mCount;    // The number of segments of this kind.
  size_t mSize;       // The combined size of segments of this kind.
};

/* static */ const PLDHashTableOps SegmentEntry::Ops = {
    SegmentEntry::HashKey, SegmentEntry::MatchEntry,
    PLDHashTable::MoveEntryStub, PLDHashTable::ClearEntryStub,
    SegmentEntry::InitEntry};
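
// The reporter below folds every VM region into one of these SegmentEntry
// buckets and emits one report per bucket, with paths of the form
// "address-space/commit/private/readwrite+stack(segments=42)", where the
// segment count shown is illustrative.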
class WindowsAddressSpaceReporter final : public nsIMemoryReporter {
  ~WindowsAddressSpaceReporter() {}

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    // First iterate over all the segments and record how many of each kind
    // there were and their aggregate sizes. We use a hash table for this
    // because there are a couple of dozen different kinds possible.

    PLDHashTable table(&SegmentEntry::Ops, sizeof(SegmentEntry));
    MEMORY_BASIC_INFORMATION info = {0};
    bool isPrevSegStackGuard = false;
    for (size_t currentAddress = 0;;) {
      if (!VirtualQuery((LPCVOID)currentAddress, &info, sizeof(info))) {
        // Something went wrong, just return whatever we've got already.
        break;
      }

      size_t size = info.RegionSize;

      // Note that |type| and |protect| are ignored in some cases.
      DWORD state = info.State;
      DWORD type =
          (state == MEM_RESERVE || state == MEM_COMMIT) ? info.Type : 0;
      DWORD protect = (state == MEM_COMMIT) ? info.Protect : 0;
      bool isStack = isPrevSegStackGuard && state == MEM_COMMIT &&
                     type == MEM_PRIVATE && protect == PAGE_READWRITE;

      SegmentKind kind = {state, type, protect, isStack ? 1 : 0};
      auto entry =
          static_cast<SegmentEntry*>(table.Add(&kind, mozilla::fallible));
      if (entry) {
        entry->mCount += 1;
        entry->mSize += size;
      }

      isPrevSegStackGuard = info.State == MEM_COMMIT &&
                            info.Type == MEM_PRIVATE &&
                            info.Protect == (PAGE_READWRITE | PAGE_GUARD);

      size_t lastAddress = currentAddress;
      currentAddress += size;

      // If we overflow, we've examined all of the address space.
      if (currentAddress < lastAddress) {
        break;
      }
    }

    // Then iterate over the hash table and report the details for each segment
    // kind.

    for (auto iter = table.Iter(); !iter.Done(); iter.Next()) {
      // For each range of pages, we consider one or more of its State, Type
      // and Protect values. These are documented at
      // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx
      // (for State and Type) and
      // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx
      // (for Protect).
      //
      // Not all State values have accompanying Type and Protection values.
      bool doType = false;
      bool doProtect = false;

      auto entry = static_cast<const SegmentEntry*>(iter.Get());

      nsCString path("address-space");

      switch (entry->mKind.mState) {
        case MEM_FREE:
          path.AppendLiteral("/free");
          break;

        case MEM_RESERVE:
          path.AppendLiteral("/reserved");
          doType = true;
          break;

        case MEM_COMMIT:
          path.AppendLiteral("/commit");
          doType = true;
          doProtect = true;
          break;

        default:
          // Should be impossible, but handle it just in case.
          path.AppendLiteral("/???");
          break;
      }

      if (doType) {
        switch (entry->mKind.mType) {
          case MEM_IMAGE:
            path.AppendLiteral("/image");
            break;

          case MEM_MAPPED:
            path.AppendLiteral("/mapped");
            break;

          case MEM_PRIVATE:
            path.AppendLiteral("/private");
            break;

          default:
            // Should be impossible, but handle it just in case.
            path.AppendLiteral("/???");
            break;
        }
      }

      if (doProtect) {
        DWORD protect = entry->mKind.mProtect;
        // Basic attributes. Exactly one of these should be set.
        if (protect & PAGE_EXECUTE) {
          path.AppendLiteral("/execute");
        }
        if (protect & PAGE_EXECUTE_READ) {
          path.AppendLiteral("/execute-read");
        }
        if (protect & PAGE_EXECUTE_READWRITE) {
          path.AppendLiteral("/execute-readwrite");
        }
        if (protect & PAGE_EXECUTE_WRITECOPY) {
          path.AppendLiteral("/execute-writecopy");
        }
        if (protect & PAGE_NOACCESS) {
          path.AppendLiteral("/noaccess");
        }
        if (protect & PAGE_READONLY) {
          path.AppendLiteral("/readonly");
        }
        if (protect & PAGE_READWRITE) {
          path.AppendLiteral("/readwrite");
        }
        if (protect & PAGE_WRITECOPY) {
          path.AppendLiteral("/writecopy");
        }

        // Modifiers. At most one of these should be set.
        if (protect & PAGE_GUARD) {
          path.AppendLiteral("+guard");
        }
        if (protect & PAGE_NOCACHE) {
          path.AppendLiteral("+nocache");
        }
        if (protect & PAGE_WRITECOMBINE) {
          path.AppendLiteral("+writecombine");
        }
      }

      // Annotate likely stack segments, too.
      if (entry->mKind.mIsStack) {
        path.AppendLiteral("+stack");
      }

      // Append the segment count.
      path.AppendPrintf("(segments=%u)", entry->mCount);

      aHandleReport->Callback(""_ns, path, KIND_OTHER, UNITS_BYTES,
                              entry->mSize, "From MEMORY_BASIC_INFORMATION."_ns,
                              aData);
    }

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(WindowsAddressSpaceReporter, nsIMemoryReporter)

#endif  // XP_<PLATFORM>

#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
class VsizeMaxContiguousReporter final : public nsIMemoryReporter {
  ~VsizeMaxContiguousReporter() {}

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(VsizeMaxContiguousDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "vsize-max-contiguous", KIND_OTHER, UNITS_BYTES, amount,
          "Size of the maximum contiguous block of available virtual memory.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(VsizeMaxContiguousReporter, nsIMemoryReporter)
#endif

#ifdef HAVE_PRIVATE_REPORTER
class PrivateReporter final : public nsIMemoryReporter {
  ~PrivateReporter() {}

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(PrivateDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "private", KIND_OTHER, UNITS_BYTES, amount,
          "Memory that cannot be shared with other processes, including memory that is "
          "committed and marked MEM_PRIVATE, data that is not mapped, and executable "
          "pages that have been written to.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PrivateReporter, nsIMemoryReporter)
#endif

#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
class VsizeReporter final : public nsIMemoryReporter {
  ~VsizeReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(VsizeDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "vsize", KIND_OTHER, UNITS_BYTES, amount,
          "Memory mapped by the process, including code and data segments, the heap, "
          "thread stacks, memory explicitly mapped by the process via mmap and similar "
          "operations, and memory shared with other processes. This is the vsize figure "
          "as reported by 'top' and 'ps'. This figure is of limited use on Mac, where "
          "processes share huge amounts of memory with one another. But even on other "
          "operating systems, 'resident' is a much better measure of the memory "
          "resources used by the process.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(VsizeReporter, nsIMemoryReporter)

class ResidentReporter final : public nsIMemoryReporter {
  ~ResidentReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(ResidentDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "resident", KIND_OTHER, UNITS_BYTES, amount,
          "Memory mapped by the process that is present in physical memory, also known "
          "as the resident set size (RSS). This is the best single figure to use when "
          "considering the memory resources used by the process, but it depends both on "
          "other processes being run and details of the OS kernel and so is best used "
          "for comparing the memory usage of a single process at different points in "
          "time.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentReporter, nsIMemoryReporter)

#endif  // HAVE_VSIZE_AND_RESIDENT_REPORTERS

#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
class ResidentUniqueReporter final : public nsIMemoryReporter {
  ~ResidentUniqueReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount = 0;
    // clang-format off
    if (NS_SUCCEEDED(ResidentUniqueDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "resident-unique", KIND_OTHER, UNITS_BYTES, amount,
          "Memory mapped by the process that is present in physical memory and not "
          "shared with any other processes. This is also known as the process's unique "
          "set size (USS). This is the amount of RAM we'd expect to be freed if we "
          "closed this process.");
    }
#ifdef XP_MACOSX
    if (NS_SUCCEEDED(PhysicalFootprintAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "resident-phys-footprint", KIND_OTHER, UNITS_BYTES, amount,
          "Memory footprint reported by MacOS's task_info API's phys_footprint field. "
          "This matches the memory column in Activity Monitor.");
    }
#endif
    // clang-format on
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentUniqueReporter, nsIMemoryReporter)

#endif  // HAVE_RESIDENT_UNIQUE_REPORTER

#ifdef HAVE_SYSTEM_HEAP_REPORTER

class SystemHeapReporter final : public nsIMemoryReporter {
  ~SystemHeapReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(SystemHeapSize(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "system-heap-allocated", KIND_OTHER, UNITS_BYTES, amount,
          "Memory used by the system allocator that is currently allocated to the "
          "application. This is distinct from the jemalloc heap that Firefox uses for "
          "most or all of its heap allocations. Ideally this number is zero, but "
          "on some platforms we cannot force every heap allocation through jemalloc.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(SystemHeapReporter, nsIMemoryReporter)
#endif  // HAVE_SYSTEM_HEAP_REPORTER

#ifdef XP_UNIX

#  include <sys/resource.h>

#  define HAVE_RESIDENT_PEAK_REPORTER 1

[[nodiscard]] static nsresult ResidentPeakDistinguishedAmount(int64_t* aN) {
  struct rusage usage;
  if (0 == getrusage(RUSAGE_SELF, &usage)) {
    // The units for ru_maxrss:
    // - Mac: bytes
    // - Solaris: pages? But according to some sources it actually always
    //   returns 0, so check for that
    // - Linux, {Net/Open/Free}BSD, DragonFly: KiB
#  ifdef XP_MACOSX
    *aN = usage.ru_maxrss;
#  elif defined(SOLARIS)
    *aN = usage.ru_maxrss * getpagesize();
#  else
    *aN = usage.ru_maxrss * 1024;
#  endif
    if (*aN > 0) {
      return NS_OK;
    }
  }
  return NS_ERROR_FAILURE;
}

class ResidentPeakReporter final : public nsIMemoryReporter {
  ~ResidentPeakReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount = 0;
    if (NS_SUCCEEDED(ResidentPeakDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "resident-peak", KIND_OTHER, UNITS_BYTES, amount,
          "The peak 'resident' value for the lifetime of the process.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentPeakReporter, nsIMemoryReporter)

#  define HAVE_PAGE_FAULT_REPORTERS 1

class PageFaultsSoftReporter final : public nsIMemoryReporter {
  ~PageFaultsSoftReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    struct rusage usage;
    int err = getrusage(RUSAGE_SELF, &usage);
    if (err == 0) {
      int64_t amount = usage.ru_minflt;
      // clang-format off
      MOZ_COLLECT_REPORT(
          "page-faults-soft", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
          "The number of soft page faults (also known as 'minor page faults') that "
          "have occurred since the process started. A soft page fault occurs when the "
          "process tries to access a page which is present in physical memory but is "
          "not mapped into the process's address space. For instance, a process might "
          "observe soft page faults when it loads a shared library which is already "
          "present in physical memory. A process may experience many thousands of soft "
          "page faults even when the machine has plenty of available physical memory, "
          "and because the OS services a soft page fault without accessing the disk, "
          "they impact performance much less than hard page faults.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PageFaultsSoftReporter, nsIMemoryReporter)

[[nodiscard]] static nsresult PageFaultsHardDistinguishedAmount(
    int64_t* aAmount) {
  struct rusage usage;
  int err = getrusage(RUSAGE_SELF, &usage);
  if (err != 0) {
    return NS_ERROR_FAILURE;
  }
  *aAmount = usage.ru_majflt;
  return NS_OK;
}

class PageFaultsHardReporter final : public nsIMemoryReporter {
  ~PageFaultsHardReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount = 0;
    if (NS_SUCCEEDED(PageFaultsHardDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "page-faults-hard", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
          "The number of hard page faults (also known as 'major page faults') that have "
          "occurred since the process started. A hard page fault occurs when a process "
          "tries to access a page which is not present in physical memory. The "
          "operating system must access the disk in order to fulfill a hard page fault. "
          "When memory is plentiful, you should see very few hard page faults. But if "
          "the process tries to use more memory than your machine has available, you "
          "may see many thousands of hard page faults. Because accessing the disk is up "
          "to a million times slower than accessing RAM, the program may run very "
          "slowly when it is experiencing more than 100 or so hard page faults a "
          "second.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PageFaultsHardReporter, nsIMemoryReporter)

#endif  // XP_UNIX

/*
** memory reporter implementation for jemalloc and OSX malloc,
** to obtain info on total memory in use (that we know about,
** at least -- on OSX, there are sometimes other zones in use).
*/

#ifdef HAVE_JEMALLOC_STATS

static size_t HeapOverhead(jemalloc_stats_t* aStats) {
  return aStats->waste + aStats->bookkeeping + aStats->page_cache +
         aStats->bin_unused;
}

// This has UNITS_PERCENTAGE, so it is multiplied by 100x *again* on top of the
// 100x for the percentage.
static int64_t HeapOverheadFraction(jemalloc_stats_t* aStats) {
  size_t heapOverhead = HeapOverhead(aStats);
  size_t heapCommitted = aStats->allocated + heapOverhead;
  return int64_t(10000 * (heapOverhead / (double)heapCommitted));
}
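
// Worked example: 10 MiB of overhead on top of 90 MiB allocated gives
// heapCommitted = 100 MiB, so the function above returns 10000 * 0.1 = 1000,
// which the UNITS_PERCENTAGE machinery renders as 10%.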

class JemallocHeapReporter final : public nsIMemoryReporter {
  ~JemallocHeapReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    jemalloc_stats_t stats;
    const size_t num_bins = jemalloc_stats_num_bins();
    nsTArray<jemalloc_bin_stats_t> bin_stats(num_bins);
    bin_stats.SetLength(num_bins);
    jemalloc_stats(&stats, bin_stats.Elements());

    // clang-format off
    MOZ_COLLECT_REPORT(
        "heap-committed/allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
        "Memory mapped by the heap allocator that is currently allocated to the "
        "application. This may exceed the amount of memory requested by the "
        "application because the allocator regularly rounds up request sizes. (The "
        "exact amount requested is not recorded.)");

    MOZ_COLLECT_REPORT(
        "heap-allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
        "The same as 'heap-committed/allocated'.");

    // We mark this and the other heap-overhead reporters as KIND_NONHEAP
    // because KIND_HEAP memory means "counted in heap-allocated", which
    // this is not.
    for (auto& bin : bin_stats) {
      MOZ_ASSERT(bin.size);
      nsPrintfCString path("explicit/heap-overhead/bin-unused/bin-%zu",
                           bin.size);
      aHandleReport->Callback(EmptyCString(), path, KIND_NONHEAP, UNITS_BYTES,
                              bin.bytes_unused,
                              nsLiteralCString(
                                  "Unused bytes in all runs of all bins for this size class"),
                              aData);
    }

    if (stats.waste > 0) {
      MOZ_COLLECT_REPORT(
          "explicit/heap-overhead/waste", KIND_NONHEAP, UNITS_BYTES,
          stats.waste,
          "Committed bytes which do not correspond to an active allocation and which the "
          "allocator is not intentionally keeping alive (i.e., not "
          "'explicit/heap-overhead/{bookkeeping,page-cache,bin-unused}').");
    }

    MOZ_COLLECT_REPORT(
        "explicit/heap-overhead/bookkeeping", KIND_NONHEAP, UNITS_BYTES,
        stats.bookkeeping,
        "Committed bytes which the heap allocator uses for internal data structures.");

    MOZ_COLLECT_REPORT(
        "explicit/heap-overhead/page-cache", KIND_NONHEAP, UNITS_BYTES,
        stats.page_cache,
        "Memory which the allocator could return to the operating system, but hasn't. "
        "The allocator keeps this memory around as an optimization, so it doesn't "
        "have to ask the OS the next time it needs to fulfill a request. This value "
        "is typically not larger than a few megabytes.");

    MOZ_COLLECT_REPORT(
        "heap-committed/overhead", KIND_OTHER, UNITS_BYTES,
        HeapOverhead(&stats),
        "The sum of 'explicit/heap-overhead/*'.");

    MOZ_COLLECT_REPORT(
        "heap-mapped", KIND_OTHER, UNITS_BYTES, stats.mapped,
        "Amount of memory currently mapped. Includes memory that is uncommitted, i.e. "
        "neither in physical memory nor paged to disk.");

    MOZ_COLLECT_REPORT(
        "heap-chunksize", KIND_OTHER, UNITS_BYTES, stats.chunksize,
        "Size of chunks.");
    // clang-format on

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(JemallocHeapReporter, nsIMemoryReporter)

#endif  // HAVE_JEMALLOC_STATS

// Why is this here? At first glance, you'd think it could be defined and
// registered with nsMemoryReporterManager entirely within nsAtomTable.cpp.
// However, the obvious time to register it is when the table is initialized,
// and that happens before XPCOM components are initialized, which means the
// RegisterStrongMemoryReporter call fails. So instead we do it here.
class AtomTablesReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  ~AtomTablesReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    AtomsSizes sizes;
    NS_AddSizeOfAtoms(MallocSizeOf, sizes);

    MOZ_COLLECT_REPORT("explicit/atoms/table", KIND_HEAP, UNITS_BYTES,
                       sizes.mTable, "Memory used by the atom table.");

    MOZ_COLLECT_REPORT(
        "explicit/atoms/dynamic-objects-and-chars", KIND_HEAP, UNITS_BYTES,
        sizes.mDynamicAtoms,
        "Memory used by dynamic atom objects and chars (which are stored "
        "at the end of each atom object).");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(AtomTablesReporter, nsIMemoryReporter)

class ThreadsReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
  ~ThreadsReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
#ifdef XP_LINUX
    nsTArray<MemoryMapping> mappings(1024);
    MOZ_TRY(GetMemoryMappings(mappings));
#endif

    // Enumerating over active threads requires holding a lock, so we collect
    // info on all threads, and then call our reporter callbacks after releasing
    // the lock.
    struct ThreadData {
      nsCString mName;
      uint32_t mThreadId;
      size_t mPrivateSize;
    };
    AutoTArray<ThreadData, 32> threads;

    size_t eventQueueSizes = 0;
    size_t wrapperSizes = 0;
    size_t threadCount = 0;

    for (auto* thread : nsThread::Enumerate()) {
      threadCount++;
      eventQueueSizes += thread->SizeOfEventQueues(MallocSizeOf);
      wrapperSizes += thread->ShallowSizeOfIncludingThis(MallocSizeOf);

      if (!thread->StackBase()) {
        continue;
      }

#if defined(XP_LINUX)
      int idx = mappings.BinaryIndexOf(thread->StackBase());
      if (idx < 0) {
        continue;
      }
      // Referenced() is the combined size of all pages in the region which have
      // ever been touched, and are therefore consuming memory. For stack
      // regions, these pages are guaranteed to be un-shared unless we fork
      // after creating threads (which we don't).
      size_t privateSize = mappings[idx].Referenced();

      // On Linux, we have to be very careful matching memory regions to thread
      // stacks.
      //
      // To begin with, the kernel only reports VM stats for regions of all
      // adjacent pages with the same flags, protection, and backing file.
      // There's no way to get finer-grained usage information for a subset of
      // those pages.
      //
      // Stack segments always have a guard page at the bottom of the stack
      // (assuming we only support stacks that grow down), so there's no danger
      // of them being merged with other stack regions. At the top, there's no
      // protection page, and no way to allocate one without using pthreads
      // directly and allocating our own stacks. So we get around the problem by
      // adding an extra VM flag (NOHUGEPAGES) to our stack region, which we
      // don't expect to be set on any heap regions. But this is not fool-proof.
      //
      // A second kink is that different C libraries (and different versions
      // thereof) report stack base locations and sizes differently with regard
      // to the guard page. For the libraries that include the guard page in the
      // stack size base pointer, we need to adjust those values to compensate.
      // But it's possible that our logic will get out of sync with library
      // changes, or someone will compile with an unexpected library.
      //
      // The upshot of all of this is that there may be configurations that our
      // special cases don't cover. And if there are, we want to know about it.
      // So assert that total size of the memory region we're reporting actually
      // matches the allocated size of the thread stack.
#  ifndef ANDROID
      MOZ_ASSERT(mappings[idx].Size() == thread->StackSize(),
                 "Mapping region size doesn't match stack allocation size");
#  endif
#elif defined(XP_WIN)
      auto memInfo = MemoryInfo::Get(thread->StackBase(), thread->StackSize());
      size_t privateSize = memInfo.Committed();
#else
      size_t privateSize = thread->StackSize();
      MOZ_ASSERT_UNREACHABLE(
          "Shouldn't have stack base pointer on this "
          "platform");
#endif

      threads.AppendElement(ThreadData{
          nsCString(PR_GetThreadName(thread->GetPRThread())),
          thread->ThreadId(),
          // On Linux, it's possible (but unlikely) that our stack region will
          // have been merged with adjacent heap regions, in which case we'll
          // get combined size information for both. So we take the minimum of
          // the reported private size and the requested stack size to avoid the
          // possibility of majorly over-reporting in that case.
          std::min(privateSize, thread->StackSize()),
      });
    }

    for (auto& thread : threads) {
      nsPrintfCString path("explicit/threads/stacks/%s (tid=%u)",
                           thread.mName.get(), thread.mThreadId);

      aHandleReport->Callback(
          ""_ns, path, KIND_NONHEAP, UNITS_BYTES, thread.mPrivateSize,
          nsLiteralCString("The sizes of thread stacks which have been "
                           "committed to memory."),
          aData);
    }

    MOZ_COLLECT_REPORT("explicit/threads/overhead/event-queues", KIND_HEAP,
                       UNITS_BYTES, eventQueueSizes,
                       "The sizes of nsThread event queues and observers.");

    MOZ_COLLECT_REPORT("explicit/threads/overhead/wrappers", KIND_HEAP,
                       UNITS_BYTES, wrapperSizes,
                       "The sizes of nsThread/PRThread wrappers.");

#if defined(XP_WIN)
    // Each thread on Windows has a fixed kernel overhead. For 32 bit Windows,
    // that's 12K. For 64 bit, it's 24K.
    //
    // See
    // https://blogs.technet.microsoft.com/markrussinovich/2009/07/05/pushing-the-limits-of-windows-processes-and-threads/
    constexpr size_t kKernelSize = (sizeof(void*) == 8 ? 24 : 12) * 1024;
#elif defined(XP_LINUX)
    // On Linux, kernel stacks are usually 8K. However, on x86, they are
    // allocated virtually, and start out at 4K. They may grow to 8K, but we
    // have no way of knowing which ones do, so all we can do is guess.
#  if defined(__x86_64__) || defined(__i386__)
    constexpr size_t kKernelSize = 4 * 1024;
#  else
    constexpr size_t kKernelSize = 8 * 1024;
#  endif
#elif defined(XP_MACOSX)
    // On Darwin, kernel stacks are 16K:
    //
    // https://books.google.com/books?id=K8vUkpOXhN4C&lpg=PA513&dq=mach%20kernel%20thread%20stack%20size&pg=PA513#v=onepage&q=mach%20kernel%20thread%20stack%20size&f=false
    constexpr size_t kKernelSize = 16 * 1024;
#else
    // Elsewhere, just assume that kernel stacks require at least 8K.
    constexpr size_t kKernelSize = 8 * 1024;
#endif
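
    // As a worked example, 100 live threads on 64-bit Windows account for
    // 100 * 24 KiB = 2400 KiB of kernel overhead in the report below.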
    MOZ_COLLECT_REPORT("explicit/threads/overhead/kernel", KIND_NONHEAP,
                       UNITS_BYTES, threadCount * kKernelSize,
                       "The total kernel overhead for all active threads.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ThreadsReporter, nsIMemoryReporter)

#ifdef DEBUG

// Ideally, this would be implemented in BlockingResourceBase.cpp.
// However, this ends up breaking the linking step of various unit tests due
// to adding a new dependency on libdmd for a commonly used feature (mutexes)
// in DMD builds. So instead we do it here.
class DeadlockDetectorReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  ~DeadlockDetectorReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    MOZ_COLLECT_REPORT(
        "explicit/deadlock-detector", KIND_HEAP, UNITS_BYTES,
        BlockingResourceBase::SizeOfDeadlockDetector(MallocSizeOf),
        "Memory used by the deadlock detector.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(DeadlockDetectorReporter, nsIMemoryReporter)

#endif

#ifdef MOZ_DMD

namespace mozilla {
namespace dmd {

class DMDReporter final : public nsIMemoryReporter {
 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    dmd::Sizes sizes;
    dmd::SizeOf(&sizes);

    MOZ_COLLECT_REPORT(
        "explicit/dmd/stack-traces/used", KIND_HEAP, UNITS_BYTES,
        sizes.mStackTracesUsed,
        "Memory used by stack traces which correspond to at least "
        "one heap block DMD is tracking.");

    MOZ_COLLECT_REPORT(
        "explicit/dmd/stack-traces/unused", KIND_HEAP, UNITS_BYTES,
        sizes.mStackTracesUnused,
        "Memory used by stack traces which don't correspond to any heap "
        "blocks DMD is currently tracking.");

    MOZ_COLLECT_REPORT("explicit/dmd/stack-traces/table", KIND_HEAP,
                       UNITS_BYTES, sizes.mStackTraceTable,
                       "Memory used by DMD's stack trace table.");

    MOZ_COLLECT_REPORT("explicit/dmd/live-block-table", KIND_HEAP, UNITS_BYTES,
                       sizes.mLiveBlockTable,
                       "Memory used by DMD's live block table.");

    MOZ_COLLECT_REPORT("explicit/dmd/dead-block-list", KIND_HEAP, UNITS_BYTES,
                       sizes.mDeadBlockTable,
                       "Memory used by DMD's dead block list.");

    return NS_OK;
  }

 private:
  ~DMDReporter() = default;
};
NS_IMPL_ISUPPORTS(DMDReporter, nsIMemoryReporter)

}  // namespace dmd
}  // namespace mozilla

#endif  // MOZ_DMD

/*
** nsMemoryReporterManager implementation
*/

NS_IMPL_ISUPPORTS(nsMemoryReporterManager, nsIMemoryReporterManager,
                  nsIMemoryReporter)

NS_IMETHODIMP
nsMemoryReporterManager::Init() {
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  // Under normal circumstances this function is only called once. However,
  // we've (infrequently) seen memory report dumps in crash reports that
  // suggest that this function is sometimes called multiple times. That in
  // turn means that multiple reporters of each kind are registered, which
  // leads to duplicated reports of individual measurements such as "resident",
  // "vsize", etc.
  //
  // It's unclear how these multiple calls can occur. The only plausible theory
  // so far is badly-written extensions, because this function is callable from
  // JS code via nsIMemoryReporter.idl.
  //
  // Whatever the cause, it's a bad thing. So we protect against it with the
  // following check.
  static bool isInited = false;
  if (isInited) {
    NS_WARNING("nsMemoryReporterManager::Init() has already been called!");
    return NS_OK;
  }
  isInited = true;

#ifdef HAVE_JEMALLOC_STATS
  RegisterStrongReporter(new JemallocHeapReporter());
#endif

#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  RegisterStrongReporter(new VsizeReporter());
  RegisterStrongReporter(new ResidentReporter());
#endif

#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
  RegisterStrongReporter(new VsizeMaxContiguousReporter());
#endif

#ifdef HAVE_RESIDENT_PEAK_REPORTER
  RegisterStrongReporter(new ResidentPeakReporter());
#endif

#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
  RegisterStrongReporter(new ResidentUniqueReporter());
#endif

#ifdef HAVE_PAGE_FAULT_REPORTERS
  RegisterStrongReporter(new PageFaultsSoftReporter());
  RegisterStrongReporter(new PageFaultsHardReporter());
#endif

#ifdef HAVE_PRIVATE_REPORTER
  RegisterStrongReporter(new PrivateReporter());
#endif

#ifdef HAVE_SYSTEM_HEAP_REPORTER
  RegisterStrongReporter(new SystemHeapReporter());
#endif

  RegisterStrongReporter(new AtomTablesReporter());

  RegisterStrongReporter(new ThreadsReporter());

#ifdef DEBUG
  RegisterStrongReporter(new DeadlockDetectorReporter());
#endif

#ifdef MOZ_GECKO_PROFILER
  // We have to register this here rather than in profiler_init() because
  // profiler_init() runs prior to nsMemoryReporterManager's creation.
  RegisterStrongReporter(new GeckoProfilerReporter());
#endif

#ifdef MOZ_DMD
  RegisterStrongReporter(new mozilla::dmd::DMDReporter());
#endif

#ifdef XP_WIN
  RegisterStrongReporter(new WindowsAddressSpaceReporter());
#endif

#ifdef XP_UNIX
  nsMemoryInfoDumper::Initialize();
#endif

  // Report our own memory usage as well.
  RegisterWeakReporter(this);

  return NS_OK;
}
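
// The reporters registered above are what populate about:memory: KIND_HEAP
// and KIND_NONHEAP reports feed the "explicit" tree, while KIND_OTHER values
// such as "resident" and "vsize" appear under "Other Measurements".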

nsMemoryReporterManager::nsMemoryReporterManager()
    : mMutex("nsMemoryReporterManager::mMutex"),
      mIsRegistrationBlocked(false),
      mStrongReporters(new StrongReportersTable()),
      mWeakReporters(new WeakReportersTable()),
      mSavedStrongReporters(nullptr),
      mSavedWeakReporters(nullptr),
      mNextGeneration(1),
      mPendingProcessesState(nullptr),
      mPendingReportersState(nullptr)
#ifdef HAVE_JEMALLOC_STATS
      ,
      mThreadPool(do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID))
#endif
{
}

nsMemoryReporterManager::~nsMemoryReporterManager() {
  delete mStrongReporters;
  delete mWeakReporters;
  NS_ASSERTION(!mSavedStrongReporters, "failed to restore strong reporters");
  NS_ASSERTION(!mSavedWeakReporters, "failed to restore weak reporters");
}

NS_IMETHODIMP
nsMemoryReporterManager::CollectReports(nsIHandleReportCallback* aHandleReport,
                                        nsISupports* aData, bool aAnonymize) {
  size_t n = MallocSizeOf(this);
  {
    mozilla::MutexAutoLock autoLock(mMutex);
    n += mStrongReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
    n += mWeakReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
  }

  MOZ_COLLECT_REPORT("explicit/memory-reporter-manager", KIND_HEAP, UNITS_BYTES,
                     n, "Memory used by the memory reporter infrastructure.");

  return NS_OK;
}

#ifdef DEBUG_CHILD_PROCESS_MEMORY_REPORTING
#  define MEMORY_REPORTING_LOG(format, ...) \
    printf_stderr("++++ MEMORY REPORTING: " format, ##__VA_ARGS__);
#else
#  define MEMORY_REPORTING_LOG(...)
#endif
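
// When DEBUG_CHILD_PROCESS_MEMORY_REPORTING is defined, the macro above
// writes lines like "++++ MEMORY REPORTING: GetReports (gen=1)" to stderr.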

NS_IMETHODIMP
nsMemoryReporterManager::GetReports(
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData, bool aAnonymize) {
  return GetReportsExtended(aHandleReport, aHandleReportData, aFinishReporting,
                            aFinishReportingData, aAnonymize,
                            /* minimize = */ false,
                            /* DMDident = */ u""_ns);
}
1766 NS_IMETHODIMP
1767 nsMemoryReporterManager::GetReportsExtended(
1768 nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
1769 nsIFinishReportingCallback* aFinishReporting,
1770 nsISupports* aFinishReportingData, bool aAnonymize, bool aMinimize,
1771 const nsAString& aDMDDumpIdent) {
1772 nsresult rv;
1774 // Memory reporters are not necessarily threadsafe, so this function must
1775 // be called from the main thread.
1776 if (!NS_IsMainThread()) {
1777 MOZ_CRASH();
1778 }
1780 uint32_t generation = mNextGeneration++;
1782 if (mPendingProcessesState) {
1783 // A request is in flight. Don't start another one. And don't report
1784 // an error; just ignore it, and let the in-flight request finish.
1785 MEMORY_REPORTING_LOG("GetReports (gen=%u, s->gen=%u): abort\n", generation,
1786 mPendingProcessesState->mGeneration);
1787 return NS_OK;
1788 }
1790 MEMORY_REPORTING_LOG("GetReports (gen=%u)\n", generation);
1792 uint32_t concurrency = Preferences::GetUint("memory.report_concurrency", 1);
1793 MOZ_ASSERT(concurrency >= 1);
1794 if (concurrency < 1) {
1795 concurrency = 1;
1796 }
1797 mPendingProcessesState = new PendingProcessesState(
1798 generation, aAnonymize, aMinimize, concurrency, aHandleReport,
1799 aHandleReportData, aFinishReporting, aFinishReportingData, aDMDDumpIdent);
1801 if (aMinimize) {
1802 nsCOMPtr<nsIRunnable> callback =
1803 NewRunnableMethod("nsMemoryReporterManager::StartGettingReports", this,
1804 &nsMemoryReporterManager::StartGettingReports);
1805 rv = MinimizeMemoryUsage(callback);
1806 } else {
1807 rv = StartGettingReports();
1808 }
1809 return rv;
1810 }
1812 // MainThread only
1813 nsresult nsMemoryReporterManager::StartGettingReports() {
1814 PendingProcessesState* s = mPendingProcessesState;
1815 nsresult rv;
1817 // Get reports for this process.
1818 FILE* parentDMDFile = nullptr;
1819 #ifdef MOZ_DMD
1820 if (!s->mDMDDumpIdent.IsEmpty()) {
1821 rv = nsMemoryInfoDumper::OpenDMDFile(s->mDMDDumpIdent, getpid(),
1822 &parentDMDFile);
1823 if (NS_WARN_IF(NS_FAILED(rv))) {
1824 // Proceed with the memory report as if DMD were disabled.
1825 parentDMDFile = nullptr;
1826 }
1827 }
1828 #endif
1830 // This is async.
1831 GetReportsForThisProcessExtended(
1832 s->mHandleReport, s->mHandleReportData, s->mAnonymize, parentDMDFile,
1833 s->mFinishReporting, s->mFinishReportingData);
1835 nsTArray<dom::ContentParent*> childWeakRefs;
1836 dom::ContentParent::GetAll(childWeakRefs);
1837 if (!childWeakRefs.IsEmpty()) {
1838 // Request memory reports from child processes. This happens
1839 // after the parent report so that the parent's main thread will
1840 // be free to process the child reports, instead of causing them
1841 // to be buffered and consume (possibly scarce) memory.
1843 for (size_t i = 0; i < childWeakRefs.Length(); ++i) {
1844 s->mChildrenPending.AppendElement(childWeakRefs[i]);
1845 }
1846 }
1848 if (gfx::GPUProcessManager* gpu = gfx::GPUProcessManager::Get()) {
1849 if (RefPtr<MemoryReportingProcess> proc = gpu->GetProcessMemoryReporter()) {
1850 s->mChildrenPending.AppendElement(proc.forget());
1851 }
1852 }
1854 if (RDDProcessManager* rdd = RDDProcessManager::Get()) {
1855 if (RefPtr<MemoryReportingProcess> proc = rdd->GetProcessMemoryReporter()) {
1856 s->mChildrenPending.AppendElement(proc.forget());
1857 }
1858 }
1860 if (gfx::VRProcessManager* vr = gfx::VRProcessManager::Get()) {
1861 if (RefPtr<MemoryReportingProcess> proc = vr->GetProcessMemoryReporter()) {
1862 s->mChildrenPending.AppendElement(proc.forget());
1863 }
1864 }
1866 if (!IsRegistrationBlocked() && net::gIOService) {
1867 if (RefPtr<MemoryReportingProcess> proc =
1868 net::gIOService->GetSocketProcessMemoryReporter()) {
1869 s->mChildrenPending.AppendElement(proc.forget());
1870 }
1871 }
1873 if (!IsRegistrationBlocked()) {
1874 if (RefPtr<UtilityProcessManager> utility =
1875 UtilityProcessManager::GetIfExists()) {
1876 for (RefPtr<UtilityProcessParent>& parent :
1877 utility->GetAllProcessesProcessParent()) {
1878 if (RefPtr<MemoryReportingProcess> proc =
1879 utility->GetProcessMemoryReporter(parent)) {
1880 s->mChildrenPending.AppendElement(proc.forget());
1881 }
1882 }
1883 }
1884 }
1886 if (!s->mChildrenPending.IsEmpty()) {
1887 nsCOMPtr<nsITimer> timer;
1888 rv = NS_NewTimerWithFuncCallback(
1889 getter_AddRefs(timer), TimeoutCallback, this, kTimeoutLengthMS,
1890 nsITimer::TYPE_ONE_SHOT,
1891 "nsMemoryReporterManager::StartGettingReports");
1892 if (NS_WARN_IF(NS_FAILED(rv))) {
1893 FinishReporting();
1894 return rv;
1895 }
1897 MOZ_ASSERT(!s->mTimer);
1898 s->mTimer.swap(timer);
1899 }
1901 return NS_OK;
1902 }
1904 void nsMemoryReporterManager::DispatchReporter(
1905 nsIMemoryReporter* aReporter, bool aIsAsync,
1906 nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
1907 bool aAnonymize) {
1908 MOZ_ASSERT(mPendingReportersState);
1910 // Grab refs to everything used in the lambda function.
1911 RefPtr<nsMemoryReporterManager> self = this;
1912 nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
1913 nsCOMPtr<nsIHandleReportCallback> handleReport = aHandleReport;
1914 nsCOMPtr<nsISupports> handleReportData = aHandleReportData;
1916 nsCOMPtr<nsIRunnable> event = NS_NewRunnableFunction(
1917 "nsMemoryReporterManager::DispatchReporter",
1918 [self, reporter, aIsAsync, handleReport, handleReportData, aAnonymize]() {
1919 reporter->CollectReports(handleReport, handleReportData, aAnonymize);
1920 if (!aIsAsync) {
1921 self->EndReport();
1922 }
1923 });
1925 NS_DispatchToMainThread(event);
1926 mPendingReportersState->mReportsPending++;
1927 }
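// Note the !aIsAsync check above: the manager calls EndReport() on behalf of
// synchronous reporters only. An async reporter (registered through
// RegisterStrongAsyncReporter() or RegisterWeakAsyncReporter()) must call
// EndReport() itself after delivering its last report. A minimal sketch;
// |MyAsyncReporter| and ComputeMySubsystemSize() are illustrative, not real
// names in this tree:
//
//   NS_IMETHODIMP
//   MyAsyncReporter::CollectReports(nsIHandleReportCallback* aHandleReport,
//                                   nsISupports* aData, bool aAnonymize) {
//     nsCOMPtr<nsIHandleReportCallback> handleReport = aHandleReport;
//     nsCOMPtr<nsISupports> data = aData;
//     NS_DispatchToMainThread(NS_NewRunnableFunction(
//         "MyAsyncReporter::CollectReports", [handleReport, data]() {
//           handleReport->Callback(
//               ""_ns, "explicit/my-subsystem"_ns,
//               nsIMemoryReporter::KIND_HEAP, nsIMemoryReporter::UNITS_BYTES,
//               ComputeMySubsystemSize(),
//               "Memory used by my subsystem."_ns, data);
//           if (RefPtr<nsMemoryReporterManager> mgr =
//                   nsMemoryReporterManager::GetOrCreate()) {
//             mgr->EndReport();
//           }
//         }));
//     return NS_OK;
//   }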
1929 NS_IMETHODIMP
1930 nsMemoryReporterManager::GetReportsForThisProcessExtended(
1931 nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
1932 bool aAnonymize, FILE* aDMDFile,
1933 nsIFinishReportingCallback* aFinishReporting,
1934 nsISupports* aFinishReportingData) {
1935 // Memory reporters are not necessarily threadsafe, so this function must
1936 // be called from the main thread.
1937 if (!NS_IsMainThread()) {
1938 MOZ_CRASH();
1939 }
1941 if (NS_WARN_IF(mPendingReportersState)) {
1942 // Report is already in progress.
1943 return NS_ERROR_IN_PROGRESS;
1944 }
1946 #ifdef MOZ_DMD
1947 if (aDMDFile) {
1948 // Clear DMD's reportedness state before running the memory
1949 // reporters, to avoid spurious twice-reported warnings.
1950 dmd::ClearReports();
1951 }
1952 #else
1953 MOZ_ASSERT(!aDMDFile);
1954 #endif
1956 mPendingReportersState = new PendingReportersState(
1957 aFinishReporting, aFinishReportingData, aDMDFile);
1959 {
1960 mozilla::MutexAutoLock autoLock(mMutex);
1962 for (const auto& entry : *mStrongReporters) {
1963 DispatchReporter(entry.GetKey(), entry.GetData(), aHandleReport,
1964 aHandleReportData, aAnonymize);
1965 }
1967 for (const auto& entry : *mWeakReporters) {
1968 nsCOMPtr<nsIMemoryReporter> reporter = entry.GetKey();
1969 DispatchReporter(reporter, entry.GetData(), aHandleReport,
1970 aHandleReportData, aAnonymize);
1971 }
1972 }
1974 return NS_OK;
1975 }
1977 // MainThread only
1978 NS_IMETHODIMP
1979 nsMemoryReporterManager::EndReport() {
1980 if (--mPendingReportersState->mReportsPending == 0) {
1981 #ifdef MOZ_DMD
1982 if (mPendingReportersState->mDMDFile) {
1983 nsMemoryInfoDumper::DumpDMDToFile(mPendingReportersState->mDMDFile);
1984 }
1985 #endif
1986 if (mPendingProcessesState) {
1987 // This is the parent process.
1988 EndProcessReport(mPendingProcessesState->mGeneration, true);
1989 } else {
1990 mPendingReportersState->mFinishReporting->Callback(
1991 mPendingReportersState->mFinishReportingData);
1992 }
1994 delete mPendingReportersState;
1995 mPendingReportersState = nullptr;
1996 }
1998 return NS_OK;
1999 }
2001 nsMemoryReporterManager::PendingProcessesState*
2002 nsMemoryReporterManager::GetStateForGeneration(uint32_t aGeneration) {
2003 // Memory reporting only happens on the main thread.
2004 MOZ_RELEASE_ASSERT(NS_IsMainThread());
2006 PendingProcessesState* s = mPendingProcessesState;
2008 if (!s) {
2009 // If we reach here, then:
2010 //
2011 // - A child process reported back too late, and no subsequent request
2012 // is in flight.
2013 //
2014 // So there's nothing to be done. Just ignore it.
2015 MEMORY_REPORTING_LOG("HandleChildReports: no request in flight (aGen=%u)\n",
2016 aGeneration);
2017 return nullptr;
2018 }
2020 if (aGeneration != s->mGeneration) {
2021 // If we reach here, a child process must have reported back, too late,
2022 // while a subsequent (higher-numbered) request is in flight. Again,
2023 // ignore it.
2024 MOZ_ASSERT(aGeneration < s->mGeneration);
2025 MEMORY_REPORTING_LOG(
2026 "HandleChildReports: gen mismatch (aGen=%u, s->gen=%u)\n", aGeneration,
2027 s->mGeneration);
2028 return nullptr;
2029 }
2031 return s;
2032 }
2034 // This function has no return value. If something goes wrong, there's no
2035 // clear place to report the problem to, but that's ok -- we will end up
2036 // hitting the timeout and executing TimeoutCallback().
2037 void nsMemoryReporterManager::HandleChildReport(
2038 uint32_t aGeneration, const dom::MemoryReport& aChildReport) {
2039 PendingProcessesState* s = GetStateForGeneration(aGeneration);
2040 if (!s) {
2041 return;
2042 }
2044 // Child reports should have a non-empty process.
2045 MOZ_ASSERT(!aChildReport.process().IsEmpty());
2047 // If the call fails, ignore and continue.
2048 s->mHandleReport->Callback(aChildReport.process(), aChildReport.path(),
2049 aChildReport.kind(), aChildReport.units(),
2050 aChildReport.amount(), aChildReport.desc(),
2051 s->mHandleReportData);
2052 }
2054 /* static */
2055 bool nsMemoryReporterManager::StartChildReport(
2056 mozilla::MemoryReportingProcess* aChild,
2057 const PendingProcessesState* aState) {
2058 if (!aChild->IsAlive()) {
2059 MEMORY_REPORTING_LOG(
2060 "StartChildReports (gen=%u): child exited before"
2061 " its report was started\n",
2062 aState->mGeneration);
2063 return false;
2064 }
2066 Maybe<mozilla::ipc::FileDescriptor> dmdFileDesc;
2067 #ifdef MOZ_DMD
2068 if (!aState->mDMDDumpIdent.IsEmpty()) {
2069 FILE* dmdFile = nullptr;
2070 nsresult rv = nsMemoryInfoDumper::OpenDMDFile(aState->mDMDDumpIdent,
2071 aChild->Pid(), &dmdFile);
2072 if (NS_WARN_IF(NS_FAILED(rv))) {
2073 // Proceed with the memory report as if DMD were disabled.
2074 dmdFile = nullptr;
2075 }
2076 if (dmdFile) {
2077 dmdFileDesc = Some(mozilla::ipc::FILEToFileDescriptor(dmdFile));
2078 fclose(dmdFile);
2079 }
2080 }
2081 #endif
2082 return aChild->SendRequestMemoryReport(
2083 aState->mGeneration, aState->mAnonymize, aState->mMinimize, dmdFileDesc);
2084 }
2086 void nsMemoryReporterManager::EndProcessReport(uint32_t aGeneration,
2087 bool aSuccess) {
2088 PendingProcessesState* s = GetStateForGeneration(aGeneration);
2089 if (!s) {
2090 return;
2091 }
2093 MOZ_ASSERT(s->mNumProcessesRunning > 0);
2094 s->mNumProcessesRunning--;
2095 s->mNumProcessesCompleted++;
2096 MEMORY_REPORTING_LOG(
2097 "HandleChildReports (aGen=%u): process %u %s"
2098 " (%u running, %u pending)\n",
2099 aGeneration, s->mNumProcessesCompleted,
2100 aSuccess ? "completed" : "exited during report", s->mNumProcessesRunning,
2101 static_cast<unsigned>(s->mChildrenPending.Length()));
2103 // Start pending children up to the concurrency limit.
2104 while (s->mNumProcessesRunning < s->mConcurrencyLimit &&
2105 !s->mChildrenPending.IsEmpty()) {
2106 // Pop last element from s->mChildrenPending
2107 const RefPtr<MemoryReportingProcess> nextChild =
2108 s->mChildrenPending.PopLastElement();
2109 // Start report (if the child is still alive).
2110 if (StartChildReport(nextChild, s)) {
2111 ++s->mNumProcessesRunning;
2112 MEMORY_REPORTING_LOG(
2113 "HandleChildReports (aGen=%u): started child report"
2114 " (%u running, %u pending)\n",
2115 aGeneration, s->mNumProcessesRunning,
2116 static_cast<unsigned>(s->mChildrenPending.Length()));
2117 }
2118 }
2120 // If all the child processes (if any) have reported, we can cancel
2121 // the timer (if started) and finish up. Otherwise, just return.
2122 if (s->mNumProcessesRunning == 0) {
2123 MOZ_ASSERT(s->mChildrenPending.IsEmpty());
2124 if (s->mTimer) {
2125 s->mTimer->Cancel();
2126 }
2127 FinishReporting();
2128 }
2129 }
2131 /* static */
2132 void nsMemoryReporterManager::TimeoutCallback(nsITimer* aTimer, void* aData) {
2133 nsMemoryReporterManager* mgr = static_cast<nsMemoryReporterManager*>(aData);
2134 PendingProcessesState* s = mgr->mPendingProcessesState;
2136 // Release assert because: if the pointer is null we're about to
2137 // crash regardless of DEBUG, and this way the compiler doesn't
2138 // complain about unused variables.
2139 MOZ_RELEASE_ASSERT(s, "mgr->mPendingProcessesState");
2140 MEMORY_REPORTING_LOG("TimeoutCallback (s->gen=%u; %u running, %u pending)\n",
2141 s->mGeneration, s->mNumProcessesRunning,
2142 static_cast<unsigned>(s->mChildrenPending.Length()));
2144 // We don't bother sending any kind of cancellation message to the child
2145 // processes that haven't reported back.
2146 mgr->FinishReporting();
2147 }
2149 nsresult nsMemoryReporterManager::FinishReporting() {
2150 // Memory reporting only happens on the main thread.
2151 if (!NS_IsMainThread()) {
2152 MOZ_CRASH();
2153 }
2155 MOZ_ASSERT(mPendingProcessesState);
2156 MEMORY_REPORTING_LOG("FinishReporting (s->gen=%u; %u processes reported)\n",
2157 mPendingProcessesState->mGeneration,
2158 mPendingProcessesState->mNumProcessesCompleted);
2160 // Call this before deleting |mPendingProcessesState|. That way, if
2161 // |mFinishReporting|'s callback calls GetReports(), it will silently abort, as
2162 // required.
2163 nsresult rv = mPendingProcessesState->mFinishReporting->Callback(
2164 mPendingProcessesState->mFinishReportingData);
2166 delete mPendingProcessesState;
2167 mPendingProcessesState = nullptr;
2168 return rv;
2169 }
2171 nsMemoryReporterManager::PendingProcessesState::PendingProcessesState(
2172 uint32_t aGeneration, bool aAnonymize, bool aMinimize,
2173 uint32_t aConcurrencyLimit, nsIHandleReportCallback* aHandleReport,
2174 nsISupports* aHandleReportData,
2175 nsIFinishReportingCallback* aFinishReporting,
2176 nsISupports* aFinishReportingData, const nsAString& aDMDDumpIdent)
2177 : mGeneration(aGeneration),
2178 mAnonymize(aAnonymize),
2179 mMinimize(aMinimize),
2180 mChildrenPending(),
2181 mNumProcessesRunning(1), // reporting starts with the parent
2182 mNumProcessesCompleted(0),
2183 mConcurrencyLimit(aConcurrencyLimit),
2184 mHandleReport(aHandleReport),
2185 mHandleReportData(aHandleReportData),
2186 mFinishReporting(aFinishReporting),
2187 mFinishReportingData(aFinishReportingData),
2188 mDMDDumpIdent(aDMDDumpIdent) {}
2190 static void CrashIfRefcountIsZero(nsISupports* aObj) {
2191 // This will probably crash if the object's refcount is 0.
2192 uint32_t refcnt = NS_ADDREF(aObj);
2193 if (refcnt <= 1) {
2194 MOZ_CRASH("CrashIfRefcountIsZero: refcount is zero");
2195 }
2196 NS_RELEASE(aObj);
2197 }
2199 nsresult nsMemoryReporterManager::RegisterReporterHelper(
2200 nsIMemoryReporter* aReporter, bool aForce, bool aStrong, bool aIsAsync) {
2201 // This method is thread-safe.
2202 mozilla::MutexAutoLock autoLock(mMutex);
2204 if (mIsRegistrationBlocked && !aForce) {
2205 return NS_ERROR_FAILURE;
2206 }
2208 if (mStrongReporters->Contains(aReporter) ||
2209 mWeakReporters->Contains(aReporter)) {
2210 return NS_ERROR_FAILURE;
2211 }
2213 // If |aStrong| is true, |aReporter| may have a refcnt of 0, so we take
2214 // a kung fu death grip before calling PutEntry. Otherwise, if PutEntry
2215 // addref'ed and released |aReporter| before finally addref'ing it for
2216 // good, it would free aReporter! The kung fu death grip could itself be
2217 // problematic if PutEntry didn't addref |aReporter| (because then when the
2218 // death grip goes out of scope, we would delete the reporter). In debug
2219 // mode, we check that this doesn't happen.
2220 //
2221 // If |aStrong| is false, we require that |aReporter| have a non-zero
2222 // refcnt.
2223 //
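// Concretely: without the death grip, the table insertion could AddRef() a
// refcount-0 |aReporter| (0 -> 1) and then Release() it (1 -> 0) while
// building the entry, and that Release() would destroy the reporter before
// the table ever took its lasting reference.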
2224 if (aStrong) {
2225 nsCOMPtr<nsIMemoryReporter> kungFuDeathGrip = aReporter;
2226 mStrongReporters->InsertOrUpdate(aReporter, aIsAsync);
2227 CrashIfRefcountIsZero(aReporter);
2228 } else {
2229 CrashIfRefcountIsZero(aReporter);
2230 nsCOMPtr<nsIXPConnectWrappedJS> jsComponent = do_QueryInterface(aReporter);
2231 if (jsComponent) {
2232 // We cannot allow non-native reporters (WrappedJS), since we'll be
2233 // holding onto a raw pointer, which would point to the wrapper,
2234 // and that wrapper is likely to go away as soon as this register
2235 // call finishes. This would then lead to subsequent crashes in
2236 // CollectReports().
2237 return NS_ERROR_XPC_BAD_CONVERT_JS;
2238 }
2239 mWeakReporters->InsertOrUpdate(aReporter, aIsAsync);
2240 }
2242 return NS_OK;
2243 }
2245 NS_IMETHODIMP
2246 nsMemoryReporterManager::RegisterStrongReporter(nsIMemoryReporter* aReporter) {
2247 return RegisterReporterHelper(aReporter, /* force = */ false,
2248 /* strong = */ true,
2249 /* async = */ false);
2250 }
2252 NS_IMETHODIMP
2253 nsMemoryReporterManager::RegisterStrongAsyncReporter(
2254 nsIMemoryReporter* aReporter) {
2255 return RegisterReporterHelper(aReporter, /* force = */ false,
2256 /* strong = */ true,
2257 /* async = */ true);
2258 }
2260 NS_IMETHODIMP
2261 nsMemoryReporterManager::RegisterWeakReporter(nsIMemoryReporter* aReporter) {
2262 return RegisterReporterHelper(aReporter, /* force = */ false,
2263 /* strong = */ false,
2264 /* async = */ false);
2265 }
2267 NS_IMETHODIMP
2268 nsMemoryReporterManager::RegisterWeakAsyncReporter(
2269 nsIMemoryReporter* aReporter) {
2270 return RegisterReporterHelper(aReporter, /* force = */ false,
2271 /* strong = */ false,
2272 /* async = */ true);
2273 }
2275 NS_IMETHODIMP
2276 nsMemoryReporterManager::RegisterStrongReporterEvenIfBlocked(
2277 nsIMemoryReporter* aReporter) {
2278 return RegisterReporterHelper(aReporter, /* force = */ true,
2279 /* strong = */ true,
2280 /* async = */ false);
2281 }
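// A minimal strong-reporter sketch (|MyCacheReporter| and |gMyCacheSize| are
// illustrative): implement nsIMemoryReporter, emit each report with
// MOZ_COLLECT_REPORT as CollectReports() above does, and hand the object to
// one of the registration entry points.
//
//   static int64_t gMyCacheSize = 0;
//
//   class MyCacheReporter final : public nsIMemoryReporter {
//    public:
//     NS_DECL_THREADSAFE_ISUPPORTS
//
//     NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
//                               nsISupports* aData, bool aAnonymize) override {
//       MOZ_COLLECT_REPORT("explicit/my-cache", KIND_HEAP, UNITS_BYTES,
//                          gMyCacheSize, "Memory used by my cache.");
//       return NS_OK;
//     }
//
//    private:
//     ~MyCacheReporter() = default;
//   };
//   NS_IMPL_ISUPPORTS(MyCacheReporter, nsIMemoryReporter)
//
//   // e.g. at startup:
//   mozilla::RegisterStrongMemoryReporter(new MyCacheReporter());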
2283 NS_IMETHODIMP
2284 nsMemoryReporterManager::UnregisterStrongReporter(
2285 nsIMemoryReporter* aReporter) {
2286 // This method is thread-safe.
2287 mozilla::MutexAutoLock autoLock(mMutex);
2289 MOZ_ASSERT(!mWeakReporters->Contains(aReporter));
2291 if (mStrongReporters->Contains(aReporter)) {
2292 mStrongReporters->Remove(aReporter);
2293 return NS_OK;
2294 }
2296 // We don't register new reporters when the block is in place, but we do
2297 // unregister existing reporters. This is so we don't keep holding strong
2298 // references that these reporters aren't expecting (which can keep them
2299 // alive longer than intended).
2300 if (mSavedStrongReporters && mSavedStrongReporters->Contains(aReporter)) {
2301 mSavedStrongReporters->Remove(aReporter);
2302 return NS_OK;
2303 }
2305 return NS_ERROR_FAILURE;
2306 }
2308 NS_IMETHODIMP
2309 nsMemoryReporterManager::UnregisterWeakReporter(nsIMemoryReporter* aReporter) {
2310 // This method is thread-safe.
2311 mozilla::MutexAutoLock autoLock(mMutex);
2313 MOZ_ASSERT(!mStrongReporters->Contains(aReporter));
2315 if (mWeakReporters->Contains(aReporter)) {
2316 mWeakReporters->Remove(aReporter);
2317 return NS_OK;
2318 }
2320 // We don't register new reporters when the block is in place, but we do
2321 // unregister existing reporters. This is so we don't keep holding weak
2322 // references that the old reporters aren't expecting (which can end up as
2323 // dangling pointers that lead to use-after-frees).
2324 if (mSavedWeakReporters && mSavedWeakReporters->Contains(aReporter)) {
2325 mSavedWeakReporters->Remove(aReporter);
2326 return NS_OK;
2327 }
2329 return NS_ERROR_FAILURE;
2330 }
2332 NS_IMETHODIMP
2333 nsMemoryReporterManager::BlockRegistrationAndHideExistingReporters() {
2334 // This method is thread-safe.
2335 mozilla::MutexAutoLock autoLock(mMutex);
2336 if (mIsRegistrationBlocked) {
2337 return NS_ERROR_FAILURE;
2338 }
2339 mIsRegistrationBlocked = true;
2341 // Hide the existing reporters, saving them for later restoration.
2342 MOZ_ASSERT(!mSavedStrongReporters);
2343 MOZ_ASSERT(!mSavedWeakReporters);
2344 mSavedStrongReporters = mStrongReporters;
2345 mSavedWeakReporters = mWeakReporters;
2346 mStrongReporters = new StrongReportersTable();
2347 mWeakReporters = new WeakReportersTable();
2349 return NS_OK;
2350 }
2352 NS_IMETHODIMP
2353 nsMemoryReporterManager::UnblockRegistrationAndRestoreOriginalReporters() {
2354 // This method is thread-safe.
2355 mozilla::MutexAutoLock autoLock(mMutex);
2356 if (!mIsRegistrationBlocked) {
2357 return NS_ERROR_FAILURE;
2358 }
2360 // Banish the current reporters, and restore the hidden ones.
2361 delete mStrongReporters;
2362 delete mWeakReporters;
2363 mStrongReporters = mSavedStrongReporters;
2364 mWeakReporters = mSavedWeakReporters;
2365 mSavedStrongReporters = nullptr;
2366 mSavedWeakReporters = nullptr;
2368 mIsRegistrationBlocked = false;
2369 return NS_OK;
2370 }
2372 NS_IMETHODIMP
2373 nsMemoryReporterManager::GetVsize(int64_t* aVsize) {
2374 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2375 return VsizeDistinguishedAmount(aVsize);
2376 #else
2377 *aVsize = 0;
2378 return NS_ERROR_NOT_AVAILABLE;
2379 #endif
2380 }
2382 NS_IMETHODIMP
2383 nsMemoryReporterManager::GetVsizeMaxContiguous(int64_t* aAmount) {
2384 #ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
2385 return VsizeMaxContiguousDistinguishedAmount(aAmount);
2386 #else
2387 *aAmount = 0;
2388 return NS_ERROR_NOT_AVAILABLE;
2389 #endif
2390 }
2392 NS_IMETHODIMP
2393 nsMemoryReporterManager::GetResident(int64_t* aAmount) {
2394 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2395 return ResidentDistinguishedAmount(aAmount);
2396 #else
2397 *aAmount = 0;
2398 return NS_ERROR_NOT_AVAILABLE;
2399 #endif
2400 }
2402 NS_IMETHODIMP
2403 nsMemoryReporterManager::GetResidentFast(int64_t* aAmount) {
2404 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2405 return ResidentFastDistinguishedAmount(aAmount);
2406 #else
2407 *aAmount = 0;
2408 return NS_ERROR_NOT_AVAILABLE;
2409 #endif
2410 }
2412 /*static*/
2413 int64_t nsMemoryReporterManager::ResidentFast() {
2414 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2415 int64_t amount;
2416 nsresult rv = ResidentFastDistinguishedAmount(&amount);
2417 NS_ENSURE_SUCCESS(rv, 0);
2418 return amount;
2419 #else
2420 return 0;
2421 #endif
2422 }
2424 NS_IMETHODIMP
2425 nsMemoryReporterManager::GetResidentPeak(int64_t* aAmount) {
2426 #ifdef HAVE_RESIDENT_PEAK_REPORTER
2427 return ResidentPeakDistinguishedAmount(aAmount);
2428 #else
2429 *aAmount = 0;
2430 return NS_ERROR_NOT_AVAILABLE;
2431 #endif
2432 }
2434 /*static*/
2435 int64_t nsMemoryReporterManager::ResidentPeak() {
2436 #ifdef HAVE_RESIDENT_PEAK_REPORTER
2437 int64_t amount = 0;
2438 nsresult rv = ResidentPeakDistinguishedAmount(&amount);
2439 NS_ENSURE_SUCCESS(rv, 0);
2440 return amount;
2441 #else
2442 return 0;
2443 #endif
2444 }
2446 NS_IMETHODIMP
2447 nsMemoryReporterManager::GetResidentUnique(int64_t* aAmount) {
2448 #ifdef HAVE_RESIDENT_UNIQUE_REPORTER
2449 return ResidentUniqueDistinguishedAmount(aAmount);
2450 #else
2451 *aAmount = 0;
2452 return NS_ERROR_NOT_AVAILABLE;
2453 #endif
2454 }
2456 #ifdef XP_MACOSX
2457 /*static*/
2458 int64_t nsMemoryReporterManager::PhysicalFootprint(mach_port_t aPort) {
2459 int64_t amount = 0;
2460 nsresult rv = PhysicalFootprintAmount(&amount, aPort);
2461 NS_ENSURE_SUCCESS(rv, 0);
2462 return amount;
2463 }
2464 #endif
2466 typedef
2467 #ifdef XP_WIN
2468 HANDLE
2469 #elif XP_MACOSX
2470 mach_port_t
2471 #elif XP_LINUX
2472 pid_t
2473 #else
2474 int /*dummy type */
2475 #endif
2476 ResidentUniqueArg;
2478 #if defined(XP_WIN) || defined(XP_MACOSX) || defined(XP_LINUX)
2480 /*static*/
2481 int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg aProcess) {
2482 int64_t amount = 0;
2483 nsresult rv = ResidentUniqueDistinguishedAmount(&amount, aProcess);
2484 NS_ENSURE_SUCCESS(rv, 0);
2485 return amount;
2486 }
2488 #else
2490 /*static*/
2491 int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg) {
2492 # ifdef HAVE_RESIDENT_UNIQUE_REPORTER
2493 int64_t amount = 0;
2494 nsresult rv = ResidentUniqueDistinguishedAmount(&amount);
2495 NS_ENSURE_SUCCESS(rv, 0);
2496 return amount;
2497 # else
2498 return 0;
2499 # endif
2500 }
2502 #endif // XP_{WIN, MACOSX, LINUX, *}
2504 NS_IMETHODIMP
2505 nsMemoryReporterManager::GetHeapAllocated(int64_t* aAmount) {
2506 #ifdef HAVE_JEMALLOC_STATS
2507 jemalloc_stats_t stats;
2508 jemalloc_stats(&stats);
2509 *aAmount = stats.allocated;
2510 return NS_OK;
2511 #else
2512 *aAmount = 0;
2513 return NS_ERROR_NOT_AVAILABLE;
2514 #endif
2515 }
2517 // This has UNITS_PERCENTAGE, so it is multiplied by 100x.
2518 NS_IMETHODIMP
2519 nsMemoryReporterManager::GetHeapOverheadFraction(int64_t* aAmount) {
2520 #ifdef HAVE_JEMALLOC_STATS
2521 jemalloc_stats_t stats;
2522 jemalloc_stats(&stats);
2523 *aAmount = HeapOverheadFraction(&stats);
2524 return NS_OK;
2525 #else
2526 *aAmount = 0;
2527 return NS_ERROR_NOT_AVAILABLE;
2528 #endif
2529 }
2531 [[nodiscard]] static nsresult GetInfallibleAmount(InfallibleAmountFn aAmountFn,
2532 int64_t* aAmount) {
2533 if (aAmountFn) {
2534 *aAmount = aAmountFn();
2535 return NS_OK;
2536 }
2537 *aAmount = 0;
2538 return NS_ERROR_NOT_AVAILABLE;
2539 }
2541 NS_IMETHODIMP
2542 nsMemoryReporterManager::GetJSMainRuntimeGCHeap(int64_t* aAmount) {
2543 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeGCHeap, aAmount);
2544 }
2546 NS_IMETHODIMP
2547 nsMemoryReporterManager::GetJSMainRuntimeTemporaryPeak(int64_t* aAmount) {
2548 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeTemporaryPeak, aAmount);
2549 }
2551 NS_IMETHODIMP
2552 nsMemoryReporterManager::GetJSMainRuntimeCompartmentsSystem(int64_t* aAmount) {
2553 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeCompartmentsSystem,
2554 aAmount);
2555 }
2557 NS_IMETHODIMP
2558 nsMemoryReporterManager::GetJSMainRuntimeCompartmentsUser(int64_t* aAmount) {
2559 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeCompartmentsUser,
2560 aAmount);
2561 }
2563 NS_IMETHODIMP
2564 nsMemoryReporterManager::GetJSMainRuntimeRealmsSystem(int64_t* aAmount) {
2565 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsSystem, aAmount);
2566 }
2568 NS_IMETHODIMP
2569 nsMemoryReporterManager::GetJSMainRuntimeRealmsUser(int64_t* aAmount) {
2570 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsUser, aAmount);
2571 }
2573 NS_IMETHODIMP
2574 nsMemoryReporterManager::GetImagesContentUsedUncompressed(int64_t* aAmount) {
2575 return GetInfallibleAmount(mAmountFns.mImagesContentUsedUncompressed,
2576 aAmount);
2577 }
2579 NS_IMETHODIMP
2580 nsMemoryReporterManager::GetStorageSQLite(int64_t* aAmount) {
2581 return GetInfallibleAmount(mAmountFns.mStorageSQLite, aAmount);
2582 }
2584 NS_IMETHODIMP
2585 nsMemoryReporterManager::GetLowMemoryEventsPhysical(int64_t* aAmount) {
2586 return GetInfallibleAmount(mAmountFns.mLowMemoryEventsPhysical, aAmount);
2587 }
2589 NS_IMETHODIMP
2590 nsMemoryReporterManager::GetGhostWindows(int64_t* aAmount) {
2591 return GetInfallibleAmount(mAmountFns.mGhostWindows, aAmount);
2592 }
2594 NS_IMETHODIMP
2595 nsMemoryReporterManager::GetPageFaultsHard(int64_t* aAmount) {
2596 #ifdef HAVE_PAGE_FAULT_REPORTERS
2597 return PageFaultsHardDistinguishedAmount(aAmount);
2598 #else
2599 *aAmount = 0;
2600 return NS_ERROR_NOT_AVAILABLE;
2601 #endif
2602 }
2604 NS_IMETHODIMP
2605 nsMemoryReporterManager::GetHasMozMallocUsableSize(bool* aHas) {
2606 void* p = malloc(16);
2607 if (!p) {
2608 return NS_ERROR_OUT_OF_MEMORY;
2609 }
2610 size_t usable = moz_malloc_usable_size(p);
2611 free(p);
2612 *aHas = !!(usable > 0);
2613 return NS_OK;
2614 }
2616 NS_IMETHODIMP
2617 nsMemoryReporterManager::GetIsDMDEnabled(bool* aIsEnabled) {
2618 #ifdef MOZ_DMD
2619 *aIsEnabled = true;
2620 #else
2621 *aIsEnabled = false;
2622 #endif
2623 return NS_OK;
2624 }
2626 NS_IMETHODIMP
2627 nsMemoryReporterManager::GetIsDMDRunning(bool* aIsRunning) {
2628 #ifdef MOZ_DMD
2629 *aIsRunning = dmd::IsRunning();
2630 #else
2631 *aIsRunning = false;
2632 #endif
2633 return NS_OK;
2634 }
2636 namespace {
2638 /**
2639 * This runnable lets us implement
2640 * nsIMemoryReporterManager::MinimizeMemoryUsage(). We fire a heap-minimize
2641 * notification, spin the event loop, and repeat this process a few times.
2642 *
2643 * When this sequence finishes, we invoke the callback function passed to the
2644 * runnable's constructor.
2645 */
2646 class MinimizeMemoryUsageRunnable : public Runnable {
2647 public:
2648 explicit MinimizeMemoryUsageRunnable(nsIRunnable* aCallback)
2649 : mozilla::Runnable("MinimizeMemoryUsageRunnable"),
2650 mCallback(aCallback),
2651 mRemainingIters(sNumIters) {}
2653 NS_IMETHOD Run() override {
2654 nsCOMPtr<nsIObserverService> os = services::GetObserverService();
2655 if (!os) {
2656 return NS_ERROR_FAILURE;
2657 }
2659 if (mRemainingIters == 0) {
2660 os->NotifyObservers(nullptr, "after-minimize-memory-usage",
2661 u"MinimizeMemoryUsageRunnable");
2662 if (mCallback) {
2663 mCallback->Run();
2664 }
2665 return NS_OK;
2666 }
2668 os->NotifyObservers(nullptr, "memory-pressure", u"heap-minimize");
2669 mRemainingIters--;
2670 NS_DispatchToMainThread(this);
2672 return NS_OK;
2673 }
2675 private:
2676 // Send sNumIters heap-minimize notifications, spinning the event
2677 // loop after each notification (see bug 610166 comment 12 for an
2678 // explanation), because one notification doesn't cut it.
2679 static const uint32_t sNumIters = 3;
2681 nsCOMPtr<nsIRunnable> mCallback;
2682 uint32_t mRemainingIters;
2683 };
2685 } // namespace
2687 NS_IMETHODIMP
2688 nsMemoryReporterManager::MinimizeMemoryUsage(nsIRunnable* aCallback) {
2689 RefPtr<MinimizeMemoryUsageRunnable> runnable =
2690 new MinimizeMemoryUsageRunnable(aCallback);
2692 return NS_DispatchToMainThread(runnable);
2693 }
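// A sketch of a typical caller, using only helpers already seen in this file
// (GetOrCreate(), NS_NewRunnableFunction(), printf_stderr()):
//
//   if (RefPtr<nsMemoryReporterManager> mgr =
//           nsMemoryReporterManager::GetOrCreate()) {
//     mgr->MinimizeMemoryUsage(NS_NewRunnableFunction(
//         "MinimizeMemoryUsageDone",
//         []() { printf_stderr("heap-minimize sequence finished\n"); }));
//   }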
2695 NS_IMETHODIMP
2696 nsMemoryReporterManager::SizeOfTab(mozIDOMWindowProxy* aTopWindow,
2697 int64_t* aJSObjectsSize,
2698 int64_t* aJSStringsSize,
2699 int64_t* aJSOtherSize, int64_t* aDomSize,
2700 int64_t* aStyleSize, int64_t* aOtherSize,
2701 int64_t* aTotalSize, double* aJSMilliseconds,
2702 double* aNonJSMilliseconds) {
2703 nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aTopWindow);
2704 auto* piWindow = nsPIDOMWindowOuter::From(aTopWindow);
2705 if (NS_WARN_IF(!global) || NS_WARN_IF(!piWindow)) {
2706 return NS_ERROR_FAILURE;
2707 }
2709 TimeStamp t1 = TimeStamp::Now();
2711 // Measure JS memory consumption (and possibly some non-JS consumption, via
2712 // |jsPrivateSize|).
2713 size_t jsObjectsSize, jsStringsSize, jsPrivateSize, jsOtherSize;
2714 nsresult rv = mSizeOfTabFns.mJS(global->GetGlobalJSObject(), &jsObjectsSize,
2715 &jsStringsSize, &jsPrivateSize, &jsOtherSize);
2716 if (NS_WARN_IF(NS_FAILED(rv))) {
2717 return rv;
2718 }
2720 TimeStamp t2 = TimeStamp::Now();
2722 // Measure non-JS memory consumption.
2723 size_t domSize, styleSize, otherSize;
2724 rv = mSizeOfTabFns.mNonJS(piWindow, &domSize, &styleSize, &otherSize);
2725 if (NS_WARN_IF(NS_FAILED(rv))) {
2726 return rv;
2727 }
2729 TimeStamp t3 = TimeStamp::Now();
2731 *aTotalSize = 0;
2732 #define DO(aN, n) \
2733 { \
2734 *aN = (n); \
2735 *aTotalSize += (n); \
2736 }
2737 DO(aJSObjectsSize, jsObjectsSize);
2738 DO(aJSStringsSize, jsStringsSize);
2739 DO(aJSOtherSize, jsOtherSize);
2740 DO(aDomSize, jsPrivateSize + domSize);
2741 DO(aStyleSize, styleSize);
2742 DO(aOtherSize, otherSize);
2743 #undef DO
2745 *aJSMilliseconds = (t2 - t1).ToMilliseconds();
2746 *aNonJSMilliseconds = (t3 - t2).ToMilliseconds();
2748 return NS_OK;
2749 }
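// A sketch of a caller (|mgr| and |topWindow| are assumed to be in scope):
// each out-param receives one component, |total| their sum, and the two
// timings split at the JS/non-JS boundary measured above.
//
//   int64_t jsObjects, jsStrings, jsOther, dom, style, other, total;
//   double jsMs, nonJsMs;
//   mgr->SizeOfTab(topWindow, &jsObjects, &jsStrings, &jsOther, &dom, &style,
//                  &other, &total, &jsMs, &nonJsMs);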
2751 namespace mozilla {
2753 #define GET_MEMORY_REPORTER_MANAGER(mgr) \
2754 RefPtr<nsMemoryReporterManager> mgr = \
2755 nsMemoryReporterManager::GetOrCreate(); \
2756 if (!mgr) { \
2757 return NS_ERROR_FAILURE; \
2758 }
2760 nsresult RegisterStrongMemoryReporter(nsIMemoryReporter* aReporter) {
2761 // Hold a strong reference to the argument to make sure it gets released if
2762 // we return early below.
2763 nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
2764 GET_MEMORY_REPORTER_MANAGER(mgr)
2765 return mgr->RegisterStrongReporter(reporter);
2766 }
2768 nsresult RegisterStrongAsyncMemoryReporter(nsIMemoryReporter* aReporter) {
2769 // Hold a strong reference to the argument to make sure it gets released if
2770 // we return early below.
2771 nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
2772 GET_MEMORY_REPORTER_MANAGER(mgr)
2773 return mgr->RegisterStrongAsyncReporter(reporter);
2774 }
2776 nsresult RegisterWeakMemoryReporter(nsIMemoryReporter* aReporter) {
2777 GET_MEMORY_REPORTER_MANAGER(mgr)
2778 return mgr->RegisterWeakReporter(aReporter);
2779 }
2781 nsresult RegisterWeakAsyncMemoryReporter(nsIMemoryReporter* aReporter) {
2782 GET_MEMORY_REPORTER_MANAGER(mgr)
2783 return mgr->RegisterWeakAsyncReporter(aReporter);
2784 }
2786 nsresult UnregisterStrongMemoryReporter(nsIMemoryReporter* aReporter) {
2787 GET_MEMORY_REPORTER_MANAGER(mgr)
2788 return mgr->UnregisterStrongReporter(aReporter);
2789 }
2791 nsresult UnregisterWeakMemoryReporter(nsIMemoryReporter* aReporter) {
2792 GET_MEMORY_REPORTER_MANAGER(mgr)
2793 return mgr->UnregisterWeakReporter(aReporter);
2794 }
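// The weak variants are the usual pattern for long-lived singletons: the
// manager keeps only a raw pointer, so the object must unregister itself
// before it is destroyed (see the dangling-pointer note in
// UnregisterWeakReporter() above). A sketch, with |MySingleton| illustrative:
//
//   MySingleton::MySingleton() { mozilla::RegisterWeakMemoryReporter(this); }
//   MySingleton::~MySingleton() {
//     mozilla::UnregisterWeakMemoryReporter(this);
//   }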
2796 // Macro for generating functions that register distinguished amount functions
2797 // with the memory reporter manager.
2798 #define DEFINE_REGISTER_DISTINGUISHED_AMOUNT(kind, name) \
2799 nsresult Register##name##DistinguishedAmount(kind##AmountFn aAmountFn) { \
2800 GET_MEMORY_REPORTER_MANAGER(mgr) \
2801 mgr->mAmountFns.m##name = aAmountFn; \
2802 return NS_OK; \
2803 }
2805 // Macro for generating functions that unregister distinguished amount
2806 // functions with the memory reporter manager.
2807 #define DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(name) \
2808 nsresult Unregister##name##DistinguishedAmount() { \
2809 GET_MEMORY_REPORTER_MANAGER(mgr) \
2810 mgr->mAmountFns.m##name = nullptr; \
2811 return NS_OK; \
2812 }
2814 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeGCHeap)
2815 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeTemporaryPeak)
2816 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible,
2817 JSMainRuntimeCompartmentsSystem)
2818 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeCompartmentsUser)
2819 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsSystem)
2820 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsUser)
2822 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, ImagesContentUsedUncompressed)
2823 DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(ImagesContentUsedUncompressed)
2825 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, StorageSQLite)
2826 DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(StorageSQLite)
2828 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, LowMemoryEventsPhysical)
2830 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, GhostWindows)
2832 #undef DEFINE_REGISTER_DISTINGUISHED_AMOUNT
2833 #undef DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT
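// The generated registration functions take a bare function pointer; for the
// infallible variants that is a no-argument function returning int64_t (see
// GetInfallibleAmount() above). A sketch, with |gGhostWindowCount| and
// |GhostWindowsAmount| purely illustrative:
//
//   static int64_t gGhostWindowCount = 0;
//   static int64_t GhostWindowsAmount() { return gGhostWindowCount; }
//
//   // e.g. during startup:
//   mozilla::RegisterGhostWindowsDistinguishedAmount(GhostWindowsAmount);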
2835 #define DEFINE_REGISTER_SIZE_OF_TAB(name) \
2836 nsresult Register##name##SizeOfTab(name##SizeOfTabFn aSizeOfTabFn) { \
2837 GET_MEMORY_REPORTER_MANAGER(mgr) \
2838 mgr->mSizeOfTabFns.m##name = aSizeOfTabFn; \
2839 return NS_OK; \
2840 }
2842 DEFINE_REGISTER_SIZE_OF_TAB(JS);
2843 DEFINE_REGISTER_SIZE_OF_TAB(NonJS);
2845 #undef DEFINE_REGISTER_SIZE_OF_TAB
2847 #undef GET_MEMORY_REPORTER_MANAGER
2849 } // namespace mozilla