Bug 1686838 [wpt PR 27194] - [webcodecs] Deprecate VideoFrame.destroy()., a=testonly
[gecko.git] / xpcom / base / nsMemoryReporterManager.cpp
blobc8322db99b2bb2eb732369eeadd8851d0df9d048
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "nsMemoryReporterManager.h"
9 #include "nsAtomTable.h"
10 #include "nsCOMPtr.h"
11 #include "nsCOMArray.h"
12 #include "nsPrintfCString.h"
13 #include "nsProxyRelease.h"
14 #include "nsServiceManagerUtils.h"
15 #include "nsITimer.h"
16 #include "nsThreadUtils.h"
17 #include "nsPIDOMWindow.h"
18 #include "nsIObserverService.h"
19 #include "nsIOService.h"
20 #include "nsIGlobalObject.h"
21 #include "nsIXPConnect.h"
22 #ifdef MOZ_GECKO_PROFILER
23 # include "GeckoProfilerReporter.h"
24 #endif
25 #if defined(XP_UNIX) || defined(MOZ_DMD)
26 # include "nsMemoryInfoDumper.h"
27 #endif
28 #include "nsNetCID.h"
29 #include "nsThread.h"
30 #include "VRProcessManager.h"
31 #include "mozilla/Attributes.h"
32 #include "mozilla/MemoryReportingProcess.h"
33 #include "mozilla/PodOperations.h"
34 #include "mozilla/Preferences.h"
35 #include "mozilla/RDDProcessManager.h"
36 #include "mozilla/ResultExtensions.h"
37 #include "mozilla/Services.h"
38 #include "mozilla/Telemetry.h"
39 #include "mozilla/UniquePtrExtensions.h"
40 #include "mozilla/dom/MemoryReportTypes.h"
41 #include "mozilla/dom/ContentParent.h"
42 #include "mozilla/gfx/GPUProcessManager.h"
43 #include "mozilla/ipc/FileDescriptorUtils.h"
45 #ifdef XP_WIN
46 # include "mozilla/MemoryInfo.h"
48 # include <process.h>
49 # ifndef getpid
50 # define getpid _getpid
51 # endif
52 #else
53 # include <unistd.h>
54 #endif
56 using namespace mozilla;
57 using namespace dom;
59 #if defined(MOZ_MEMORY)
60 # define HAVE_JEMALLOC_STATS 1
61 # include "mozmemory.h"
62 #endif // MOZ_MEMORY
64 #if defined(XP_LINUX)
66 # include "mozilla/MemoryMapping.h"
68 # include <malloc.h>
69 # include <string.h>
70 # include <stdlib.h>
72 [[nodiscard]] static nsresult GetProcSelfStatmField(int aField, int64_t* aN) {
73 // There are more than two fields, but we're only interested in the first
74 // two.
75 static const int MAX_FIELD = 2;
76 size_t fields[MAX_FIELD];
77 MOZ_ASSERT(aField < MAX_FIELD, "bad field number");
78 FILE* f = fopen("/proc/self/statm", "r");
79 if (f) {
80 int nread = fscanf(f, "%zu %zu", &fields[0], &fields[1]);
81 fclose(f);
82 if (nread == MAX_FIELD) {
83 *aN = fields[aField] * getpagesize();
84 return NS_OK;
87 return NS_ERROR_FAILURE;
90 [[nodiscard]] static nsresult GetProcSelfSmapsPrivate(int64_t* aN, pid_t aPid) {
91 // You might be tempted to calculate USS by subtracting the "shared" value
92 // from the "resident" value in /proc/<pid>/statm. But at least on Linux,
93 // statm's "shared" value actually counts pages backed by files, which has
94 // little to do with whether the pages are actually shared. /proc/self/smaps
95 // on the other hand appears to give us the correct information.
97 nsTArray<MemoryMapping> mappings(1024);
98 MOZ_TRY(GetMemoryMappings(mappings, aPid));
100 int64_t amount = 0;
101 for (auto& mapping : mappings) {
102 amount += mapping.Private_Clean();
103 amount += mapping.Private_Dirty();
105 *aN = amount;
106 return NS_OK;
109 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
110 [[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
111 return GetProcSelfStatmField(0, aN);
114 [[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
115 return GetProcSelfStatmField(1, aN);
118 [[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
119 return ResidentDistinguishedAmount(aN);
122 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
123 [[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
124 int64_t* aN, pid_t aPid = 0) {
125 return GetProcSelfSmapsPrivate(aN, aPid);
128 # ifdef HAVE_MALLINFO
129 # define HAVE_SYSTEM_HEAP_REPORTER 1
130 [[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
131 struct mallinfo info = mallinfo();
133 // The documentation in the glibc man page makes it sound like |uordblks|
134 // would suffice, but that only gets the small allocations that are put in
135 // the brk heap. We need |hblkhd| as well to get the larger allocations
136 // that are mmapped.
138 // The fields in |struct mallinfo| are all |int|, <sigh>, so it is
139 // unreliable if memory usage gets high. However, the system heap size on
140 // Linux should usually be zero (so long as jemalloc is enabled) so that
141 // shouldn't be a problem. Nonetheless, cast the |int|s to |size_t| before
142 // adding them to provide a small amount of extra overflow protection.
143 *aSizeOut = size_t(info.hblkhd) + size_t(info.uordblks);
144 return NS_OK;
146 # endif
148 #elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
149 defined(__OpenBSD__) || defined(__FreeBSD_kernel__)
151 # include <sys/param.h>
152 # include <sys/sysctl.h>
153 # if defined(__DragonFly__) || defined(__FreeBSD__) || \
154 defined(__FreeBSD_kernel__)
155 # include <sys/user.h>
156 # endif
158 # include <unistd.h>
160 # if defined(__NetBSD__)
161 # undef KERN_PROC
162 # define KERN_PROC KERN_PROC2
163 # define KINFO_PROC struct kinfo_proc2
164 # else
165 # define KINFO_PROC struct kinfo_proc
166 # endif
168 # if defined(__DragonFly__)
169 # define KP_SIZE(kp) (kp.kp_vm_map_size)
170 # define KP_RSS(kp) (kp.kp_vm_rssize * getpagesize())
171 # elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
172 # define KP_SIZE(kp) (kp.ki_size)
173 # define KP_RSS(kp) (kp.ki_rssize * getpagesize())
174 # elif defined(__NetBSD__)
175 # define KP_SIZE(kp) (kp.p_vm_msize * getpagesize())
176 # define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
177 # elif defined(__OpenBSD__)
178 # define KP_SIZE(kp) \
179 ((kp.p_vm_dsize + kp.p_vm_ssize + kp.p_vm_tsize) * getpagesize())
180 # define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
181 # endif
183 [[nodiscard]] static nsresult GetKinfoProcSelf(KINFO_PROC* aProc) {
184 # if defined(__OpenBSD__) && defined(MOZ_SANDBOX)
185 static LazyLogModule sPledgeLog("SandboxPledge");
186 MOZ_LOG(sPledgeLog, LogLevel::Debug,
187 ("%s called when pledged, returning NS_ERROR_FAILURE\n", __func__));
188 return NS_ERROR_FAILURE;
189 # endif
190 int mib[] = {
191 CTL_KERN,
192 KERN_PROC,
193 KERN_PROC_PID,
194 getpid(),
195 # if defined(__NetBSD__) || defined(__OpenBSD__)
196 sizeof(KINFO_PROC),
198 # endif
200 u_int miblen = sizeof(mib) / sizeof(mib[0]);
201 size_t size = sizeof(KINFO_PROC);
202 if (sysctl(mib, miblen, aProc, &size, nullptr, 0)) {
203 return NS_ERROR_FAILURE;
205 return NS_OK;
208 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
209 [[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
210 KINFO_PROC proc;
211 nsresult rv = GetKinfoProcSelf(&proc);
212 if (NS_SUCCEEDED(rv)) {
213 *aN = KP_SIZE(proc);
215 return rv;
218 [[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
219 KINFO_PROC proc;
220 nsresult rv = GetKinfoProcSelf(&proc);
221 if (NS_SUCCEEDED(rv)) {
222 *aN = KP_RSS(proc);
224 return rv;
227 [[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
228 return ResidentDistinguishedAmount(aN);
231 # ifdef __FreeBSD__
232 # include <libutil.h>
233 # include <algorithm>
235 [[nodiscard]] static nsresult GetKinfoVmentrySelf(int64_t* aPrss,
236 uint64_t* aMaxreg) {
237 int cnt;
238 struct kinfo_vmentry* vmmap;
239 struct kinfo_vmentry* kve;
240 if (!(vmmap = kinfo_getvmmap(getpid(), &cnt))) {
241 return NS_ERROR_FAILURE;
243 if (aPrss) {
244 *aPrss = 0;
246 if (aMaxreg) {
247 *aMaxreg = 0;
250 for (int i = 0; i < cnt; i++) {
251 kve = &vmmap[i];
252 if (aPrss) {
253 *aPrss += kve->kve_private_resident;
255 if (aMaxreg) {
256 *aMaxreg = std::max(*aMaxreg, kve->kve_end - kve->kve_start);
260 free(vmmap);
261 return NS_OK;
264 # define HAVE_PRIVATE_REPORTER 1
265 [[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
266 int64_t priv;
267 nsresult rv = GetKinfoVmentrySelf(&priv, nullptr);
268 NS_ENSURE_SUCCESS(rv, rv);
269 *aN = priv * getpagesize();
270 return NS_OK;
273 # define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
274 [[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
275 int64_t* aN) {
276 uint64_t biggestRegion;
277 nsresult rv = GetKinfoVmentrySelf(nullptr, &biggestRegion);
278 if (NS_SUCCEEDED(rv)) {
279 *aN = biggestRegion;
281 return NS_OK;
283 # endif // FreeBSD
285 #elif defined(SOLARIS)
287 # include <procfs.h>
288 # include <fcntl.h>
289 # include <unistd.h>
291 static void XMappingIter(int64_t& aVsize, int64_t& aResident,
292 int64_t& aShared) {
293 aVsize = -1;
294 aResident = -1;
295 aShared = -1;
296 int mapfd = open("/proc/self/xmap", O_RDONLY);
297 struct stat st;
298 prxmap_t* prmapp = nullptr;
299 if (mapfd >= 0) {
300 if (!fstat(mapfd, &st)) {
301 int nmap = st.st_size / sizeof(prxmap_t);
302 while (1) {
303 // stat(2) on /proc/<pid>/xmap returns an incorrect value,
304 // prior to the release of Solaris 11.
305 // Here is a workaround for it.
306 nmap *= 2;
307 prmapp = (prxmap_t*)malloc((nmap + 1) * sizeof(prxmap_t));
308 if (!prmapp) {
309 // out of memory
310 break;
312 int n = pread(mapfd, prmapp, (nmap + 1) * sizeof(prxmap_t), 0);
313 if (n < 0) {
314 break;
316 if (nmap >= n / sizeof(prxmap_t)) {
317 aVsize = 0;
318 aResident = 0;
319 aShared = 0;
320 for (int i = 0; i < n / sizeof(prxmap_t); i++) {
321 aVsize += prmapp[i].pr_size;
322 aResident += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
323 if (prmapp[i].pr_mflags & MA_SHARED) {
324 aShared += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
327 break;
329 free(prmapp);
331 free(prmapp);
333 close(mapfd);
337 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
338 [[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
339 int64_t vsize, resident, shared;
340 XMappingIter(vsize, resident, shared);
341 if (vsize == -1) {
342 return NS_ERROR_FAILURE;
344 *aN = vsize;
345 return NS_OK;
348 [[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
349 int64_t vsize, resident, shared;
350 XMappingIter(vsize, resident, shared);
351 if (resident == -1) {
352 return NS_ERROR_FAILURE;
354 *aN = resident;
355 return NS_OK;
358 [[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
359 return ResidentDistinguishedAmount(aN);
362 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
363 [[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(int64_t* aN) {
364 int64_t vsize, resident, shared;
365 XMappingIter(vsize, resident, shared);
366 if (resident == -1) {
367 return NS_ERROR_FAILURE;
369 *aN = resident - shared;
370 return NS_OK;
373 #elif defined(XP_MACOSX)
375 # include <mach/mach_init.h>
376 # include <mach/mach_vm.h>
377 # include <mach/shared_region.h>
378 # include <mach/task.h>
379 # include <sys/sysctl.h>
381 [[nodiscard]] static bool GetTaskBasicInfo(struct task_basic_info* aTi) {
382 mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
383 kern_return_t kr =
384 task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)aTi, &count);
385 return kr == KERN_SUCCESS;
388 // The VSIZE figure on Mac includes huge amounts of shared memory and is always
389 // absurdly high, eg. 2GB+ even at start-up. But both 'top' and 'ps' report
390 // it, so we might as well too.
391 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
392 [[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
393 task_basic_info ti;
394 if (!GetTaskBasicInfo(&ti)) {
395 return NS_ERROR_FAILURE;
397 *aN = ti.virtual_size;
398 return NS_OK;
401 // If we're using jemalloc on Mac, we need to instruct jemalloc to purge the
402 // pages it has madvise(MADV_FREE)'d before we read our RSS in order to get
403 // an accurate result. The OS will take away MADV_FREE'd pages when there's
404 // memory pressure, so ideally, they shouldn't count against our RSS.
406 // Purging these pages can take a long time for some users (see bug 789975),
407 // so we provide the option to get the RSS without purging first.
408 [[nodiscard]] static nsresult ResidentDistinguishedAmountHelper(int64_t* aN,
409 bool aDoPurge) {
410 # ifdef HAVE_JEMALLOC_STATS
411 if (aDoPurge) {
412 Telemetry::AutoTimer<Telemetry::MEMORY_FREE_PURGED_PAGES_MS> timer;
413 jemalloc_purge_freed_pages();
415 # endif
417 task_basic_info ti;
418 if (!GetTaskBasicInfo(&ti)) {
419 return NS_ERROR_FAILURE;
421 *aN = ti.resident_size;
422 return NS_OK;
425 [[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
426 return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ false);
429 [[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
430 return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ true);
433 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
435 static bool InSharedRegion(mach_vm_address_t aAddr, cpu_type_t aType) {
436 mach_vm_address_t base;
437 mach_vm_address_t size;
439 switch (aType) {
440 case CPU_TYPE_ARM:
441 base = SHARED_REGION_BASE_ARM;
442 size = SHARED_REGION_SIZE_ARM;
443 break;
444 case CPU_TYPE_I386:
445 base = SHARED_REGION_BASE_I386;
446 size = SHARED_REGION_SIZE_I386;
447 break;
448 case CPU_TYPE_X86_64:
449 base = SHARED_REGION_BASE_X86_64;
450 size = SHARED_REGION_SIZE_X86_64;
451 break;
452 default:
453 return false;
456 return base <= aAddr && aAddr < (base + size);
459 [[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
460 int64_t* aN, mach_port_t aPort = 0) {
461 if (!aN) {
462 return NS_ERROR_FAILURE;
465 cpu_type_t cpu_type;
466 size_t len = sizeof(cpu_type);
467 if (sysctlbyname("sysctl.proc_cputype", &cpu_type, &len, NULL, 0) != 0) {
468 return NS_ERROR_FAILURE;
471 // Roughly based on libtop_update_vm_regions in
472 // http://www.opensource.apple.com/source/top/top-100.1.2/libtop.c
473 size_t privatePages = 0;
474 mach_vm_size_t size = 0;
475 for (mach_vm_address_t addr = MACH_VM_MIN_ADDRESS;; addr += size) {
476 vm_region_top_info_data_t info;
477 mach_msg_type_number_t infoCount = VM_REGION_TOP_INFO_COUNT;
478 mach_port_t objectName;
480 kern_return_t kr = mach_vm_region(
481 aPort ? aPort : mach_task_self(), &addr, &size, VM_REGION_TOP_INFO,
482 reinterpret_cast<vm_region_info_t>(&info), &infoCount, &objectName);
483 if (kr == KERN_INVALID_ADDRESS) {
484 // Done iterating VM regions.
485 break;
486 } else if (kr != KERN_SUCCESS) {
487 return NS_ERROR_FAILURE;
490 if (InSharedRegion(addr, cpu_type) && info.share_mode != SM_PRIVATE) {
491 continue;
494 switch (info.share_mode) {
495 case SM_LARGE_PAGE:
496 // NB: Large pages are not shareable and always resident.
497 case SM_PRIVATE:
498 privatePages += info.private_pages_resident;
499 privatePages += info.shared_pages_resident;
500 break;
501 case SM_COW:
502 privatePages += info.private_pages_resident;
503 if (info.ref_count == 1) {
504 // Treat copy-on-write pages as private if they only have one
505 // reference.
506 privatePages += info.shared_pages_resident;
508 break;
509 case SM_SHARED:
510 default:
511 break;
515 vm_size_t pageSize;
516 if (host_page_size(aPort ? aPort : mach_task_self(), &pageSize) !=
517 KERN_SUCCESS) {
518 pageSize = PAGE_SIZE;
521 *aN = privatePages * pageSize;
522 return NS_OK;
525 #elif defined(XP_WIN)
527 # include <windows.h>
528 # include <psapi.h>
529 # include <algorithm>
531 # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
532 [[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
533 MEMORYSTATUSEX s;
534 s.dwLength = sizeof(s);
536 if (!GlobalMemoryStatusEx(&s)) {
537 return NS_ERROR_FAILURE;
540 *aN = s.ullTotalVirtual - s.ullAvailVirtual;
541 return NS_OK;
544 [[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
545 PROCESS_MEMORY_COUNTERS pmc;
546 pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS);
548 if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) {
549 return NS_ERROR_FAILURE;
552 *aN = pmc.WorkingSetSize;
553 return NS_OK;
556 [[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
557 return ResidentDistinguishedAmount(aN);
560 # define HAVE_RESIDENT_UNIQUE_REPORTER 1
562 [[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
563 int64_t* aN, HANDLE aProcess = nullptr) {
564 // Determine how many entries we need.
565 PSAPI_WORKING_SET_INFORMATION tmp;
566 DWORD tmpSize = sizeof(tmp);
567 memset(&tmp, 0, tmpSize);
569 HANDLE proc = aProcess ? aProcess : GetCurrentProcess();
570 QueryWorkingSet(proc, &tmp, tmpSize);
572 // Fudge the size in case new entries are added between calls.
573 size_t entries = tmp.NumberOfEntries * 2;
575 if (!entries) {
576 return NS_ERROR_FAILURE;
579 DWORD infoArraySize = tmpSize + (entries * sizeof(PSAPI_WORKING_SET_BLOCK));
580 UniqueFreePtr<PSAPI_WORKING_SET_INFORMATION> infoArray(
581 static_cast<PSAPI_WORKING_SET_INFORMATION*>(malloc(infoArraySize)));
583 if (!infoArray) {
584 return NS_ERROR_FAILURE;
587 if (!QueryWorkingSet(proc, infoArray.get(), infoArraySize)) {
588 return NS_ERROR_FAILURE;
591 entries = static_cast<size_t>(infoArray->NumberOfEntries);
592 size_t privatePages = 0;
593 for (size_t i = 0; i < entries; i++) {
594 // Count shared pages that only one process is using as private.
595 if (!infoArray->WorkingSetInfo[i].Shared ||
596 infoArray->WorkingSetInfo[i].ShareCount <= 1) {
597 privatePages++;
601 SYSTEM_INFO si;
602 GetSystemInfo(&si);
604 *aN = privatePages * si.dwPageSize;
605 return NS_OK;
608 # define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
609 [[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
610 int64_t* aN) {
611 SIZE_T biggestRegion = 0;
612 MEMORY_BASIC_INFORMATION vmemInfo = {0};
613 for (size_t currentAddress = 0;;) {
614 if (!VirtualQuery((LPCVOID)currentAddress, &vmemInfo, sizeof(vmemInfo))) {
615 // Something went wrong, just return whatever we've got already.
616 break;
619 if (vmemInfo.State == MEM_FREE) {
620 biggestRegion = std::max(biggestRegion, vmemInfo.RegionSize);
623 SIZE_T lastAddress = currentAddress;
624 currentAddress += vmemInfo.RegionSize;
626 // If we overflow, we've examined all of the address space.
627 if (currentAddress < lastAddress) {
628 break;
632 *aN = biggestRegion;
633 return NS_OK;
636 # define HAVE_PRIVATE_REPORTER 1
637 [[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
638 PROCESS_MEMORY_COUNTERS_EX pmcex;
639 pmcex.cb = sizeof(PROCESS_MEMORY_COUNTERS_EX);
641 if (!GetProcessMemoryInfo(GetCurrentProcess(),
642 (PPROCESS_MEMORY_COUNTERS)&pmcex, sizeof(pmcex))) {
643 return NS_ERROR_FAILURE;
646 *aN = pmcex.PrivateUsage;
647 return NS_OK;
650 # define HAVE_SYSTEM_HEAP_REPORTER 1
651 // Windows can have multiple separate heaps, but we should not touch non-default
652 // heaps because they may be destroyed at anytime while we hold a handle. So we
653 // count only the default heap.
654 [[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
655 HANDLE heap = GetProcessHeap();
657 NS_ENSURE_TRUE(HeapLock(heap), NS_ERROR_FAILURE);
659 int64_t heapSize = 0;
660 PROCESS_HEAP_ENTRY entry;
661 entry.lpData = nullptr;
662 while (HeapWalk(heap, &entry)) {
663 // We don't count entry.cbOverhead, because we just want to measure the
664 // space available to the program.
665 if (entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) {
666 heapSize += entry.cbData;
670 // Check this result only after unlocking the heap, so that we don't leave
671 // the heap locked if there was an error.
672 DWORD lastError = GetLastError();
674 // I have no idea how things would proceed if unlocking this heap failed...
675 NS_ENSURE_TRUE(HeapUnlock(heap), NS_ERROR_FAILURE);
677 NS_ENSURE_TRUE(lastError == ERROR_NO_MORE_ITEMS, NS_ERROR_FAILURE);
679 *aSizeOut = heapSize;
680 return NS_OK;
683 struct SegmentKind {
684 DWORD mState;
685 DWORD mType;
686 DWORD mProtect;
687 int mIsStack;
690 struct SegmentEntry : public PLDHashEntryHdr {
691 static PLDHashNumber HashKey(const void* aKey) {
692 auto kind = static_cast<const SegmentKind*>(aKey);
693 return mozilla::HashGeneric(kind->mState, kind->mType, kind->mProtect,
694 kind->mIsStack);
697 static bool MatchEntry(const PLDHashEntryHdr* aEntry, const void* aKey) {
698 auto kind = static_cast<const SegmentKind*>(aKey);
699 auto entry = static_cast<const SegmentEntry*>(aEntry);
700 return kind->mState == entry->mKind.mState &&
701 kind->mType == entry->mKind.mType &&
702 kind->mProtect == entry->mKind.mProtect &&
703 kind->mIsStack == entry->mKind.mIsStack;
706 static void InitEntry(PLDHashEntryHdr* aEntry, const void* aKey) {
707 auto kind = static_cast<const SegmentKind*>(aKey);
708 auto entry = static_cast<SegmentEntry*>(aEntry);
709 entry->mKind = *kind;
710 entry->mCount = 0;
711 entry->mSize = 0;
714 static const PLDHashTableOps Ops;
716 SegmentKind mKind; // The segment kind.
717 uint32_t mCount; // The number of segments of this kind.
718 size_t mSize; // The combined size of segments of this kind.
721 /* static */ const PLDHashTableOps SegmentEntry::Ops = {
722 SegmentEntry::HashKey, SegmentEntry::MatchEntry,
723 PLDHashTable::MoveEntryStub, PLDHashTable::ClearEntryStub,
724 SegmentEntry::InitEntry};
726 class WindowsAddressSpaceReporter final : public nsIMemoryReporter {
727 ~WindowsAddressSpaceReporter() {}
729 public:
730 NS_DECL_ISUPPORTS
732 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
733 nsISupports* aData, bool aAnonymize) override {
734 // First iterate over all the segments and record how many of each kind
735 // there were and their aggregate sizes. We use a hash table for this
736 // because there are a couple of dozen different kinds possible.
738 PLDHashTable table(&SegmentEntry::Ops, sizeof(SegmentEntry));
739 MEMORY_BASIC_INFORMATION info = {0};
740 bool isPrevSegStackGuard = false;
741 for (size_t currentAddress = 0;;) {
742 if (!VirtualQuery((LPCVOID)currentAddress, &info, sizeof(info))) {
743 // Something went wrong, just return whatever we've got already.
744 break;
747 size_t size = info.RegionSize;
749 // Note that |type| and |protect| are ignored in some cases.
750 DWORD state = info.State;
751 DWORD type =
752 (state == MEM_RESERVE || state == MEM_COMMIT) ? info.Type : 0;
753 DWORD protect = (state == MEM_COMMIT) ? info.Protect : 0;
754 bool isStack = isPrevSegStackGuard && state == MEM_COMMIT &&
755 type == MEM_PRIVATE && protect == PAGE_READWRITE;
757 SegmentKind kind = {state, type, protect, isStack ? 1 : 0};
758 auto entry =
759 static_cast<SegmentEntry*>(table.Add(&kind, mozilla::fallible));
760 if (entry) {
761 entry->mCount += 1;
762 entry->mSize += size;
765 isPrevSegStackGuard = info.State == MEM_COMMIT &&
766 info.Type == MEM_PRIVATE &&
767 info.Protect == (PAGE_READWRITE | PAGE_GUARD);
769 size_t lastAddress = currentAddress;
770 currentAddress += size;
772 // If we overflow, we've examined all of the address space.
773 if (currentAddress < lastAddress) {
774 break;
778 // Then iterate over the hash table and report the details for each segment
779 // kind.
781 for (auto iter = table.Iter(); !iter.Done(); iter.Next()) {
782 // For each range of pages, we consider one or more of its State, Type
783 // and Protect values. These are documented at
784 // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx
785 // (for State and Type) and
786 // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx
787 // (for Protect).
789 // Not all State values have accompanying Type and Protection values.
790 bool doType = false;
791 bool doProtect = false;
793 auto entry = static_cast<const SegmentEntry*>(iter.Get());
795 nsCString path("address-space");
797 switch (entry->mKind.mState) {
798 case MEM_FREE:
799 path.AppendLiteral("/free");
800 break;
802 case MEM_RESERVE:
803 path.AppendLiteral("/reserved");
804 doType = true;
805 break;
807 case MEM_COMMIT:
808 path.AppendLiteral("/commit");
809 doType = true;
810 doProtect = true;
811 break;
813 default:
814 // Should be impossible, but handle it just in case.
815 path.AppendLiteral("/???");
816 break;
819 if (doType) {
820 switch (entry->mKind.mType) {
821 case MEM_IMAGE:
822 path.AppendLiteral("/image");
823 break;
825 case MEM_MAPPED:
826 path.AppendLiteral("/mapped");
827 break;
829 case MEM_PRIVATE:
830 path.AppendLiteral("/private");
831 break;
833 default:
834 // Should be impossible, but handle it just in case.
835 path.AppendLiteral("/???");
836 break;
840 if (doProtect) {
841 DWORD protect = entry->mKind.mProtect;
842 // Basic attributes. Exactly one of these should be set.
843 if (protect & PAGE_EXECUTE) {
844 path.AppendLiteral("/execute");
846 if (protect & PAGE_EXECUTE_READ) {
847 path.AppendLiteral("/execute-read");
849 if (protect & PAGE_EXECUTE_READWRITE) {
850 path.AppendLiteral("/execute-readwrite");
852 if (protect & PAGE_EXECUTE_WRITECOPY) {
853 path.AppendLiteral("/execute-writecopy");
855 if (protect & PAGE_NOACCESS) {
856 path.AppendLiteral("/noaccess");
858 if (protect & PAGE_READONLY) {
859 path.AppendLiteral("/readonly");
861 if (protect & PAGE_READWRITE) {
862 path.AppendLiteral("/readwrite");
864 if (protect & PAGE_WRITECOPY) {
865 path.AppendLiteral("/writecopy");
868 // Modifiers. At most one of these should be set.
869 if (protect & PAGE_GUARD) {
870 path.AppendLiteral("+guard");
872 if (protect & PAGE_NOCACHE) {
873 path.AppendLiteral("+nocache");
875 if (protect & PAGE_WRITECOMBINE) {
876 path.AppendLiteral("+writecombine");
879 // Annotate likely stack segments, too.
880 if (entry->mKind.mIsStack) {
881 path.AppendLiteral("+stack");
885 // Append the segment count.
886 path.AppendPrintf("(segments=%u)", entry->mCount);
888 aHandleReport->Callback(""_ns, path, KIND_OTHER, UNITS_BYTES,
889 entry->mSize, "From MEMORY_BASIC_INFORMATION."_ns,
890 aData);
893 return NS_OK;
896 NS_IMPL_ISUPPORTS(WindowsAddressSpaceReporter, nsIMemoryReporter)
898 #endif // XP_<PLATFORM>
900 #ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
901 class VsizeMaxContiguousReporter final : public nsIMemoryReporter {
902 ~VsizeMaxContiguousReporter() {}
904 public:
905 NS_DECL_ISUPPORTS
907 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
908 nsISupports* aData, bool aAnonymize) override {
909 int64_t amount;
910 if (NS_SUCCEEDED(VsizeMaxContiguousDistinguishedAmount(&amount))) {
911 MOZ_COLLECT_REPORT(
912 "vsize-max-contiguous", KIND_OTHER, UNITS_BYTES, amount,
913 "Size of the maximum contiguous block of available virtual memory.");
915 return NS_OK;
918 NS_IMPL_ISUPPORTS(VsizeMaxContiguousReporter, nsIMemoryReporter)
919 #endif
921 #ifdef HAVE_PRIVATE_REPORTER
922 class PrivateReporter final : public nsIMemoryReporter {
923 ~PrivateReporter() {}
925 public:
926 NS_DECL_ISUPPORTS
928 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
929 nsISupports* aData, bool aAnonymize) override {
930 int64_t amount;
931 if (NS_SUCCEEDED(PrivateDistinguishedAmount(&amount))) {
932 // clang-format off
933 MOZ_COLLECT_REPORT(
934 "private", KIND_OTHER, UNITS_BYTES, amount,
935 "Memory that cannot be shared with other processes, including memory that is "
936 "committed and marked MEM_PRIVATE, data that is not mapped, and executable "
937 "pages that have been written to.");
938 // clang-format on
940 return NS_OK;
943 NS_IMPL_ISUPPORTS(PrivateReporter, nsIMemoryReporter)
944 #endif
946 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
947 class VsizeReporter final : public nsIMemoryReporter {
948 ~VsizeReporter() = default;
950 public:
951 NS_DECL_ISUPPORTS
953 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
954 nsISupports* aData, bool aAnonymize) override {
955 int64_t amount;
956 if (NS_SUCCEEDED(VsizeDistinguishedAmount(&amount))) {
957 // clang-format off
958 MOZ_COLLECT_REPORT(
959 "vsize", KIND_OTHER, UNITS_BYTES, amount,
960 "Memory mapped by the process, including code and data segments, the heap, "
961 "thread stacks, memory explicitly mapped by the process via mmap and similar "
962 "operations, and memory shared with other processes. This is the vsize figure "
963 "as reported by 'top' and 'ps'. This figure is of limited use on Mac, where "
964 "processes share huge amounts of memory with one another. But even on other "
965 "operating systems, 'resident' is a much better measure of the memory "
966 "resources used by the process.");
967 // clang-format on
969 return NS_OK;
972 NS_IMPL_ISUPPORTS(VsizeReporter, nsIMemoryReporter)
974 class ResidentReporter final : public nsIMemoryReporter {
975 ~ResidentReporter() = default;
977 public:
978 NS_DECL_ISUPPORTS
980 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
981 nsISupports* aData, bool aAnonymize) override {
982 int64_t amount;
983 if (NS_SUCCEEDED(ResidentDistinguishedAmount(&amount))) {
984 // clang-format off
985 MOZ_COLLECT_REPORT(
986 "resident", KIND_OTHER, UNITS_BYTES, amount,
987 "Memory mapped by the process that is present in physical memory, also known "
988 "as the resident set size (RSS). This is the best single figure to use when "
989 "considering the memory resources used by the process, but it depends both on "
990 "other processes being run and details of the OS kernel and so is best used "
991 "for comparing the memory usage of a single process at different points in "
992 "time.");
993 // clang-format on
995 return NS_OK;
998 NS_IMPL_ISUPPORTS(ResidentReporter, nsIMemoryReporter)
1000 #endif // HAVE_VSIZE_AND_RESIDENT_REPORTERS
1002 #ifdef HAVE_RESIDENT_UNIQUE_REPORTER
1003 class ResidentUniqueReporter final : public nsIMemoryReporter {
1004 ~ResidentUniqueReporter() = default;
1006 public:
1007 NS_DECL_ISUPPORTS
1009 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1010 nsISupports* aData, bool aAnonymize) override {
1011 int64_t amount = 0;
1012 if (NS_SUCCEEDED(ResidentUniqueDistinguishedAmount(&amount))) {
1013 // clang-format off
1014 MOZ_COLLECT_REPORT(
1015 "resident-unique", KIND_OTHER, UNITS_BYTES, amount,
1016 "Memory mapped by the process that is present in physical memory and not "
1017 "shared with any other processes. This is also known as the process's unique "
1018 "set size (USS). This is the amount of RAM we'd expect to be freed if we "
1019 "closed this process.");
1020 // clang-format on
1022 return NS_OK;
1025 NS_IMPL_ISUPPORTS(ResidentUniqueReporter, nsIMemoryReporter)
1027 #endif // HAVE_RESIDENT_UNIQUE_REPORTER
1029 #ifdef HAVE_SYSTEM_HEAP_REPORTER
1031 class SystemHeapReporter final : public nsIMemoryReporter {
1032 ~SystemHeapReporter() = default;
1034 public:
1035 NS_DECL_ISUPPORTS
1037 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1038 nsISupports* aData, bool aAnonymize) override {
1039 int64_t amount;
1040 if (NS_SUCCEEDED(SystemHeapSize(&amount))) {
1041 // clang-format off
1042 MOZ_COLLECT_REPORT(
1043 "system-heap-allocated", KIND_OTHER, UNITS_BYTES, amount,
1044 "Memory used by the system allocator that is currently allocated to the "
1045 "application. This is distinct from the jemalloc heap that Firefox uses for "
1046 "most or all of its heap allocations. Ideally this number is zero, but "
1047 "on some platforms we cannot force every heap allocation through jemalloc.");
1048 // clang-format on
1050 return NS_OK;
1053 NS_IMPL_ISUPPORTS(SystemHeapReporter, nsIMemoryReporter)
1054 #endif // HAVE_SYSTEM_HEAP_REPORTER
1056 #ifdef XP_UNIX
1058 # include <sys/resource.h>
1060 # define HAVE_RESIDENT_PEAK_REPORTER 1
1062 [[nodiscard]] static nsresult ResidentPeakDistinguishedAmount(int64_t* aN) {
1063 struct rusage usage;
1064 if (0 == getrusage(RUSAGE_SELF, &usage)) {
1065 // The units for ru_maxrrs:
1066 // - Mac: bytes
1067 // - Solaris: pages? But some sources it actually always returns 0, so
1068 // check for that
1069 // - Linux, {Net/Open/Free}BSD, DragonFly: KiB
1070 # ifdef XP_MACOSX
1071 *aN = usage.ru_maxrss;
1072 # elif defined(SOLARIS)
1073 *aN = usage.ru_maxrss * getpagesize();
1074 # else
1075 *aN = usage.ru_maxrss * 1024;
1076 # endif
1077 if (*aN > 0) {
1078 return NS_OK;
1081 return NS_ERROR_FAILURE;
1084 class ResidentPeakReporter final : public nsIMemoryReporter {
1085 ~ResidentPeakReporter() = default;
1087 public:
1088 NS_DECL_ISUPPORTS
1090 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1091 nsISupports* aData, bool aAnonymize) override {
1092 int64_t amount = 0;
1093 if (NS_SUCCEEDED(ResidentPeakDistinguishedAmount(&amount))) {
1094 MOZ_COLLECT_REPORT(
1095 "resident-peak", KIND_OTHER, UNITS_BYTES, amount,
1096 "The peak 'resident' value for the lifetime of the process.");
1098 return NS_OK;
1101 NS_IMPL_ISUPPORTS(ResidentPeakReporter, nsIMemoryReporter)
1103 # define HAVE_PAGE_FAULT_REPORTERS 1
1105 class PageFaultsSoftReporter final : public nsIMemoryReporter {
1106 ~PageFaultsSoftReporter() = default;
1108 public:
1109 NS_DECL_ISUPPORTS
1111 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1112 nsISupports* aData, bool aAnonymize) override {
1113 struct rusage usage;
1114 int err = getrusage(RUSAGE_SELF, &usage);
1115 if (err == 0) {
1116 int64_t amount = usage.ru_minflt;
1117 // clang-format off
1118 MOZ_COLLECT_REPORT(
1119 "page-faults-soft", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
1120 "The number of soft page faults (also known as 'minor page faults') that "
1121 "have occurred since the process started. A soft page fault occurs when the "
1122 "process tries to access a page which is present in physical memory but is "
1123 "not mapped into the process's address space. For instance, a process might "
1124 "observe soft page faults when it loads a shared library which is already "
1125 "present in physical memory. A process may experience many thousands of soft "
1126 "page faults even when the machine has plenty of available physical memory, "
1127 "and because the OS services a soft page fault without accessing the disk, "
1128 "they impact performance much less than hard page faults.");
1129 // clang-format on
1131 return NS_OK;
1134 NS_IMPL_ISUPPORTS(PageFaultsSoftReporter, nsIMemoryReporter)
1136 [[nodiscard]] static nsresult PageFaultsHardDistinguishedAmount(
1137 int64_t* aAmount) {
1138 struct rusage usage;
1139 int err = getrusage(RUSAGE_SELF, &usage);
1140 if (err != 0) {
1141 return NS_ERROR_FAILURE;
1143 *aAmount = usage.ru_majflt;
1144 return NS_OK;
1147 class PageFaultsHardReporter final : public nsIMemoryReporter {
1148 ~PageFaultsHardReporter() = default;
1150 public:
1151 NS_DECL_ISUPPORTS
1153 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1154 nsISupports* aData, bool aAnonymize) override {
1155 int64_t amount = 0;
1156 if (NS_SUCCEEDED(PageFaultsHardDistinguishedAmount(&amount))) {
1157 // clang-format off
1158 MOZ_COLLECT_REPORT(
1159 "page-faults-hard", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
1160 "The number of hard page faults (also known as 'major page faults') that have "
1161 "occurred since the process started. A hard page fault occurs when a process "
1162 "tries to access a page which is not present in physical memory. The "
1163 "operating system must access the disk in order to fulfill a hard page fault. "
1164 "When memory is plentiful, you should see very few hard page faults. But if "
1165 "the process tries to use more memory than your machine has available, you "
1166 "may see many thousands of hard page faults. Because accessing the disk is up "
1167 "to a million times slower than accessing RAM, the program may run very "
1168 "slowly when it is experiencing more than 100 or so hard page faults a "
1169 "second.");
1170 // clang-format on
1172 return NS_OK;
1175 NS_IMPL_ISUPPORTS(PageFaultsHardReporter, nsIMemoryReporter)
1177 #endif // XP_UNIX
/*
** memory reporter implementation for jemalloc and OSX malloc,
** to obtain info on total memory in use (that we know about,
** at least -- on OSX, there are sometimes other zones in use).
*/
1185 #ifdef HAVE_JEMALLOC_STATS
1187 static size_t HeapOverhead(jemalloc_stats_t* aStats) {
1188 return aStats->waste + aStats->bookkeeping + aStats->page_cache +
1189 aStats->bin_unused;
1192 // This has UNITS_PERCENTAGE, so it is multiplied by 100x *again* on top of the
1193 // 100x for the percentage.
1194 static int64_t HeapOverheadFraction(jemalloc_stats_t* aStats) {
1195 size_t heapOverhead = HeapOverhead(aStats);
1196 size_t heapCommitted = aStats->allocated + heapOverhead;
1197 return int64_t(10000 * (heapOverhead / (double)heapCommitted));
1200 class JemallocHeapReporter final : public nsIMemoryReporter {
1201 ~JemallocHeapReporter() = default;
1203 public:
1204 NS_DECL_ISUPPORTS
1206 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1207 nsISupports* aData, bool aAnonymize) override {
1208 jemalloc_stats_t stats;
1209 jemalloc_bin_stats_t bin_stats[JEMALLOC_MAX_STATS_BINS];
1210 jemalloc_stats(&stats, bin_stats);
1212 // clang-format off
1213 MOZ_COLLECT_REPORT(
1214 "heap-committed/allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
1215 "Memory mapped by the heap allocator that is currently allocated to the "
1216 "application. This may exceed the amount of memory requested by the "
1217 "application because the allocator regularly rounds up request sizes. (The "
1218 "exact amount requested is not recorded.)");
1220 MOZ_COLLECT_REPORT(
1221 "heap-allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
1222 "The same as 'heap-committed/allocated'.");
1224 // We mark this and the other heap-overhead reporters as KIND_NONHEAP
1225 // because KIND_HEAP memory means "counted in heap-allocated", which
1226 // this is not.
1227 for (auto& bin : bin_stats) {
1228 if (!bin.size) {
1229 continue;
1231 nsPrintfCString path("explicit/heap-overhead/bin-unused/bin-%zu",
1232 bin.size);
1233 aHandleReport->Callback(EmptyCString(), path, KIND_NONHEAP, UNITS_BYTES,
1234 bin.bytes_unused,
1235 nsLiteralCString(
1236 "Unused bytes in all runs of all bins for this size class"),
1237 aData);
1240 if (stats.waste > 0) {
1241 MOZ_COLLECT_REPORT(
1242 "explicit/heap-overhead/waste", KIND_NONHEAP, UNITS_BYTES,
1243 stats.waste,
1244 "Committed bytes which do not correspond to an active allocation and which the "
1245 "allocator is not intentionally keeping alive (i.e., not "
1246 "'explicit/heap-overhead/{bookkeeping,page-cache,bin-unused}').");
1249 MOZ_COLLECT_REPORT(
1250 "explicit/heap-overhead/bookkeeping", KIND_NONHEAP, UNITS_BYTES,
1251 stats.bookkeeping,
1252 "Committed bytes which the heap allocator uses for internal data structures.");
1254 MOZ_COLLECT_REPORT(
1255 "explicit/heap-overhead/page-cache", KIND_NONHEAP, UNITS_BYTES,
1256 stats.page_cache,
1257 "Memory which the allocator could return to the operating system, but hasn't. "
1258 "The allocator keeps this memory around as an optimization, so it doesn't "
1259 "have to ask the OS the next time it needs to fulfill a request. This value "
1260 "is typically not larger than a few megabytes.");
1262 MOZ_COLLECT_REPORT(
1263 "heap-committed/overhead", KIND_OTHER, UNITS_BYTES,
1264 HeapOverhead(&stats),
1265 "The sum of 'explicit/heap-overhead/*'.");
1267 MOZ_COLLECT_REPORT(
1268 "heap-mapped", KIND_OTHER, UNITS_BYTES, stats.mapped,
1269 "Amount of memory currently mapped. Includes memory that is uncommitted, i.e. "
1270 "neither in physical memory nor paged to disk.");
1272 MOZ_COLLECT_REPORT(
1273 "heap-chunksize", KIND_OTHER, UNITS_BYTES, stats.chunksize,
1274 "Size of chunks.");
1275 // clang-format on
1277 return NS_OK;
1280 NS_IMPL_ISUPPORTS(JemallocHeapReporter, nsIMemoryReporter)
1282 #endif // HAVE_JEMALLOC_STATS
1284 // Why is this here? At first glance, you'd think it could be defined and
1285 // registered with nsMemoryReporterManager entirely within nsAtomTable.cpp.
1286 // However, the obvious time to register it is when the table is initialized,
1287 // and that happens before XPCOM components are initialized, which means the
1288 // RegisterStrongMemoryReporter call fails. So instead we do it here.
1289 class AtomTablesReporter final : public nsIMemoryReporter {
1290 MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
1292 ~AtomTablesReporter() = default;
1294 public:
1295 NS_DECL_ISUPPORTS
1297 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1298 nsISupports* aData, bool aAnonymize) override {
1299 AtomsSizes sizes;
1300 NS_AddSizeOfAtoms(MallocSizeOf, sizes);
1302 MOZ_COLLECT_REPORT("explicit/atoms/table", KIND_HEAP, UNITS_BYTES,
1303 sizes.mTable, "Memory used by the atom table.");
1305 MOZ_COLLECT_REPORT(
1306 "explicit/atoms/dynamic-objects-and-chars", KIND_HEAP, UNITS_BYTES,
1307 sizes.mDynamicAtoms,
1308 "Memory used by dynamic atom objects and chars (which are stored "
1309 "at the end of each atom object).");
1311 return NS_OK;
1314 NS_IMPL_ISUPPORTS(AtomTablesReporter, nsIMemoryReporter)
1316 class ThreadsReporter final : public nsIMemoryReporter {
1317 MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
1318 ~ThreadsReporter() = default;
1320 public:
1321 NS_DECL_ISUPPORTS
1323 NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1324 nsISupports* aData, bool aAnonymize) override {
1325 #ifdef XP_LINUX
1326 nsTArray<MemoryMapping> mappings(1024);
1327 MOZ_TRY(GetMemoryMappings(mappings));
1328 #endif
1330 // Enumerating over active threads requires holding a lock, so we collect
1331 // info on all threads, and then call our reporter callbacks after releasing
1332 // the lock.
1333 struct ThreadData {
1334 nsCString mName;
1335 uint32_t mThreadId;
1336 size_t mPrivateSize;
1338 AutoTArray<ThreadData, 32> threads;
1340 size_t eventQueueSizes = 0;
1341 size_t wrapperSizes = 0;
1342 size_t threadCount = 0;
1344 for (auto* thread : nsThread::Enumerate()) {
1345 threadCount++;
1346 eventQueueSizes += thread->SizeOfEventQueues(MallocSizeOf);
1347 wrapperSizes += thread->ShallowSizeOfIncludingThis(MallocSizeOf);
1349 if (!thread->StackBase()) {
1350 continue;
1353 #if defined(XP_LINUX)
1354 int idx = mappings.BinaryIndexOf(thread->StackBase());
1355 if (idx < 0) {
1356 continue;
1358 // Referenced() is the combined size of all pages in the region which have
1359 // ever been touched, and are therefore consuming memory. For stack
1360 // regions, these pages are guaranteed to be un-shared unless we fork
1361 // after creating threads (which we don't).
1362 size_t privateSize = mappings[idx].Referenced();
1364 // On Linux, we have to be very careful matching memory regions to thread
1365 // stacks.
1367 // To begin with, the kernel only reports VM stats for regions of all
1368 // adjacent pages with the same flags, protection, and backing file.
1369 // There's no way to get finer-grained usage information for a subset of
1370 // those pages.
1372 // Stack segments always have a guard page at the bottom of the stack
1373 // (assuming we only support stacks that grow down), so there's no danger
1374 // of them being merged with other stack regions. At the top, there's no
1375 // protection page, and no way to allocate one without using pthreads
1376 // directly and allocating our own stacks. So we get around the problem by
1377 // adding an extra VM flag (NOHUGEPAGES) to our stack region, which we
1378 // don't expect to be set on any heap regions. But this is not fool-proof.
1380 // A second kink is that different C libraries (and different versions
1381 // thereof) report stack base locations and sizes differently with regard
1382 // to the guard page. For the libraries that include the guard page in the
1383 // stack size base pointer, we need to adjust those values to compensate.
1384 // But it's possible that our logic will get out of sync with library
1385 // changes, or someone will compile with an unexpected library.
1388 // The upshot of all of this is that there may be configurations that our
1389 // special cases don't cover. And if there are, we want to know about it.
1390 // So assert that total size of the memory region we're reporting actually
1391 // matches the allocated size of the thread stack.
1392 # ifndef ANDROID
1393 MOZ_ASSERT(mappings[idx].Size() == thread->StackSize(),
1394 "Mapping region size doesn't match stack allocation size");
1395 # endif
1396 #elif defined(XP_WIN)
1397 auto memInfo = MemoryInfo::Get(thread->StackBase(), thread->StackSize());
1398 size_t privateSize = memInfo.Committed();
1399 #else
1400 size_t privateSize = thread->StackSize();
1401 MOZ_ASSERT_UNREACHABLE(
1402 "Shouldn't have stack base pointer on this "
1403 "platform");
1404 #endif
1406 threads.AppendElement(ThreadData{
1407 nsCString(PR_GetThreadName(thread->GetPRThread())),
1408 thread->ThreadId(),
1409 // On Linux, it's possible (but unlikely) that our stack region will
1410 // have been merged with adjacent heap regions, in which case we'll
1411 // get combined size information for both. So we take the minimum of
1412 // the reported private size and the requested stack size to avoid the
1413 // possible of majorly over-reporting in that case.
1414 std::min(privateSize, thread->StackSize()),
1418 for (auto& thread : threads) {
1419 nsPrintfCString path("explicit/threads/stacks/%s (tid=%u)",
1420 thread.mName.get(), thread.mThreadId);
1422 aHandleReport->Callback(
1423 ""_ns, path, KIND_NONHEAP, UNITS_BYTES, thread.mPrivateSize,
1424 nsLiteralCString("The sizes of thread stacks which have been "
1425 "committed to memory."),
1426 aData);
1429 MOZ_COLLECT_REPORT("explicit/threads/overhead/event-queues", KIND_HEAP,
1430 UNITS_BYTES, eventQueueSizes,
1431 "The sizes of nsThread event queues and observers.");
1433 MOZ_COLLECT_REPORT("explicit/threads/overhead/wrappers", KIND_HEAP,
1434 UNITS_BYTES, wrapperSizes,
1435 "The sizes of nsThread/PRThread wrappers.");
1437 #if defined(XP_WIN)
1438 // Each thread on Windows has a fixed kernel overhead. For 32 bit Windows,
1439 // that's 12K. For 64 bit, it's 24K.
1441 // See
1442 // https://blogs.technet.microsoft.com/markrussinovich/2009/07/05/pushing-the-limits-of-windows-processes-and-threads/
1443 constexpr size_t kKernelSize = (sizeof(void*) == 8 ? 24 : 12) * 1024;
1444 #elif defined(XP_LINUX)
1445 // On Linux, kernel stacks are usually 8K. However, on x86, they are
1446 // allocated virtually, and start out at 4K. They may grow to 8K, but we
1447 // have no way of knowing which ones do, so all we can do is guess.
1448 # if defined(__x86_64__) || defined(__i386__)
1449 constexpr size_t kKernelSize = 4 * 1024;
1450 # else
1451 constexpr size_t kKernelSize = 8 * 1024;
1452 # endif
1453 #elif defined(XP_MACOSX)
1454 // On Darwin, kernel stacks are 16K:
1456 // https://books.google.com/books?id=K8vUkpOXhN4C&lpg=PA513&dq=mach%20kernel%20thread%20stack%20size&pg=PA513#v=onepage&q=mach%20kernel%20thread%20stack%20size&f=false
1457 constexpr size_t kKernelSize = 16 * 1024;
1458 #else
1459 // Elsewhere, just assume that kernel stacks require at least 8K.
1460 constexpr size_t kKernelSize = 8 * 1024;
1461 #endif
1463 MOZ_COLLECT_REPORT("explicit/threads/overhead/kernel", KIND_NONHEAP,
1464 UNITS_BYTES, threadCount * kKernelSize,
1465 "The total kernel overhead for all active threads.");
1467 return NS_OK;
1470 NS_IMPL_ISUPPORTS(ThreadsReporter, nsIMemoryReporter)
#ifdef DEBUG

// Ideally this would be implemented in BlockingResourceBase.cpp, but doing so
// breaks the linking step of various unit tests by adding a libdmd dependency
// for a commonly used feature (mutexes) in DMD builds. So instead we do it
// here.
class DeadlockDetectorReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  ~DeadlockDetectorReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    MOZ_COLLECT_REPORT(
        "explicit/deadlock-detector", KIND_HEAP, UNITS_BYTES,
        BlockingResourceBase::SizeOfDeadlockDetector(MallocSizeOf),
        "Memory used by the deadlock detector.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(DeadlockDetectorReporter, nsIMemoryReporter)

#endif
#ifdef MOZ_DMD

namespace mozilla {
namespace dmd {

// Reports the memory consumed by DMD's own bookkeeping structures.
class DMDReporter final : public nsIMemoryReporter {
  ~DMDReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    dmd::Sizes sizes;
    dmd::SizeOf(&sizes);

    MOZ_COLLECT_REPORT(
        "explicit/dmd/stack-traces/used", KIND_HEAP, UNITS_BYTES,
        sizes.mStackTracesUsed,
        "Memory used by stack traces which correspond to at least "
        "one heap block DMD is tracking.");

    MOZ_COLLECT_REPORT(
        "explicit/dmd/stack-traces/unused", KIND_HEAP, UNITS_BYTES,
        sizes.mStackTracesUnused,
        "Memory used by stack traces which don't correspond to any heap "
        "blocks DMD is currently tracking.");

    MOZ_COLLECT_REPORT("explicit/dmd/stack-traces/table", KIND_HEAP,
                       UNITS_BYTES, sizes.mStackTraceTable,
                       "Memory used by DMD's stack trace table.");

    MOZ_COLLECT_REPORT("explicit/dmd/live-block-table", KIND_HEAP, UNITS_BYTES,
                       sizes.mLiveBlockTable,
                       "Memory used by DMD's live block table.");

    MOZ_COLLECT_REPORT("explicit/dmd/dead-block-list", KIND_HEAP, UNITS_BYTES,
                       sizes.mDeadBlockTable,
                       "Memory used by DMD's dead block list.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(DMDReporter, nsIMemoryReporter)

}  // namespace dmd
}  // namespace mozilla

#endif  // MOZ_DMD
/*
** nsMemoryReporterManager implementation
*/
// The manager is both the reporter registry (nsIMemoryReporterManager) and a
// reporter in its own right (nsIMemoryReporter; see CollectReports below).
NS_IMPL_ISUPPORTS(nsMemoryReporterManager, nsIMemoryReporterManager,
                  nsIMemoryReporter)
1558 NS_IMETHODIMP
1559 nsMemoryReporterManager::Init() {
1560 if (!NS_IsMainThread()) {
1561 MOZ_CRASH();
1564 // Under normal circumstances this function is only called once. However,
1565 // we've (infrequently) seen memory report dumps in crash reports that
1566 // suggest that this function is sometimes called multiple times. That in
1567 // turn means that multiple reporters of each kind are registered, which
1568 // leads to duplicated reports of individual measurements such as "resident",
1569 // "vsize", etc.
1571 // It's unclear how these multiple calls can occur. The only plausible theory
1572 // so far is badly-written extensions, because this function is callable from
1573 // JS code via nsIMemoryReporter.idl.
1575 // Whatever the cause, it's a bad thing. So we protect against it with the
1576 // following check.
1577 static bool isInited = false;
1578 if (isInited) {
1579 NS_WARNING("nsMemoryReporterManager::Init() has already been called!");
1580 return NS_OK;
1582 isInited = true;
1584 #ifdef HAVE_JEMALLOC_STATS
1585 RegisterStrongReporter(new JemallocHeapReporter());
1586 #endif
1588 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
1589 RegisterStrongReporter(new VsizeReporter());
1590 RegisterStrongReporter(new ResidentReporter());
1591 #endif
1593 #ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
1594 RegisterStrongReporter(new VsizeMaxContiguousReporter());
1595 #endif
1597 #ifdef HAVE_RESIDENT_PEAK_REPORTER
1598 RegisterStrongReporter(new ResidentPeakReporter());
1599 #endif
1601 #ifdef HAVE_RESIDENT_UNIQUE_REPORTER
1602 RegisterStrongReporter(new ResidentUniqueReporter());
1603 #endif
1605 #ifdef HAVE_PAGE_FAULT_REPORTERS
1606 RegisterStrongReporter(new PageFaultsSoftReporter());
1607 RegisterStrongReporter(new PageFaultsHardReporter());
1608 #endif
1610 #ifdef HAVE_PRIVATE_REPORTER
1611 RegisterStrongReporter(new PrivateReporter());
1612 #endif
1614 #ifdef HAVE_SYSTEM_HEAP_REPORTER
1615 RegisterStrongReporter(new SystemHeapReporter());
1616 #endif
1618 RegisterStrongReporter(new AtomTablesReporter());
1620 RegisterStrongReporter(new ThreadsReporter());
1622 #ifdef DEBUG
1623 RegisterStrongReporter(new DeadlockDetectorReporter());
1624 #endif
1626 #ifdef MOZ_GECKO_PROFILER
1627 // We have to register this here rather than in profiler_init() because
1628 // profiler_init() runs prior to nsMemoryReporterManager's creation.
1629 RegisterStrongReporter(new GeckoProfilerReporter());
1630 #endif
1632 #ifdef MOZ_DMD
1633 RegisterStrongReporter(new mozilla::dmd::DMDReporter());
1634 #endif
1636 #ifdef XP_WIN
1637 RegisterStrongReporter(new WindowsAddressSpaceReporter());
1638 #endif
1640 #ifdef XP_UNIX
1641 nsMemoryInfoDumper::Initialize();
1642 #endif
1644 // Report our own memory usage as well.
1645 RegisterWeakReporter(this);
1647 return NS_OK;
1650 nsMemoryReporterManager::nsMemoryReporterManager()
1651 : mMutex("nsMemoryReporterManager::mMutex"),
1652 mIsRegistrationBlocked(false),
1653 mStrongReporters(new StrongReportersTable()),
1654 mWeakReporters(new WeakReportersTable()),
1655 mSavedStrongReporters(nullptr),
1656 mSavedWeakReporters(nullptr),
1657 mNextGeneration(1),
1658 mPendingProcessesState(nullptr),
1659 mPendingReportersState(nullptr)
1660 #ifdef HAVE_JEMALLOC_STATS
1662 mThreadPool(do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID))
1663 #endif
1667 nsMemoryReporterManager::~nsMemoryReporterManager() {
1668 delete mStrongReporters;
1669 delete mWeakReporters;
1670 NS_ASSERTION(!mSavedStrongReporters, "failed to restore strong reporters");
1671 NS_ASSERTION(!mSavedWeakReporters, "failed to restore weak reporters");
1674 NS_IMETHODIMP
1675 nsMemoryReporterManager::CollectReports(nsIHandleReportCallback* aHandleReport,
1676 nsISupports* aData, bool aAnonymize) {
1677 size_t n = MallocSizeOf(this);
1678 n += mStrongReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
1679 n += mWeakReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
1681 MOZ_COLLECT_REPORT("explicit/memory-reporter-manager", KIND_HEAP, UNITS_BYTES,
1682 n, "Memory used by the memory reporter infrastructure.");
1684 return NS_OK;
// Debug logging for the multi-process reporting state machine below; compiles
// to nothing unless DEBUG_CHILD_PROCESS_MEMORY_REPORTING is defined.
#ifdef DEBUG_CHILD_PROCESS_MEMORY_REPORTING
#  define MEMORY_REPORTING_LOG(format, ...) \
    printf_stderr("++++ MEMORY REPORTING: " format, ##__VA_ARGS__);
#else
#  define MEMORY_REPORTING_LOG(...)
#endif
1694 NS_IMETHODIMP
1695 nsMemoryReporterManager::GetReports(
1696 nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
1697 nsIFinishReportingCallback* aFinishReporting,
1698 nsISupports* aFinishReportingData, bool aAnonymize) {
1699 return GetReportsExtended(aHandleReport, aHandleReportData, aFinishReporting,
1700 aFinishReportingData, aAnonymize,
1701 /* minimize = */ false,
1702 /* DMDident = */ u""_ns);
1705 NS_IMETHODIMP
1706 nsMemoryReporterManager::GetReportsExtended(
1707 nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
1708 nsIFinishReportingCallback* aFinishReporting,
1709 nsISupports* aFinishReportingData, bool aAnonymize, bool aMinimize,
1710 const nsAString& aDMDDumpIdent) {
1711 nsresult rv;
1713 // Memory reporters are not necessarily threadsafe, so this function must
1714 // be called from the main thread.
1715 if (!NS_IsMainThread()) {
1716 MOZ_CRASH();
1719 uint32_t generation = mNextGeneration++;
1721 if (mPendingProcessesState) {
1722 // A request is in flight. Don't start another one. And don't report
1723 // an error; just ignore it, and let the in-flight request finish.
1724 MEMORY_REPORTING_LOG("GetReports (gen=%u, s->gen=%u): abort\n", generation,
1725 mPendingProcessesState->mGeneration);
1726 return NS_OK;
1729 MEMORY_REPORTING_LOG("GetReports (gen=%u)\n", generation);
1731 uint32_t concurrency = Preferences::GetUint("memory.report_concurrency", 1);
1732 MOZ_ASSERT(concurrency >= 1);
1733 if (concurrency < 1) {
1734 concurrency = 1;
1736 mPendingProcessesState = new PendingProcessesState(
1737 generation, aAnonymize, aMinimize, concurrency, aHandleReport,
1738 aHandleReportData, aFinishReporting, aFinishReportingData, aDMDDumpIdent);
1740 if (aMinimize) {
1741 nsCOMPtr<nsIRunnable> callback =
1742 NewRunnableMethod("nsMemoryReporterManager::StartGettingReports", this,
1743 &nsMemoryReporterManager::StartGettingReports);
1744 rv = MinimizeMemoryUsage(callback);
1745 } else {
1746 rv = StartGettingReports();
1748 return rv;
1751 nsresult nsMemoryReporterManager::StartGettingReports() {
1752 PendingProcessesState* s = mPendingProcessesState;
1753 nsresult rv;
1755 // Get reports for this process.
1756 FILE* parentDMDFile = nullptr;
1757 #ifdef MOZ_DMD
1758 if (!s->mDMDDumpIdent.IsEmpty()) {
1759 rv = nsMemoryInfoDumper::OpenDMDFile(s->mDMDDumpIdent, getpid(),
1760 &parentDMDFile);
1761 if (NS_WARN_IF(NS_FAILED(rv))) {
1762 // Proceed with the memory report as if DMD were disabled.
1763 parentDMDFile = nullptr;
1766 #endif
1768 // This is async.
1769 GetReportsForThisProcessExtended(
1770 s->mHandleReport, s->mHandleReportData, s->mAnonymize, parentDMDFile,
1771 s->mFinishReporting, s->mFinishReportingData);
1773 nsTArray<dom::ContentParent*> childWeakRefs;
1774 dom::ContentParent::GetAll(childWeakRefs);
1775 if (!childWeakRefs.IsEmpty()) {
1776 // Request memory reports from child processes. This happens
1777 // after the parent report so that the parent's main thread will
1778 // be free to process the child reports, instead of causing them
1779 // to be buffered and consume (possibly scarce) memory.
1781 for (size_t i = 0; i < childWeakRefs.Length(); ++i) {
1782 s->mChildrenPending.AppendElement(childWeakRefs[i]);
1786 if (gfx::GPUProcessManager* gpu = gfx::GPUProcessManager::Get()) {
1787 if (RefPtr<MemoryReportingProcess> proc = gpu->GetProcessMemoryReporter()) {
1788 s->mChildrenPending.AppendElement(proc.forget());
1792 if (RDDProcessManager* rdd = RDDProcessManager::Get()) {
1793 if (RefPtr<MemoryReportingProcess> proc = rdd->GetProcessMemoryReporter()) {
1794 s->mChildrenPending.AppendElement(proc.forget());
1798 if (gfx::VRProcessManager* vr = gfx::VRProcessManager::Get()) {
1799 if (RefPtr<MemoryReportingProcess> proc = vr->GetProcessMemoryReporter()) {
1800 s->mChildrenPending.AppendElement(proc.forget());
1804 if (!mIsRegistrationBlocked && net::gIOService) {
1805 if (RefPtr<MemoryReportingProcess> proc =
1806 net::gIOService->GetSocketProcessMemoryReporter()) {
1807 s->mChildrenPending.AppendElement(proc.forget());
1811 if (!s->mChildrenPending.IsEmpty()) {
1812 nsCOMPtr<nsITimer> timer;
1813 rv = NS_NewTimerWithFuncCallback(
1814 getter_AddRefs(timer), TimeoutCallback, this, kTimeoutLengthMS,
1815 nsITimer::TYPE_ONE_SHOT,
1816 "nsMemoryReporterManager::StartGettingReports");
1817 if (NS_WARN_IF(NS_FAILED(rv))) {
1818 FinishReporting();
1819 return rv;
1822 MOZ_ASSERT(!s->mTimer);
1823 s->mTimer.swap(timer);
1826 return NS_OK;
1829 void nsMemoryReporterManager::DispatchReporter(
1830 nsIMemoryReporter* aReporter, bool aIsAsync,
1831 nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
1832 bool aAnonymize) {
1833 MOZ_ASSERT(mPendingReportersState);
1835 // Grab refs to everything used in the lambda function.
1836 RefPtr<nsMemoryReporterManager> self = this;
1837 nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
1838 nsCOMPtr<nsIHandleReportCallback> handleReport = aHandleReport;
1839 nsCOMPtr<nsISupports> handleReportData = aHandleReportData;
1841 nsCOMPtr<nsIRunnable> event = NS_NewRunnableFunction(
1842 "nsMemoryReporterManager::DispatchReporter",
1843 [self, reporter, aIsAsync, handleReport, handleReportData, aAnonymize]() {
1844 reporter->CollectReports(handleReport, handleReportData, aAnonymize);
1845 if (!aIsAsync) {
1846 self->EndReport();
1850 NS_DispatchToMainThread(event);
1851 mPendingReportersState->mReportsPending++;
1854 NS_IMETHODIMP
1855 nsMemoryReporterManager::GetReportsForThisProcessExtended(
1856 nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
1857 bool aAnonymize, FILE* aDMDFile,
1858 nsIFinishReportingCallback* aFinishReporting,
1859 nsISupports* aFinishReportingData) {
1860 // Memory reporters are not necessarily threadsafe, so this function must
1861 // be called from the main thread.
1862 if (!NS_IsMainThread()) {
1863 MOZ_CRASH();
1866 if (NS_WARN_IF(mPendingReportersState)) {
1867 // Report is already in progress.
1868 return NS_ERROR_IN_PROGRESS;
1871 #ifdef MOZ_DMD
1872 if (aDMDFile) {
1873 // Clear DMD's reportedness state before running the memory
1874 // reporters, to avoid spurious twice-reported warnings.
1875 dmd::ClearReports();
1877 #else
1878 MOZ_ASSERT(!aDMDFile);
1879 #endif
1881 mPendingReportersState = new PendingReportersState(
1882 aFinishReporting, aFinishReportingData, aDMDFile);
1885 mozilla::MutexAutoLock autoLock(mMutex);
1887 for (auto iter = mStrongReporters->Iter(); !iter.Done(); iter.Next()) {
1888 DispatchReporter(iter.Key(), iter.Data(), aHandleReport,
1889 aHandleReportData, aAnonymize);
1892 for (auto iter = mWeakReporters->Iter(); !iter.Done(); iter.Next()) {
1893 nsCOMPtr<nsIMemoryReporter> reporter = iter.Key();
1894 DispatchReporter(reporter, iter.Data(), aHandleReport, aHandleReportData,
1895 aAnonymize);
1899 return NS_OK;
1902 NS_IMETHODIMP
1903 nsMemoryReporterManager::EndReport() {
1904 if (--mPendingReportersState->mReportsPending == 0) {
1905 #ifdef MOZ_DMD
1906 if (mPendingReportersState->mDMDFile) {
1907 nsMemoryInfoDumper::DumpDMDToFile(mPendingReportersState->mDMDFile);
1909 #endif
1910 if (mPendingProcessesState) {
1911 // This is the parent process.
1912 EndProcessReport(mPendingProcessesState->mGeneration, true);
1913 } else {
1914 mPendingReportersState->mFinishReporting->Callback(
1915 mPendingReportersState->mFinishReportingData);
1918 delete mPendingReportersState;
1919 mPendingReportersState = nullptr;
1922 return NS_OK;
1925 nsMemoryReporterManager::PendingProcessesState*
1926 nsMemoryReporterManager::GetStateForGeneration(uint32_t aGeneration) {
1927 // Memory reporting only happens on the main thread.
1928 MOZ_RELEASE_ASSERT(NS_IsMainThread());
1930 PendingProcessesState* s = mPendingProcessesState;
1932 if (!s) {
1933 // If we reach here, then:
1935 // - A child process reported back too late, and no subsequent request
1936 // is in flight.
1938 // So there's nothing to be done. Just ignore it.
1939 MEMORY_REPORTING_LOG("HandleChildReports: no request in flight (aGen=%u)\n",
1940 aGeneration);
1941 return nullptr;
1944 if (aGeneration != s->mGeneration) {
1945 // If we reach here, a child process must have reported back, too late,
1946 // while a subsequent (higher-numbered) request is in flight. Again,
1947 // ignore it.
1948 MOZ_ASSERT(aGeneration < s->mGeneration);
1949 MEMORY_REPORTING_LOG(
1950 "HandleChildReports: gen mismatch (aGen=%u, s->gen=%u)\n", aGeneration,
1951 s->mGeneration);
1952 return nullptr;
1955 return s;
1958 // This function has no return value. If something goes wrong, there's no
1959 // clear place to report the problem to, but that's ok -- we will end up
1960 // hitting the timeout and executing TimeoutCallback().
1961 void nsMemoryReporterManager::HandleChildReport(
1962 uint32_t aGeneration, const dom::MemoryReport& aChildReport) {
1963 PendingProcessesState* s = GetStateForGeneration(aGeneration);
1964 if (!s) {
1965 return;
1968 // Child reports should have a non-empty process.
1969 MOZ_ASSERT(!aChildReport.process().IsEmpty());
1971 // If the call fails, ignore and continue.
1972 s->mHandleReport->Callback(aChildReport.process(), aChildReport.path(),
1973 aChildReport.kind(), aChildReport.units(),
1974 aChildReport.amount(), aChildReport.desc(),
1975 s->mHandleReportData);
1978 /* static */
1979 bool nsMemoryReporterManager::StartChildReport(
1980 mozilla::MemoryReportingProcess* aChild,
1981 const PendingProcessesState* aState) {
1982 if (!aChild->IsAlive()) {
1983 MEMORY_REPORTING_LOG(
1984 "StartChildReports (gen=%u): child exited before"
1985 " its report was started\n",
1986 aState->mGeneration);
1987 return false;
1990 Maybe<mozilla::ipc::FileDescriptor> dmdFileDesc;
1991 #ifdef MOZ_DMD
1992 if (!aState->mDMDDumpIdent.IsEmpty()) {
1993 FILE* dmdFile = nullptr;
1994 nsresult rv = nsMemoryInfoDumper::OpenDMDFile(aState->mDMDDumpIdent,
1995 aChild->Pid(), &dmdFile);
1996 if (NS_WARN_IF(NS_FAILED(rv))) {
1997 // Proceed with the memory report as if DMD were disabled.
1998 dmdFile = nullptr;
2000 if (dmdFile) {
2001 dmdFileDesc = Some(mozilla::ipc::FILEToFileDescriptor(dmdFile));
2002 fclose(dmdFile);
2005 #endif
2006 return aChild->SendRequestMemoryReport(
2007 aState->mGeneration, aState->mAnonymize, aState->mMinimize, dmdFileDesc);
2010 void nsMemoryReporterManager::EndProcessReport(uint32_t aGeneration,
2011 bool aSuccess) {
2012 PendingProcessesState* s = GetStateForGeneration(aGeneration);
2013 if (!s) {
2014 return;
2017 MOZ_ASSERT(s->mNumProcessesRunning > 0);
2018 s->mNumProcessesRunning--;
2019 s->mNumProcessesCompleted++;
2020 MEMORY_REPORTING_LOG(
2021 "HandleChildReports (aGen=%u): process %u %s"
2022 " (%u running, %u pending)\n",
2023 aGeneration, s->mNumProcessesCompleted,
2024 aSuccess ? "completed" : "exited during report", s->mNumProcessesRunning,
2025 static_cast<unsigned>(s->mChildrenPending.Length()));
2027 // Start pending children up to the concurrency limit.
2028 while (s->mNumProcessesRunning < s->mConcurrencyLimit &&
2029 !s->mChildrenPending.IsEmpty()) {
2030 // Pop last element from s->mChildrenPending
2031 const RefPtr<MemoryReportingProcess> nextChild =
2032 s->mChildrenPending.PopLastElement();
2033 // Start report (if the child is still alive).
2034 if (StartChildReport(nextChild, s)) {
2035 ++s->mNumProcessesRunning;
2036 MEMORY_REPORTING_LOG(
2037 "HandleChildReports (aGen=%u): started child report"
2038 " (%u running, %u pending)\n",
2039 aGeneration, s->mNumProcessesRunning,
2040 static_cast<unsigned>(s->mChildrenPending.Length()));
2044 // If all the child processes (if any) have reported, we can cancel
2045 // the timer (if started) and finish up. Otherwise, just return.
2046 if (s->mNumProcessesRunning == 0) {
2047 MOZ_ASSERT(s->mChildrenPending.IsEmpty());
2048 if (s->mTimer) {
2049 s->mTimer->Cancel();
2051 FinishReporting();
2055 /* static */
2056 void nsMemoryReporterManager::TimeoutCallback(nsITimer* aTimer, void* aData) {
2057 nsMemoryReporterManager* mgr = static_cast<nsMemoryReporterManager*>(aData);
2058 PendingProcessesState* s = mgr->mPendingProcessesState;
2060 // Release assert because: if the pointer is null we're about to
2061 // crash regardless of DEBUG, and this way the compiler doesn't
2062 // complain about unused variables.
2063 MOZ_RELEASE_ASSERT(s, "mgr->mPendingProcessesState");
2064 MEMORY_REPORTING_LOG("TimeoutCallback (s->gen=%u; %u running, %u pending)\n",
2065 s->mGeneration, s->mNumProcessesRunning,
2066 static_cast<unsigned>(s->mChildrenPending.Length()));
2068 // We don't bother sending any kind of cancellation message to the child
2069 // processes that haven't reported back.
2070 mgr->FinishReporting();
2073 nsresult nsMemoryReporterManager::FinishReporting() {
2074 // Memory reporting only happens on the main thread.
2075 if (!NS_IsMainThread()) {
2076 MOZ_CRASH();
2079 MOZ_ASSERT(mPendingProcessesState);
2080 MEMORY_REPORTING_LOG("FinishReporting (s->gen=%u; %u processes reported)\n",
2081 mPendingProcessesState->mGeneration,
2082 mPendingProcessesState->mNumProcessesCompleted);
2084 // Call this before deleting |mPendingProcessesState|. That way, if
2085 // |mFinishReportData| calls GetReports(), it will silently abort, as
2086 // required.
2087 nsresult rv = mPendingProcessesState->mFinishReporting->Callback(
2088 mPendingProcessesState->mFinishReportingData);
2090 delete mPendingProcessesState;
2091 mPendingProcessesState = nullptr;
2092 return rv;
// Per-request bookkeeping for one GetReports() invocation.  Captures the
// request parameters (generation, anonymize/minimize flags, concurrency
// limit, DMD dump identifier) and the two callback pairs, and seeds the
// process counters: the parent process counts as the first running reporter.
nsMemoryReporterManager::PendingProcessesState::PendingProcessesState(
    uint32_t aGeneration, bool aAnonymize, bool aMinimize,
    uint32_t aConcurrencyLimit, nsIHandleReportCallback* aHandleReport,
    nsISupports* aHandleReportData,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData, const nsAString& aDMDDumpIdent)
    : mGeneration(aGeneration),
      mAnonymize(aAnonymize),
      mMinimize(aMinimize),
      mChildrenPending(),
      mNumProcessesRunning(1),  // reporting starts with the parent
      mNumProcessesCompleted(0),
      mConcurrencyLimit(aConcurrencyLimit),
      mHandleReport(aHandleReport),
      mHandleReportData(aHandleReportData),
      mFinishReporting(aFinishReporting),
      mFinishReportingData(aFinishReportingData),
      mDMDDumpIdent(aDMDDumpIdent) {}
2114 static void CrashIfRefcountIsZero(nsISupports* aObj) {
2115 // This will probably crash if the object's refcount is 0.
2116 uint32_t refcnt = NS_ADDREF(aObj);
2117 if (refcnt <= 1) {
2118 MOZ_CRASH("CrashIfRefcountIsZero: refcount is zero");
2120 NS_RELEASE(aObj);
2123 nsresult nsMemoryReporterManager::RegisterReporterHelper(
2124 nsIMemoryReporter* aReporter, bool aForce, bool aStrong, bool aIsAsync) {
2125 // This method is thread-safe.
2126 mozilla::MutexAutoLock autoLock(mMutex);
2128 if (mIsRegistrationBlocked && !aForce) {
2129 return NS_ERROR_FAILURE;
2132 if (mStrongReporters->Contains(aReporter) ||
2133 mWeakReporters->Contains(aReporter)) {
2134 return NS_ERROR_FAILURE;
2137 // If |aStrong| is true, |aReporter| may have a refcnt of 0, so we take
2138 // a kung fu death grip before calling PutEntry. Otherwise, if PutEntry
2139 // addref'ed and released |aReporter| before finally addref'ing it for
2140 // good, it would free aReporter! The kung fu death grip could itself be
2141 // problematic if PutEntry didn't addref |aReporter| (because then when the
2142 // death grip goes out of scope, we would delete the reporter). In debug
2143 // mode, we check that this doesn't happen.
2145 // If |aStrong| is false, we require that |aReporter| have a non-zero
2146 // refcnt.
2148 if (aStrong) {
2149 nsCOMPtr<nsIMemoryReporter> kungFuDeathGrip = aReporter;
2150 mStrongReporters->Put(aReporter, aIsAsync);
2151 CrashIfRefcountIsZero(aReporter);
2152 } else {
2153 CrashIfRefcountIsZero(aReporter);
2154 nsCOMPtr<nsIXPConnectWrappedJS> jsComponent = do_QueryInterface(aReporter);
2155 if (jsComponent) {
2156 // We cannot allow non-native reporters (WrappedJS), since we'll be
2157 // holding onto a raw pointer, which would point to the wrapper,
2158 // and that wrapper is likely to go away as soon as this register
2159 // call finishes. This would then lead to subsequent crashes in
2160 // CollectReports().
2161 return NS_ERROR_XPC_BAD_CONVERT_JS;
2163 mWeakReporters->Put(aReporter, aIsAsync);
2166 return NS_OK;
2169 NS_IMETHODIMP
2170 nsMemoryReporterManager::RegisterStrongReporter(nsIMemoryReporter* aReporter) {
2171 return RegisterReporterHelper(aReporter, /* force = */ false,
2172 /* strong = */ true,
2173 /* async = */ false);
2176 NS_IMETHODIMP
2177 nsMemoryReporterManager::RegisterStrongAsyncReporter(
2178 nsIMemoryReporter* aReporter) {
2179 return RegisterReporterHelper(aReporter, /* force = */ false,
2180 /* strong = */ true,
2181 /* async = */ true);
2184 NS_IMETHODIMP
2185 nsMemoryReporterManager::RegisterWeakReporter(nsIMemoryReporter* aReporter) {
2186 return RegisterReporterHelper(aReporter, /* force = */ false,
2187 /* strong = */ false,
2188 /* async = */ false);
2191 NS_IMETHODIMP
2192 nsMemoryReporterManager::RegisterWeakAsyncReporter(
2193 nsIMemoryReporter* aReporter) {
2194 return RegisterReporterHelper(aReporter, /* force = */ false,
2195 /* strong = */ false,
2196 /* async = */ true);
2199 NS_IMETHODIMP
2200 nsMemoryReporterManager::RegisterStrongReporterEvenIfBlocked(
2201 nsIMemoryReporter* aReporter) {
2202 return RegisterReporterHelper(aReporter, /* force = */ true,
2203 /* strong = */ true,
2204 /* async = */ false);
2207 NS_IMETHODIMP
2208 nsMemoryReporterManager::UnregisterStrongReporter(
2209 nsIMemoryReporter* aReporter) {
2210 // This method is thread-safe.
2211 mozilla::MutexAutoLock autoLock(mMutex);
2213 MOZ_ASSERT(!mWeakReporters->Contains(aReporter));
2215 if (mStrongReporters->Contains(aReporter)) {
2216 mStrongReporters->Remove(aReporter);
2217 return NS_OK;
2220 // We don't register new reporters when the block is in place, but we do
2221 // unregister existing reporters. This is so we don't keep holding strong
2222 // references that these reporters aren't expecting (which can keep them
2223 // alive longer than intended).
2224 if (mSavedStrongReporters && mSavedStrongReporters->Contains(aReporter)) {
2225 mSavedStrongReporters->Remove(aReporter);
2226 return NS_OK;
2229 return NS_ERROR_FAILURE;
2232 NS_IMETHODIMP
2233 nsMemoryReporterManager::UnregisterWeakReporter(nsIMemoryReporter* aReporter) {
2234 // This method is thread-safe.
2235 mozilla::MutexAutoLock autoLock(mMutex);
2237 MOZ_ASSERT(!mStrongReporters->Contains(aReporter));
2239 if (mWeakReporters->Contains(aReporter)) {
2240 mWeakReporters->Remove(aReporter);
2241 return NS_OK;
2244 // We don't register new reporters when the block is in place, but we do
2245 // unregister existing reporters. This is so we don't keep holding weak
2246 // references that the old reporters aren't expecting (which can end up as
2247 // dangling pointers that lead to use-after-frees).
2248 if (mSavedWeakReporters && mSavedWeakReporters->Contains(aReporter)) {
2249 mSavedWeakReporters->Remove(aReporter);
2250 return NS_OK;
2253 return NS_ERROR_FAILURE;
2256 NS_IMETHODIMP
2257 nsMemoryReporterManager::BlockRegistrationAndHideExistingReporters() {
2258 // This method is thread-safe.
2259 mozilla::MutexAutoLock autoLock(mMutex);
2260 if (mIsRegistrationBlocked) {
2261 return NS_ERROR_FAILURE;
2263 mIsRegistrationBlocked = true;
2265 // Hide the existing reporters, saving them for later restoration.
2266 MOZ_ASSERT(!mSavedStrongReporters);
2267 MOZ_ASSERT(!mSavedWeakReporters);
2268 mSavedStrongReporters = mStrongReporters;
2269 mSavedWeakReporters = mWeakReporters;
2270 mStrongReporters = new StrongReportersTable();
2271 mWeakReporters = new WeakReportersTable();
2273 return NS_OK;
2276 NS_IMETHODIMP
2277 nsMemoryReporterManager::UnblockRegistrationAndRestoreOriginalReporters() {
2278 // This method is thread-safe.
2279 mozilla::MutexAutoLock autoLock(mMutex);
2280 if (!mIsRegistrationBlocked) {
2281 return NS_ERROR_FAILURE;
2284 // Banish the current reporters, and restore the hidden ones.
2285 delete mStrongReporters;
2286 delete mWeakReporters;
2287 mStrongReporters = mSavedStrongReporters;
2288 mWeakReporters = mSavedWeakReporters;
2289 mSavedStrongReporters = nullptr;
2290 mSavedWeakReporters = nullptr;
2292 mIsRegistrationBlocked = false;
2293 return NS_OK;
2296 NS_IMETHODIMP
2297 nsMemoryReporterManager::GetVsize(int64_t* aVsize) {
2298 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2299 return VsizeDistinguishedAmount(aVsize);
2300 #else
2301 *aVsize = 0;
2302 return NS_ERROR_NOT_AVAILABLE;
2303 #endif
2306 NS_IMETHODIMP
2307 nsMemoryReporterManager::GetVsizeMaxContiguous(int64_t* aAmount) {
2308 #ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
2309 return VsizeMaxContiguousDistinguishedAmount(aAmount);
2310 #else
2311 *aAmount = 0;
2312 return NS_ERROR_NOT_AVAILABLE;
2313 #endif
2316 NS_IMETHODIMP
2317 nsMemoryReporterManager::GetResident(int64_t* aAmount) {
2318 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2319 return ResidentDistinguishedAmount(aAmount);
2320 #else
2321 *aAmount = 0;
2322 return NS_ERROR_NOT_AVAILABLE;
2323 #endif
2326 NS_IMETHODIMP
2327 nsMemoryReporterManager::GetResidentFast(int64_t* aAmount) {
2328 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2329 return ResidentFastDistinguishedAmount(aAmount);
2330 #else
2331 *aAmount = 0;
2332 return NS_ERROR_NOT_AVAILABLE;
2333 #endif
2336 /*static*/
2337 int64_t nsMemoryReporterManager::ResidentFast() {
2338 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2339 int64_t amount;
2340 nsresult rv = ResidentFastDistinguishedAmount(&amount);
2341 NS_ENSURE_SUCCESS(rv, 0);
2342 return amount;
2343 #else
2344 return 0;
2345 #endif
2348 NS_IMETHODIMP
2349 nsMemoryReporterManager::GetResidentPeak(int64_t* aAmount) {
2350 #ifdef HAVE_RESIDENT_PEAK_REPORTER
2351 return ResidentPeakDistinguishedAmount(aAmount);
2352 #else
2353 *aAmount = 0;
2354 return NS_ERROR_NOT_AVAILABLE;
2355 #endif
2358 /*static*/
2359 int64_t nsMemoryReporterManager::ResidentPeak() {
2360 #ifdef HAVE_RESIDENT_PEAK_REPORTER
2361 int64_t amount = 0;
2362 nsresult rv = ResidentPeakDistinguishedAmount(&amount);
2363 NS_ENSURE_SUCCESS(rv, 0);
2364 return amount;
2365 #else
2366 return 0;
2367 #endif
2370 NS_IMETHODIMP
2371 nsMemoryReporterManager::GetResidentUnique(int64_t* aAmount) {
2372 #ifdef HAVE_RESIDENT_UNIQUE_REPORTER
2373 return ResidentUniqueDistinguishedAmount(aAmount);
2374 #else
2375 *aAmount = 0;
2376 return NS_ERROR_NOT_AVAILABLE;
2377 #endif
// Per-platform handle type accepted by ResidentUnique().  Use defined()
// rather than evaluating the macro's value so the selection matches the
// `#if defined(XP_WIN) || defined(XP_MACOSX) || defined(XP_LINUX)` guard
// below and cannot misfire if a platform macro is ever defined empty.
typedef
#if defined(XP_WIN)
    HANDLE
#elif defined(XP_MACOSX)
    mach_port_t
#elif defined(XP_LINUX)
    pid_t
#else
    int /*dummy type */
#endif
        ResidentUniqueArg;
2392 #if defined(XP_WIN) || defined(XP_MACOSX) || defined(XP_LINUX)
2394 /*static*/
2395 int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg aProcess) {
2396 int64_t amount = 0;
2397 nsresult rv = ResidentUniqueDistinguishedAmount(&amount, aProcess);
2398 NS_ENSURE_SUCCESS(rv, 0);
2399 return amount;
2402 #else
2404 /*static*/
2405 int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg) {
2406 # ifdef HAVE_RESIDENT_UNIQUE_REPORTER
2407 int64_t amount = 0;
2408 nsresult rv = ResidentUniqueDistinguishedAmount(&amount);
2409 NS_ENSURE_SUCCESS(rv, 0);
2410 return amount;
2411 # else
2412 return 0;
2413 # endif
2416 #endif // XP_{WIN, MACOSX, LINUX, *}
2418 NS_IMETHODIMP
2419 nsMemoryReporterManager::GetHeapAllocated(int64_t* aAmount) {
2420 #ifdef HAVE_JEMALLOC_STATS
2421 jemalloc_stats_t stats;
2422 jemalloc_stats(&stats);
2423 *aAmount = stats.allocated;
2424 return NS_OK;
2425 #else
2426 *aAmount = 0;
2427 return NS_ERROR_NOT_AVAILABLE;
2428 #endif
2431 // This has UNITS_PERCENTAGE, so it is multiplied by 100x.
2432 NS_IMETHODIMP
2433 nsMemoryReporterManager::GetHeapOverheadFraction(int64_t* aAmount) {
2434 #ifdef HAVE_JEMALLOC_STATS
2435 jemalloc_stats_t stats;
2436 jemalloc_stats(&stats);
2437 *aAmount = HeapOverheadFraction(&stats);
2438 return NS_OK;
2439 #else
2440 *aAmount = 0;
2441 return NS_ERROR_NOT_AVAILABLE;
2442 #endif
2445 [[nodiscard]] static nsresult GetInfallibleAmount(InfallibleAmountFn aAmountFn,
2446 int64_t* aAmount) {
2447 if (aAmountFn) {
2448 *aAmount = aAmountFn();
2449 return NS_OK;
2451 *aAmount = 0;
2452 return NS_ERROR_NOT_AVAILABLE;
2455 NS_IMETHODIMP
2456 nsMemoryReporterManager::GetJSMainRuntimeGCHeap(int64_t* aAmount) {
2457 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeGCHeap, aAmount);
2460 NS_IMETHODIMP
2461 nsMemoryReporterManager::GetJSMainRuntimeTemporaryPeak(int64_t* aAmount) {
2462 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeTemporaryPeak, aAmount);
2465 NS_IMETHODIMP
2466 nsMemoryReporterManager::GetJSMainRuntimeCompartmentsSystem(int64_t* aAmount) {
2467 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeCompartmentsSystem,
2468 aAmount);
2471 NS_IMETHODIMP
2472 nsMemoryReporterManager::GetJSMainRuntimeCompartmentsUser(int64_t* aAmount) {
2473 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeCompartmentsUser,
2474 aAmount);
2477 NS_IMETHODIMP
2478 nsMemoryReporterManager::GetJSMainRuntimeRealmsSystem(int64_t* aAmount) {
2479 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsSystem, aAmount);
2482 NS_IMETHODIMP
2483 nsMemoryReporterManager::GetJSMainRuntimeRealmsUser(int64_t* aAmount) {
2484 return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsUser, aAmount);
2487 NS_IMETHODIMP
2488 nsMemoryReporterManager::GetImagesContentUsedUncompressed(int64_t* aAmount) {
2489 return GetInfallibleAmount(mAmountFns.mImagesContentUsedUncompressed,
2490 aAmount);
2493 NS_IMETHODIMP
2494 nsMemoryReporterManager::GetStorageSQLite(int64_t* aAmount) {
2495 return GetInfallibleAmount(mAmountFns.mStorageSQLite, aAmount);
2498 NS_IMETHODIMP
2499 nsMemoryReporterManager::GetLowMemoryEventsVirtual(int64_t* aAmount) {
2500 return GetInfallibleAmount(mAmountFns.mLowMemoryEventsVirtual, aAmount);
2503 NS_IMETHODIMP
2504 nsMemoryReporterManager::GetLowMemoryEventsCommitSpace(int64_t* aAmount) {
2505 return GetInfallibleAmount(mAmountFns.mLowMemoryEventsCommitSpace, aAmount);
2508 NS_IMETHODIMP
2509 nsMemoryReporterManager::GetLowMemoryEventsPhysical(int64_t* aAmount) {
2510 return GetInfallibleAmount(mAmountFns.mLowMemoryEventsPhysical, aAmount);
2513 NS_IMETHODIMP
2514 nsMemoryReporterManager::GetGhostWindows(int64_t* aAmount) {
2515 return GetInfallibleAmount(mAmountFns.mGhostWindows, aAmount);
2518 NS_IMETHODIMP
2519 nsMemoryReporterManager::GetPageFaultsHard(int64_t* aAmount) {
2520 #ifdef HAVE_PAGE_FAULT_REPORTERS
2521 return PageFaultsHardDistinguishedAmount(aAmount);
2522 #else
2523 *aAmount = 0;
2524 return NS_ERROR_NOT_AVAILABLE;
2525 #endif
2528 NS_IMETHODIMP
2529 nsMemoryReporterManager::GetHasMozMallocUsableSize(bool* aHas) {
2530 void* p = malloc(16);
2531 if (!p) {
2532 return NS_ERROR_OUT_OF_MEMORY;
2534 size_t usable = moz_malloc_usable_size(p);
2535 free(p);
2536 *aHas = !!(usable > 0);
2537 return NS_OK;
2540 NS_IMETHODIMP
2541 nsMemoryReporterManager::GetIsDMDEnabled(bool* aIsEnabled) {
2542 #ifdef MOZ_DMD
2543 *aIsEnabled = true;
2544 #else
2545 *aIsEnabled = false;
2546 #endif
2547 return NS_OK;
2550 NS_IMETHODIMP
2551 nsMemoryReporterManager::GetIsDMDRunning(bool* aIsRunning) {
2552 #ifdef MOZ_DMD
2553 *aIsRunning = dmd::IsRunning();
2554 #else
2555 *aIsRunning = false;
2556 #endif
2557 return NS_OK;
2560 namespace {
2563 * This runnable lets us implement
2564 * nsIMemoryReporterManager::MinimizeMemoryUsage(). We fire a heap-minimize
2565 * notification, spin the event loop, and repeat this process a few times.
2567 * When this sequence finishes, we invoke the callback function passed to the
2568 * runnable's constructor.
2570 class MinimizeMemoryUsageRunnable : public Runnable {
2571 public:
2572 explicit MinimizeMemoryUsageRunnable(nsIRunnable* aCallback)
2573 : mozilla::Runnable("MinimizeMemoryUsageRunnable"),
2574 mCallback(aCallback),
2575 mRemainingIters(sNumIters) {}
2577 NS_IMETHOD Run() override {
2578 nsCOMPtr<nsIObserverService> os = services::GetObserverService();
2579 if (!os) {
2580 return NS_ERROR_FAILURE;
2583 if (mRemainingIters == 0) {
2584 os->NotifyObservers(nullptr, "after-minimize-memory-usage",
2585 u"MinimizeMemoryUsageRunnable");
2586 if (mCallback) {
2587 mCallback->Run();
2589 return NS_OK;
2592 os->NotifyObservers(nullptr, "memory-pressure", u"heap-minimize");
2593 mRemainingIters--;
2594 NS_DispatchToMainThread(this);
2596 return NS_OK;
2599 private:
2600 // Send sNumIters heap-minimize notifications, spinning the event
2601 // loop after each notification (see bug 610166 comment 12 for an
2602 // explanation), because one notification doesn't cut it.
2603 static const uint32_t sNumIters = 3;
2605 nsCOMPtr<nsIRunnable> mCallback;
2606 uint32_t mRemainingIters;
2609 } // namespace
2611 NS_IMETHODIMP
2612 nsMemoryReporterManager::MinimizeMemoryUsage(nsIRunnable* aCallback) {
2613 RefPtr<MinimizeMemoryUsageRunnable> runnable =
2614 new MinimizeMemoryUsageRunnable(aCallback);
2616 return NS_DispatchToMainThread(runnable);
2619 NS_IMETHODIMP
2620 nsMemoryReporterManager::SizeOfTab(mozIDOMWindowProxy* aTopWindow,
2621 int64_t* aJSObjectsSize,
2622 int64_t* aJSStringsSize,
2623 int64_t* aJSOtherSize, int64_t* aDomSize,
2624 int64_t* aStyleSize, int64_t* aOtherSize,
2625 int64_t* aTotalSize, double* aJSMilliseconds,
2626 double* aNonJSMilliseconds) {
2627 nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aTopWindow);
2628 auto* piWindow = nsPIDOMWindowOuter::From(aTopWindow);
2629 if (NS_WARN_IF(!global) || NS_WARN_IF(!piWindow)) {
2630 return NS_ERROR_FAILURE;
2633 TimeStamp t1 = TimeStamp::Now();
2635 // Measure JS memory consumption (and possibly some non-JS consumption, via
2636 // |jsPrivateSize|).
2637 size_t jsObjectsSize, jsStringsSize, jsPrivateSize, jsOtherSize;
2638 nsresult rv = mSizeOfTabFns.mJS(global->GetGlobalJSObject(), &jsObjectsSize,
2639 &jsStringsSize, &jsPrivateSize, &jsOtherSize);
2640 if (NS_WARN_IF(NS_FAILED(rv))) {
2641 return rv;
2644 TimeStamp t2 = TimeStamp::Now();
2646 // Measure non-JS memory consumption.
2647 size_t domSize, styleSize, otherSize;
2648 rv = mSizeOfTabFns.mNonJS(piWindow, &domSize, &styleSize, &otherSize);
2649 if (NS_WARN_IF(NS_FAILED(rv))) {
2650 return rv;
2653 TimeStamp t3 = TimeStamp::Now();
2655 *aTotalSize = 0;
2656 #define DO(aN, n) \
2658 *aN = (n); \
2659 *aTotalSize += (n); \
2661 DO(aJSObjectsSize, jsObjectsSize);
2662 DO(aJSStringsSize, jsStringsSize);
2663 DO(aJSOtherSize, jsOtherSize);
2664 DO(aDomSize, jsPrivateSize + domSize);
2665 DO(aStyleSize, styleSize);
2666 DO(aOtherSize, otherSize);
2667 #undef DO
2669 *aJSMilliseconds = (t2 - t1).ToMilliseconds();
2670 *aNonJSMilliseconds = (t3 - t2).ToMilliseconds();
2672 return NS_OK;
2675 namespace mozilla {
2677 #define GET_MEMORY_REPORTER_MANAGER(mgr) \
2678 RefPtr<nsMemoryReporterManager> mgr = \
2679 nsMemoryReporterManager::GetOrCreate(); \
2680 if (!mgr) { \
2681 return NS_ERROR_FAILURE; \
2684 nsresult RegisterStrongMemoryReporter(nsIMemoryReporter* aReporter) {
2685 // Hold a strong reference to the argument to make sure it gets released if
2686 // we return early below.
2687 nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
2688 GET_MEMORY_REPORTER_MANAGER(mgr)
2689 return mgr->RegisterStrongReporter(reporter);
2692 nsresult RegisterStrongAsyncMemoryReporter(nsIMemoryReporter* aReporter) {
2693 // Hold a strong reference to the argument to make sure it gets released if
2694 // we return early below.
2695 nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
2696 GET_MEMORY_REPORTER_MANAGER(mgr)
2697 return mgr->RegisterStrongAsyncReporter(reporter);
2700 nsresult RegisterWeakMemoryReporter(nsIMemoryReporter* aReporter) {
2701 GET_MEMORY_REPORTER_MANAGER(mgr)
2702 return mgr->RegisterWeakReporter(aReporter);
2705 nsresult RegisterWeakAsyncMemoryReporter(nsIMemoryReporter* aReporter) {
2706 GET_MEMORY_REPORTER_MANAGER(mgr)
2707 return mgr->RegisterWeakAsyncReporter(aReporter);
2710 nsresult UnregisterStrongMemoryReporter(nsIMemoryReporter* aReporter) {
2711 GET_MEMORY_REPORTER_MANAGER(mgr)
2712 return mgr->UnregisterStrongReporter(aReporter);
2715 nsresult UnregisterWeakMemoryReporter(nsIMemoryReporter* aReporter) {
2716 GET_MEMORY_REPORTER_MANAGER(mgr)
2717 return mgr->UnregisterWeakReporter(aReporter);
2720 // Macro for generating functions that register distinguished amount functions
2721 // with the memory reporter manager.
2722 #define DEFINE_REGISTER_DISTINGUISHED_AMOUNT(kind, name) \
2723 nsresult Register##name##DistinguishedAmount(kind##AmountFn aAmountFn) { \
2724 GET_MEMORY_REPORTER_MANAGER(mgr) \
2725 mgr->mAmountFns.m##name = aAmountFn; \
2726 return NS_OK; \
2729 // Macro for generating functions that unregister distinguished amount
2730 // functions with the memory reporter manager.
2731 #define DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(name) \
2732 nsresult Unregister##name##DistinguishedAmount() { \
2733 GET_MEMORY_REPORTER_MANAGER(mgr) \
2734 mgr->mAmountFns.m##name = nullptr; \
2735 return NS_OK; \
2738 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeGCHeap)
2739 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeTemporaryPeak)
2740 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible,
2741 JSMainRuntimeCompartmentsSystem)
2742 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeCompartmentsUser)
2743 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsSystem)
2744 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsUser)
2746 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, ImagesContentUsedUncompressed)
2747 DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(ImagesContentUsedUncompressed)
2749 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, StorageSQLite)
2750 DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(StorageSQLite)
2752 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, LowMemoryEventsVirtual)
2753 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, LowMemoryEventsCommitSpace)
2754 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, LowMemoryEventsPhysical)
2756 DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, GhostWindows)
2758 #undef DEFINE_REGISTER_DISTINGUISHED_AMOUNT
2759 #undef DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT
2761 #define DEFINE_REGISTER_SIZE_OF_TAB(name) \
2762 nsresult Register##name##SizeOfTab(name##SizeOfTabFn aSizeOfTabFn) { \
2763 GET_MEMORY_REPORTER_MANAGER(mgr) \
2764 mgr->mSizeOfTabFns.m##name = aSizeOfTabFn; \
2765 return NS_OK; \
2768 DEFINE_REGISTER_SIZE_OF_TAB(JS);
2769 DEFINE_REGISTER_SIZE_OF_TAB(NonJS);
2771 #undef DEFINE_REGISTER_SIZE_OF_TAB
2773 #undef GET_MEMORY_REPORTER_MANAGER
2775 } // namespace mozilla