/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozmemory_wrap.h"

#include <stdlib.h>
#include <mach/mach_types.h>
#include "mozilla/Assertions.h"

// Malloc implementation functions are MOZ_MEMORY_API, and jemalloc
// specific functions MOZ_JEMALLOC_API; see mozmemory_wrap.h

#define MALLOC_DECL(name, return_type, ...) \
  MOZ_MEMORY_API return_type name##_impl(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
#include "malloc_decls.h"

#define MALLOC_DECL(name, return_type, ...) \
  MOZ_JEMALLOC_API return_type name##_impl(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_JEMALLOC
#include "malloc_decls.h"
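// Each MALLOC_DECL above declares the renamed implementation function,
// e.g. MOZ_MEMORY_API void* malloc_impl(size_t); these are what the zone
// hooks below forward to.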

// Definitions of the following structs in malloc/malloc.h might be too old
// for the built binary to run on newer versions of OSX. So use the newest
// possible version of those structs.

typedef struct _malloc_zone_t {
  void* reserved1;
  void* reserved2;
  size_t (*size)(struct _malloc_zone_t*, const void*);
  void* (*malloc)(struct _malloc_zone_t*, size_t);
  void* (*calloc)(struct _malloc_zone_t*, size_t, size_t);
  void* (*valloc)(struct _malloc_zone_t*, size_t);
  void (*free)(struct _malloc_zone_t*, void*);
  void* (*realloc)(struct _malloc_zone_t*, void*, size_t);
  void (*destroy)(struct _malloc_zone_t*);
  const char* zone_name;
  unsigned (*batch_malloc)(struct _malloc_zone_t*, size_t, void**, unsigned);
  void (*batch_free)(struct _malloc_zone_t*, void**, unsigned);
  struct malloc_introspection_t* introspect;
  unsigned version;
  void* (*memalign)(struct _malloc_zone_t*, size_t, size_t);
  void (*free_definite_size)(struct _malloc_zone_t*, void*, size_t);
  size_t (*pressure_relief)(struct _malloc_zone_t*, size_t);
} malloc_zone_t;
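// The memalign, free_definite_size and pressure_relief members above only
// exist in newer zone versions; register_zone() below sets zone.version to 9,
// which includes all of them.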

typedef struct {
  vm_address_t address;
  vm_size_t size;
} vm_range_t;

typedef struct malloc_statistics_t {
  unsigned blocks_in_use;
  size_t size_in_use;
  size_t max_size_in_use;
  size_t size_allocated;
} malloc_statistics_t;

typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void**);

typedef void vm_range_recorder_t(task_t, void*, unsigned type, vm_range_t*,
                                 unsigned);

typedef struct malloc_introspection_t {
  kern_return_t (*enumerator)(task_t, void*, unsigned, vm_address_t,
                              memory_reader_t, vm_range_recorder_t);
  size_t (*good_size)(malloc_zone_t*, size_t);
  boolean_t (*check)(malloc_zone_t*);
  void (*print)(malloc_zone_t*, boolean_t);
  void (*log)(malloc_zone_t*, void*);
  void (*force_lock)(malloc_zone_t*);
  void (*force_unlock)(malloc_zone_t*);
  void (*statistics)(malloc_zone_t*, malloc_statistics_t*);
  boolean_t (*zone_locked)(malloc_zone_t*);
  boolean_t (*enable_discharge_checking)(malloc_zone_t*);
  boolean_t (*disable_discharge_checking)(malloc_zone_t*);
  void (*discharge)(malloc_zone_t*, void*);
#ifdef __BLOCKS__
  void (*enumerate_discharged_pointers)(malloc_zone_t*, void (^)(void*, void*));
#else
  void* enumerate_unavailable_without_blocks;
#endif
  void (*reinit_lock)(malloc_zone_t*);
} malloc_introspection_t;
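// Of these introspection hooks, only enumerator, good_size, check, print,
// log, force_lock, force_unlock, statistics, zone_locked and reinit_lock are
// given (mostly stub) implementations below; the discharge-checking hooks are
// left NULL.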

extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t,
                                          vm_address_t**, unsigned*);

extern malloc_zone_t* malloc_default_zone(void);

extern void malloc_zone_register(malloc_zone_t* zone);

extern void malloc_zone_unregister(malloc_zone_t* zone);

extern malloc_zone_t* malloc_default_purgeable_zone(void);

extern malloc_zone_t* malloc_zone_from_ptr(const void* ptr);

extern void malloc_zone_free(malloc_zone_t* zone, void* ptr);

extern void* malloc_zone_realloc(malloc_zone_t* zone, void* ptr, size_t size);

// The following is an OSX zone allocator implementation.
// /!\ WARNING. It assumes the underlying malloc implementation's
// malloc_usable_size returns 0 when the given pointer is not owned by
// the allocator. Sadly, OSX does call zone_size with pointers not
// owned by the allocator.
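// Note that zone_size returning 0 is also how the system decides that a
// pointer does not belong to this zone (e.g. when malloc_zone_from_ptr looks
// up the owning zone).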

static size_t zone_size(malloc_zone_t* zone, const void* ptr) {
  return malloc_usable_size_impl(ptr);
}

static void* zone_malloc(malloc_zone_t* zone, size_t size) {
  return malloc_impl(size);
}

static void* zone_calloc(malloc_zone_t* zone, size_t num, size_t size) {
  return calloc_impl(num, size);
}

static void* zone_realloc(malloc_zone_t* zone, void* ptr, size_t size) {
  if (malloc_usable_size_impl(ptr)) return realloc_impl(ptr, size);

  // Sometimes, system libraries call malloc_zone_* functions with the wrong
  // zone (e.g. CoreFoundation does). In that case, we need to find the real
  // one. We can't call libSystem's realloc directly because we're exporting
  // realloc from libmozglue and we'd pick that one, so we manually find the
  // right zone and realloc with it.
  malloc_zone_t* real_zone = malloc_zone_from_ptr(ptr);
  // The system allocator crashes voluntarily by default when a pointer can't
  // be traced back to a zone. Do the same.
  MOZ_RELEASE_ASSERT(real_zone);
  MOZ_RELEASE_ASSERT(real_zone != zone);
  return malloc_zone_realloc(real_zone, ptr, size);
}

static void other_zone_free(malloc_zone_t* original_zone, void* ptr) {
  // Sometimes, system libraries call malloc_zone_* functions with the wrong
  // zone (e.g. CoreFoundation does). In that case, we need to find the real
  // one. We can't call libSystem's free directly because we're exporting
  // free from libmozglue and we'd pick that one, so we manually find the
  // right zone and free with it.
  if (!ptr) {
    return;
  }
  malloc_zone_t* zone = malloc_zone_from_ptr(ptr);
  // The system allocator crashes voluntarily by default when a pointer can't
  // be traced back to a zone. Do the same.
  MOZ_RELEASE_ASSERT(zone);
  MOZ_RELEASE_ASSERT(zone != original_zone);
  return malloc_zone_free(zone, ptr);
}

static void zone_free(malloc_zone_t* zone, void* ptr) {
  if (malloc_usable_size_impl(ptr)) {
    free_impl(ptr);
    return;
  }
  other_zone_free(zone, ptr);
}

static void zone_free_definite_size(malloc_zone_t* zone, void* ptr,
                                    size_t size) {
  size_t current_size = malloc_usable_size_impl(ptr);
  if (current_size) {
    MOZ_ASSERT(current_size == size);
    free_impl(ptr);
    return;
  }
  other_zone_free(zone, ptr);
}

static void* zone_memalign(malloc_zone_t* zone, size_t alignment, size_t size) {
  void* ptr;
  if (posix_memalign_impl(&ptr, alignment, size) == 0) return ptr;
  return NULL;
}

static void* zone_valloc(malloc_zone_t* zone, size_t size) {
  return valloc_impl(size);
}

static void zone_destroy(malloc_zone_t* zone) {
  // This function should never be called.
  MOZ_CRASH();
}
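
// Allocate up to num_requested blocks of |size| bytes each, stopping at the
// first failure; returns the number of blocks actually allocated.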
static unsigned zone_batch_malloc(malloc_zone_t* zone, size_t size,
                                  void** results, unsigned num_requested) {
  unsigned i;

  for (i = 0; i < num_requested; i++) {
    results[i] = malloc_impl(size);
    if (!results[i]) break;
  }

  return i;
}

static void zone_batch_free(malloc_zone_t* zone, void** to_be_freed,
                            unsigned num_to_be_freed) {
  unsigned i;

  for (i = 0; i < num_to_be_freed; i++) {
    zone_free(zone, to_be_freed[i]);
    to_be_freed[i] = NULL;
  }
}
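
// We don't release any memory in response to memory pressure, so report that
// 0 bytes were reclaimed.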
static size_t zone_pressure_relief(malloc_zone_t* zone, size_t goal) {
  return 0;
}

static size_t zone_good_size(malloc_zone_t* zone, size_t size) {
  return malloc_good_size_impl(size);
}
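
// Introspection stubs: heap enumeration, consistency checking, printing and
// logging are not supported; the enumerator just reports success without
// recording any ranges.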
static kern_return_t zone_enumerator(task_t task, void* data,
                                     unsigned type_mask,
                                     vm_address_t zone_address,
                                     memory_reader_t reader,
                                     vm_range_recorder_t recorder) {
  return KERN_SUCCESS;
}

static boolean_t zone_check(malloc_zone_t* zone) { return true; }

static void zone_print(malloc_zone_t* zone, boolean_t verbose) {}

static void zone_log(malloc_zone_t* zone, void* address) {}
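
// Fork support: the system calls force_lock/force_unlock (and reinit_lock on
// newer zone versions) around fork(), so they are forwarded to mozjemalloc's
// own fork hooks, declared below.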
extern void _malloc_prefork(void);
extern void _malloc_postfork_child(void);

static void zone_force_lock(malloc_zone_t* zone) {
  // /!\ This calls into mozjemalloc. It works because we're linked in the
  // same library.
  _malloc_prefork();
}

static void zone_force_unlock(malloc_zone_t* zone) {
  // /!\ This calls into mozjemalloc. It works because we're linked in the
  // same library.
  _malloc_postfork_child();
}

static void zone_statistics(malloc_zone_t* zone, malloc_statistics_t* stats) {
  // We make no effort to actually fill the values
  stats->blocks_in_use = 0;
  stats->size_in_use = 0;
  stats->max_size_in_use = 0;
  stats->size_allocated = 0;
}

static boolean_t zone_locked(malloc_zone_t* zone) {
  // Pretend no lock is being held
  return false;
}

static void zone_reinit_lock(malloc_zone_t* zone) {
  // As of OSX 10.12, this function is only used when force_unlock would
  // be used if the zone version were < 9. So just use force_unlock.
  zone_force_unlock(zone);
}
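
// The zone we register with the system and its introspection table; both are
// filled in by register_zone() below.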
static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;

static malloc_zone_t* get_default_zone() {
  malloc_zone_t** zones = NULL;
  unsigned int num_zones = 0;

  // On OSX 10.12, malloc_default_zone returns a special zone that is not
  // present in the list of registered zones. That zone uses a "lite zone"
  // if one is present (apparently enabled when malloc stack logging is
  // enabled), or the first registered zone otherwise. In practice this
  // means unless malloc stack logging is enabled, the first registered
  // zone is the default.
  // So get the list of zones to get the first one, instead of relying on
  // malloc_default_zone.
  if (KERN_SUCCESS !=
      malloc_get_all_zones(0, NULL, (vm_address_t**)&zones, &num_zones)) {
    // Reset the value in case the failure happened after it was set.
    num_zones = 0;
  }
  if (num_zones) {
    return zones[0];
  }
  return malloc_default_zone();
}
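
// Runs at library load time (constructor) and installs our zone as the
// process-wide default malloc zone by re-registering the zones that precede
// it.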
__attribute__((constructor)) static void register_zone(void) {
  malloc_zone_t* default_zone = get_default_zone();

  zone.size = zone_size;
  zone.malloc = zone_malloc;
  zone.calloc = zone_calloc;
  zone.valloc = zone_valloc;
  zone.free = zone_free;
  zone.realloc = zone_realloc;
  zone.destroy = zone_destroy;
#ifdef MOZ_REPLACE_MALLOC
  zone.zone_name = "replace_malloc_zone";
#else
  zone.zone_name = "jemalloc_zone";
#endif
  zone.batch_malloc = zone_batch_malloc;
  zone.batch_free = zone_batch_free;
  zone.introspect = &zone_introspect;
  zone.version = 9;
  zone.memalign = zone_memalign;
  zone.free_definite_size = zone_free_definite_size;
  zone.pressure_relief = zone_pressure_relief;
  zone_introspect.enumerator = zone_enumerator;
  zone_introspect.good_size = zone_good_size;
  zone_introspect.check = zone_check;
  zone_introspect.print = zone_print;
  zone_introspect.log = zone_log;
  zone_introspect.force_lock = zone_force_lock;
  zone_introspect.force_unlock = zone_force_unlock;
  zone_introspect.statistics = zone_statistics;
  zone_introspect.zone_locked = zone_locked;
  zone_introspect.enable_discharge_checking = NULL;
  zone_introspect.disable_discharge_checking = NULL;
  zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
  zone_introspect.enumerate_discharged_pointers = NULL;
#else
  zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
  zone_introspect.reinit_lock = zone_reinit_lock;

  // The default purgeable zone is created lazily by OSX's libc. It uses
  // the default zone when it is created for "small" allocations
  // (< 15 KiB), but assumes the default zone is a scalable_zone. This
  // obviously fails when the default zone is the jemalloc zone, so
  // malloc_default_purgeable_zone is called beforehand so that the
  // default purgeable zone is created when the default zone is still
  // a scalable_zone.
  malloc_zone_t* purgeable_zone = malloc_default_purgeable_zone();

  // There is a problem related to the above with the system nano zone, which
  // is hard to work around from here, and that is instead worked around by
  // disabling the nano zone through an environment variable
  // (MallocNanoZone=0). In Firefox, we do that through
  // browser/app/macbuild/Contents/Info.plist.in.

  // Register the custom zone. At this point it won't be the default.
  malloc_zone_register(&zone);
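
  // Keep going until our zone has actually become the default; the comments
  // below explain how the unregister/register shuffle achieves that on the
  // various OSX versions.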
  do {
    // Unregister and reregister the default zone. On OSX >= 10.6,
    // unregistering takes the last registered zone and places it at the
    // location of the specified zone. Unregistering the default zone thus
    // makes the last registered one the default. On OSX < 10.6,
    // unregistering shifts all registered zones. The first registered zone
    // then becomes the default.
    malloc_zone_unregister(default_zone);
    malloc_zone_register(default_zone);

    // On OSX 10.6, having the default purgeable zone appear before the default
    // zone makes some things crash because it thinks it owns the default
    // zone allocated pointers. We thus unregister/re-register it in order to
    // ensure it's always after the default zone. On OSX < 10.6, as
    // unregistering shifts registered zones, this simply removes the purgeable
    // zone from the list and adds it back at the end, after the default zone.
    // On OSX >= 10.6, unregistering replaces the purgeable zone with the last
    // registered zone above, i.e. the default zone. Registering it again then
    // puts it at the end, obviously after the default zone.
    malloc_zone_unregister(purgeable_zone);
    malloc_zone_register(purgeable_zone);
    default_zone = get_default_zone();
  } while (default_zone != &zone);
}