/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Note that this defines a large number of tuning hooks, which can
 * safely be ignored in nearly all cases.  For normal use it suffices
 * to call only GC_MALLOC and perhaps GC_REALLOC.
 * For better performance, also look at GC_MALLOC_ATOMIC, and
 * GC_enable_incremental.  If you need an action to be performed
 * immediately before an object is collected, look at GC_register_finalizer.
 * If you are using Solaris threads, look at the end of this file.
 * Everything else is best ignored unless you encounter performance
 * problems.
 */
# include "gc_config_macros.h"

# if defined(__STDC__) || defined(__cplusplus)
#   define GC_PROTO(args) args
    typedef void * GC_PTR;
#   define GC_CONST const
# else
#   define GC_PROTO(args) ()
    typedef char * GC_PTR;
#   define GC_CONST
# endif
# ifdef __cplusplus
    extern "C" {
# endif

/* Define word and signed_word to be unsigned and signed types of the  */
/* same size as char * or void *.  There seems to be no way to do this */
/* even semi-portably.  The following is probably no better or worse   */
/* than almost anything else.                                           */
/* The ANSI standard suggests that size_t and ptrdiff_t might be        */
/* better choices.  But those appear to have incorrect definitions      */
/* on many systems.  Notably "typedef int size_t" seems to be both      */
/* frequent and WRONG.                                                  */
typedef unsigned long GC_word;
typedef long GC_signed_word;
/* Public read-only variables */

GC_API GC_word GC_gc_no;  /* Counter incremented per collection.       */
                          /* Includes empty GCs at startup.            */

GC_API int GC_parallel;   /* GC is parallelized for performance on     */
                          /* multiprocessors.  Currently set only      */
                          /* implicitly if collector is built with     */
                          /* -DPARALLEL_MARK and if either:            */
                          /*  Env variable GC_NPROC is set to > 1, or  */
                          /*  GC_NPROC is not set and this is an MP.   */
                          /* If GC_parallel is set, incremental        */
                          /* collection is only partially functional,  */
                          /* and may not be desirable.                 */
/* Public R/W variables */

GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
                          /* When there is insufficient memory to satisfy */
                          /* an allocation request, we return             */
                          /* (*GC_oom_fn)().  By default this just        */
                          /* returns 0.                                   */
                          /* If it returns, it must return 0 or a valid   */
                          /* pointer to a previously allocated heap       */
                          /* object.                                      */
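/*
 * A minimal usage sketch (illustrative only, not part of this interface):
 * install a custom out-of-memory handler by assigning to GC_oom_fn before
 * allocating.  The handler name "my_oom" is hypothetical, and the body
 * assumes <stdio.h> and <stdlib.h> have been included.
 *
 *   GC_PTR my_oom(size_t bytes_requested)
 *   {
 *       fprintf(stderr, "GC: out of memory (%lu bytes)\n",
 *               (unsigned long)bytes_requested);
 *       abort();
 *       return 0;
 *   }
 *
 *   GC_oom_fn = my_oom;
 */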
GC_API int GC_find_leak;
                          /* Do not actually garbage collect, but simply  */
                          /* report inaccessible memory that was not      */
                          /* deallocated with GC_free.  Initial value     */
                          /* is determined by FIND_LEAK macro.            */

GC_API int GC_all_interior_pointers;
                          /* Arrange for pointers to object interiors to  */
                          /* be recognized as valid.  May not be changed  */
                          /* after GC initialization.                     */
                          /* Initial value is determined by               */
                          /* -DALL_INTERIOR_POINTERS.                     */
                          /* Unless DONT_ADD_BYTE_AT_END is defined, this */
                          /* also affects whether sizes are increased by  */
                          /* at least a byte to allow "off the end"       */
                          /* pointer recognition.                         */
                          /* MUST BE 0 or 1.                              */

GC_API int GC_quiet;      /* Disable statistics output.  Only matters if  */
                          /* collector has been compiled with statistics  */
                          /* enabled.  This involves a performance cost,  */
                          /* and is thus not the default.                 */

GC_API int GC_finalize_on_demand;
                          /* If nonzero, finalizers will only be run in   */
                          /* response to an explicit GC_invoke_finalizers */
                          /* call.  The default is determined by whether  */
                          /* the FINALIZE_ON_DEMAND macro is defined      */
                          /* when the collector is built.                 */

GC_API int GC_java_finalization;
                          /* Mark objects reachable from finalizable      */
                          /* objects in a separate postpass.  This makes  */
                          /* it a bit safer to use non-topologically-     */
                          /* ordered finalization.  Default value is      */
                          /* determined by JAVA_FINALIZATION macro.       */
GC_API void (* GC_finalizer_notifier)();
                          /* Invoked by the collector when there are      */
                          /* objects to be finalized.  Invoked at most    */
                          /* once per GC cycle.  Never invoked unless     */
                          /* GC_finalize_on_demand is set.                */
                          /* Typically this will notify a finalization    */
                          /* thread, which will call GC_invoke_finalizers */
                          /* in response.                                 */

GC_API int GC_dont_gc;    /* != 0 ==> Don't collect.  In versions 6.2a1+, */
                          /* this overrides explicit GC_gcollect() calls. */
                          /* Used as a counter, so that nested enabling   */
                          /* and disabling work correctly.  Should        */
                          /* normally be updated with GC_enable() and     */
                          /* GC_disable() calls.                          */
                          /* Direct assignment to GC_dont_gc is           */
                          /* deprecated.                                  */

GC_API int GC_dont_expand;
                          /* Don't expand heap unless explicitly          */
                          /* requested or forced to.                      */
GC_API int GC_use_entire_heap;
                /* Causes the nonincremental collector to use the      */
                /* entire heap before collecting.  This was the only   */
                /* option for GC versions < 5.0.  This sometimes       */
                /* results in more large block fragmentation, since    */
                /* very large blocks will tend to get broken up        */
                /* during each GC cycle.  It is likely to result in a  */
                /* larger working set, but lower collection            */
                /* frequencies, and hence fewer instructions executed  */
                /* in the collector.                                   */
GC_API int GC_full_freq;  /* Number of partial collections between      */
                          /* full collections.  Matters only if         */
                          /* GC_incremental is set.                     */
                          /* Full collections are also triggered if     */
                          /* the collector detects a substantial        */
                          /* increase in the number of in-use heap      */
                          /* blocks.  Values in the tens are now        */
                          /* perfectly reasonable, unlike for           */
                          /* earlier GC versions.                       */

GC_API GC_word GC_non_gc_bytes;
                          /* Bytes not considered candidates for collection. */
                          /* Used only to control scheduling of collections. */
                          /* Updated by GC_malloc_uncollectable and GC_free. */

GC_API int GC_no_dls;
                          /* Don't register dynamic library data segments. */
                          /* Wizards only.  Should be used only if the     */
                          /* application explicitly registers all roots.   */
                          /* In Microsoft Windows environments, this will  */
                          /* usually also prevent registration of the      */
                          /* main data segment as part of the root set.    */

GC_API GC_word GC_free_space_divisor;
                          /* We try to make sure that we allocate at      */
                          /* least N/GC_free_space_divisor bytes between  */
                          /* collections, where N is the heap size plus   */
                          /* a rough estimate of the root set size.       */
                          /* Initially, GC_free_space_divisor = 4.        */
                          /* Increasing its value will use less space     */
                          /* but more collection time.  Decreasing it     */
                          /* will appreciably decrease collection time    */
                          /* at the expense of space.                     */
                          /* GC_free_space_divisor = 1 will effectively   */
                          /* disable collections.                         */

GC_API GC_word GC_max_retries;
                          /* The maximum number of GCs attempted before   */
                          /* reporting out of memory after heap           */
                          /* expansion fails.  Initially 0.               */
GC_API char *GC_stackbottom;    /* Cool end of user stack.              */
                                /* May be set in the client prior to    */
                                /* calling any GC_ routines.  This      */
                                /* avoids some overhead, and            */
                                /* potentially some signals that can    */
                                /* confuse debuggers.  Otherwise the    */
                                /* collector attempts to set it         */
                                /* automatically.                       */
                                /* For multithreaded code, this is the  */
                                /* cold end of the stack for the        */
                                /* primordial thread.                   */

GC_API int GC_dont_precollect;  /* Don't collect as part of             */
                                /* initialization.  Should be set only  */
                                /* if the client wants a chance to      */
                                /* manually initialize the root set     */
                                /* before the first collection.         */
                                /* Interferes with blacklisting.        */
GC_API unsigned long GC_time_limit;
                                /* If incremental collection is enabled, */
                                /* we try to terminate collections       */
                                /* after this many milliseconds.  Not a  */
                                /* hard time bound.  Setting this to     */
                                /* GC_TIME_UNLIMITED will essentially    */
                                /* disable incremental collection while  */
                                /* leaving generational collection       */
                                /* relatively untouched.                 */

# define GC_TIME_UNLIMITED 999999
                                /* Setting GC_time_limit to this value   */
                                /* will disable the "pause time exceeded"*/
                                /* tests.                                */

/* Public procedures */

/* Initialize the collector.  This is only required when using thread-local
 * allocation, since unlike the regular allocation routines, GC_local_malloc
 * is not self-initializing.  If you use GC_local_malloc you should arrange
 * to call this somehow (e.g. from a constructor) before doing any allocation.
 */
GC_API void GC_init GC_PROTO((void));
/*
 * General purpose allocation routines, with roughly malloc calling conventions.
 * The atomic versions promise that no relevant pointers are contained
 * in the object.  The nonatomic versions guarantee that the new object
 * is cleared.  GC_malloc_stubborn promises that no changes to the object
 * will occur after GC_end_stubborn_change has been called on the
 * result of GC_malloc_stubborn.  GC_malloc_uncollectable allocates an object
 * that is scanned for pointers to collectable objects, but is not itself
 * collectable.  The object is scanned even if it does not appear to
 * be reachable.  GC_malloc_uncollectable and GC_free called on the resulting
 * object implicitly update GC_non_gc_bytes appropriately.
 *
 * Note that the GC_malloc_stubborn support is stubbed out by default
 * starting in 6.0.  GC_malloc_stubborn is an alias for GC_malloc unless
 * the collector is built with STUBBORN_ALLOC defined.
 */
GC_API GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes));
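/*
 * Usage sketch (illustrative only; "struct node" is a hypothetical client
 * type): allocate a list node with GC_malloc so that its "next" field is
 * traced, and a character buffer with GC_malloc_atomic since it contains
 * no pointers.  Neither object ever needs an explicit GC_free.
 *
 *   struct node { struct node *next; int value; };
 *
 *   struct node *n = (struct node *)GC_malloc(sizeof(struct node));
 *   char *buf = (char *)GC_malloc_atomic(1024);
 *   n->value = 42;
 *   buf[0] = 'x';
 *
 * The nonatomic object is returned cleared, so n->next is already 0; the
 * atomic buffer's contents are uninitialized.
 */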
/* The following is only defined if the library has been suitably      */
/* compiled:                                                           */
GC_API GC_PTR GC_malloc_atomic_uncollectable GC_PROTO((size_t size_in_bytes));
/* Explicitly deallocate an object.  Dangerous if used incorrectly.     */
/* Requires a pointer to the base of an object.                         */
/* If the argument is stubborn, it should not be changeable when freed. */
/* An object should not be enabled for finalization when it is          */
/* explicitly deallocated.                                              */
/* GC_free(0) is a no-op, as required by ANSI C for free.               */
GC_API void GC_free GC_PROTO((GC_PTR object_addr));
/*
 * Stubborn objects may be changed only if the collector is explicitly informed.
 * The collector is implicitly informed of coming change when such
 * an object is first allocated.  The following routines inform the
 * collector that an object will no longer be changed, or that it will
 * once again be changed.  Only non-NULL pointer stores into the object
 * are considered to be changes.  The argument to GC_end_stubborn_change
 * must be exactly the value returned by GC_malloc_stubborn or passed to
 * GC_change_stubborn.  (In the second case it may be an interior pointer
 * within 512 bytes of the beginning of the object.)
 * There is a performance penalty for allowing more than
 * one stubborn object to be changed at once, but it is acceptable to
 * do so.  The same applies to dropping stubborn objects that are still
 * changeable.
 */
GC_API void GC_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_end_stubborn_change GC_PROTO((GC_PTR));
/* Return a pointer to the base (lowest address) of an object given    */
/* a pointer to a location within the object.                          */
/* I.e. map an interior pointer to the corresponding base pointer.     */
/* Note that with debugging allocation, this returns a pointer to the  */
/* actual base of the object, i.e. the debug information, not to       */
/* the base of the user object.                                        */
/* Return 0 if displaced_pointer doesn't point to within a valid       */
/* object.                                                              */
GC_API GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer));

/* Given a pointer to the base of an object, return its size in bytes. */
/* The returned size may be slightly larger than what was originally   */
/* requested.                                                           */
GC_API size_t GC_size GC_PROTO((GC_PTR object_addr));
/* For compatibility with C library.  This is occasionally faster than */
/* a malloc followed by a bcopy.  But if you rely on that, either here */
/* or with the standard C library, your code is broken.  In my         */
/* opinion, it shouldn't have been invented, but now we're stuck. -HB  */
/* The resulting object has the same kind as the original.             */
/* If the argument is stubborn, the result will have changes enabled.  */
/* It is an error to have changes enabled for the original object.     */
/* Follows ANSI conventions for NULL old_object.                       */
GC_API GC_PTR GC_realloc
        GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes));
/* Explicitly increase the heap size.  Returns 0 on failure, 1 on success. */
GC_API int GC_expand_hp GC_PROTO((size_t number_of_bytes));

/* Limit the heap size to n bytes.  Useful when you're debugging,      */
/* especially on systems that don't handle running out of memory well. */
/* n == 0 ==> unbounded.  This is the default.                          */
GC_API void GC_set_max_heap_size GC_PROTO((GC_word n));

/* Inform the collector that a certain section of statically allocated  */
/* memory contains no pointers to garbage collected memory.  Thus it    */
/* need not be scanned.  This is sometimes important if the application */
/* maps large read/write files into the address space, which could be   */
/* mistaken for dynamic library data segments on some systems.          */
GC_API void GC_exclude_static_roots GC_PROTO((GC_PTR start, GC_PTR finish));
/* Clear the set of root segments.  Wizards only. */
GC_API void GC_clear_roots GC_PROTO((void));

/* Add a root segment.  Wizards only. */
GC_API void GC_add_roots GC_PROTO((char * low_address,
                                   char * high_address_plus_1));

/* Remove a root segment.  Wizards only. */
GC_API void GC_remove_roots GC_PROTO((char * low_address,
                                      char * high_address_plus_1));
/* Add a displacement to the set of those considered valid by the        */
/* collector.  GC_register_displacement(n) means that if p was returned  */
/* by GC_malloc, then (char *)p + n will be considered to be a valid     */
/* pointer to p.  N must be small and less than the size of p.           */
/* (All pointers to the interior of objects from the stack are           */
/* considered valid in any case.  This applies to heap objects and       */
/* static data.)                                                          */
/* Preferably, this should be called before any other GC procedures.     */
/* Calling it later adds to the probability of excess memory             */
/* retention.                                                             */
/* This is a no-op if the collector has recognition of                   */
/* arbitrary interior pointers enabled, which is now the default.        */
GC_API void GC_register_displacement GC_PROTO((GC_word n));

/* The following version should be used if any debugging allocation is  */
/* being done.                                                           */
GC_API void GC_debug_register_displacement GC_PROTO((GC_word n));
/* Explicitly trigger a full, world-stop collection.    */
GC_API void GC_gcollect GC_PROTO((void));

/* Trigger a full world-stopped collection.  Abort the collection if   */
/* and when stop_func returns a nonzero value.  Stop_func will be      */
/* called frequently, and should be reasonably fast.  This works even  */
/* if virtual dirty bits, and hence incremental collection, are not    */
/* available for this architecture.  Collections can be aborted faster */
/* than normal pause times for incremental collection.  However,       */
/* aborted collections do no useful work; the next collection needs    */
/* to start from the beginning.                                         */
/* Return 0 if the collection was aborted, 1 if it succeeded.           */
typedef int (* GC_stop_func) GC_PROTO((void));
GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
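/*
 * Usage sketch (illustrative only; "stop_requested" is a hypothetical flag
 * maintained by the client): abort a world-stopped collection when some
 * external condition arises.
 *
 *   static volatile int stop_requested = 0;
 *
 *   static int should_stop(void)
 *   {
 *       return stop_requested;
 *   }
 *
 *   int completed = GC_try_to_collect(should_stop);
 *
 * A zero result means the collection was aborted and did no useful work.
 */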
/* Return the number of bytes in the heap.  Excludes collector private */
/* data structures.  Includes empty blocks and fragmentation loss.     */
/* Includes some pages that were allocated but never written.          */
GC_API size_t GC_get_heap_size GC_PROTO((void));

/* Return a lower bound on the number of free bytes in the heap.       */
GC_API size_t GC_get_free_bytes GC_PROTO((void));

/* Return the number of bytes allocated since the last collection.     */
GC_API size_t GC_get_bytes_since_gc GC_PROTO((void));

/* Return the total number of bytes allocated in this process.         */
/* Never decreases, except due to wrapping.                            */
GC_API size_t GC_get_total_bytes GC_PROTO((void));
/* Disable garbage collection.  Even GC_gcollect calls will be         */
/* ineffective.                                                        */
GC_API void GC_disable GC_PROTO((void));

/* Reenable garbage collection.  GC_disable() and GC_enable() calls    */
/* nest.  Garbage collection is enabled if the number of calls to      */
/* both functions is equal.                                            */
GC_API void GC_enable GC_PROTO((void));
/* Enable incremental/generational collection.  */
/* Not advisable unless dirty bits are          */
/* available or most heap objects are           */
/* pointer-free (atomic) or immutable.          */
/* Don't use in leak finding mode.              */
/* Ignored if GC_dont_gc is true.               */
/* Only the generational piece of this is       */
/* functional if GC_parallel is TRUE            */
/* or if GC_time_limit is GC_TIME_UNLIMITED.    */
/* Causes GC_local_gcj_malloc() to revert to    */
/* locked allocation.  Must be called           */
/* before any GC_local_gcj_malloc() calls.      */
GC_API void GC_enable_incremental GC_PROTO((void));
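/*
 * Usage sketch (illustrative only): enable incremental collection right
 * after initialization and before the bulk of the program's allocation.
 *
 *   GC_INIT();
 *   GC_enable_incremental();
 */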
/* Does incremental mode write-protect pages?  Returns zero or */
/* more of the following, or'ed together:                      */
#define GC_PROTECTS_POINTER_HEAP  1 /* May protect non-atomic objs.    */
#define GC_PROTECTS_PTRFREE_HEAP  2
#define GC_PROTECTS_STATIC_DATA   4 /* Currently never.                */
#define GC_PROTECTS_STACK         8 /* Probably impractical.           */

#define GC_PROTECTS_NONE 0
GC_API int GC_incremental_protection_needs GC_PROTO((void));
/* Perform some garbage collection work, if appropriate.       */
/* Return 0 if there is no more work to be done.               */
/* Typically performs an amount of work corresponding roughly  */
/* to marking from one page.  May do more work if further      */
/* progress requires it, e.g. if incremental collection is     */
/* disabled.  It is reasonable to call this in a wait loop     */
/* until it returns 0.                                         */
GC_API int GC_collect_a_little GC_PROTO((void));
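/*
 * Usage sketch (illustrative only; "application_is_idle" is a hypothetical
 * client predicate): perform small increments of collection work while the
 * application has nothing better to do, as suggested above.
 *
 *   while (application_is_idle() && GC_collect_a_little()) {
 *       continue;
 *   }
 */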
/* Allocate an object of size lb bytes.  The client guarantees that     */
/* as long as the object is live, it will be referenced by a pointer    */
/* that points to somewhere within the first 256 bytes of the object.   */
/* (This should normally be declared volatile to prevent the compiler   */
/* from invalidating this assertion.)  This routine is only useful      */
/* if a large array is being allocated.  It reduces the chance of       */
/* accidentally retaining such an array as a result of scanning an      */
/* integer that happens to be an address inside the array.  (Actually,  */
/* it reduces the chance of the allocator not finding space for such    */
/* an array, since it will try hard to avoid introducing such a false   */
/* reference.)  On a SunOS 4.X or MS Windows system this is recommended */
/* for arrays likely to be larger than 100K or so.  For other systems,  */
/* or if the collector is not configured to recognize all interior      */
/* pointers, the threshold is normally much higher.                     */
GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
#   define GC_ADD_CALLER
#   define GC_RETURN_ADDR (GC_word)__return_address
#endif

#ifdef __linux__
# include <features.h>
# if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1 || __GLIBC__ > 2) \
     && !defined(__ia64__)
#   define GC_HAVE_BUILTIN_BACKTRACE
#   define GC_CAN_SAVE_CALL_STACKS
# endif
# if defined(__i386__) || defined(__x86_64__)
#   define GC_CAN_SAVE_CALL_STACKS
# endif
#endif

#if defined(__sparc__)
#   define GC_CAN_SAVE_CALL_STACKS
#endif
/* If we're on a platform on which we can't save call stacks, but      */
/* gcc is normally used, we go ahead and define GC_ADD_CALLER.         */
/* We make this decision independent of whether gcc is actually being  */
/* used, in order to keep the interface consistent, and allow mixing   */
/* of compilers.                                                       */
/* This may also be desirable if it is possible but expensive to       */
/* retrieve the call chain.                                            */
#if (defined(__linux__) || defined(__NetBSD__) || defined(__OpenBSD__) \
     || defined(__FreeBSD__)) && !defined(GC_CAN_SAVE_CALL_STACKS)
# define GC_ADD_CALLER
# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95)
    /* gcc knows how to retrieve return address, but we don't know     */
    /* how to generate call stacks.                                    */
#   define GC_RETURN_ADDR (GC_word)__builtin_return_address(0)
# else
    /* Just pass 0 for gcc compatibility. */
#   define GC_RETURN_ADDR 0
# endif
#endif

#ifdef GC_ADD_CALLER
#  define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
#  define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * s, int i
#else
#  define GC_EXTRAS __FILE__, __LINE__
#  define GC_EXTRA_PARAMS GC_CONST char * s, int i
#endif
/* Debugging (annotated) allocation.  GC_gcollect will check           */
/* objects allocated in this way for overwrites, etc.                  */
GC_API GC_PTR GC_debug_malloc
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_uncollectable
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_stubborn
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_ignore_off_page
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API GC_PTR GC_debug_malloc_atomic_ignore_off_page
        GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
GC_API GC_PTR GC_debug_realloc
        GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
                  GC_EXTRA_PARAMS));
GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
/* Routines that allocate objects with debug information (like the     */
/* above), but just fill in dummy file and line number information.    */
/* Thus they can serve as drop-in malloc/realloc replacements.  This   */
/* can be useful for two reasons:                                      */
/* 1) It allows the collector to be built with DBG_HDRS_ALL defined    */
/*    even if some allocation calls come from 3rd party libraries      */
/*    that can't be recompiled.                                        */
/* 2) On some platforms, the file and line information is redundant,   */
/*    since it can be reconstructed from a stack trace.  On such       */
/*    platforms it may be more convenient not to recompile, e.g. for   */
/*    leak detection.  This can be accomplished by instructing the     */
/*    linker to replace malloc/realloc with these.                     */
GC_API GC_PTR GC_debug_malloc_replacement GC_PROTO((size_t size_in_bytes));
GC_API GC_PTR GC_debug_realloc_replacement
              GC_PROTO((GC_PTR object_addr, size_t size_in_bytes));
# ifdef GC_DEBUG
#   define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
#   define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
#   define GC_MALLOC_UNCOLLECTABLE(sz) \
                        GC_debug_malloc_uncollectable(sz, GC_EXTRAS)
#   define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
                        GC_debug_malloc_ignore_off_page(sz, GC_EXTRAS)
#   define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
                        GC_debug_malloc_atomic_ignore_off_page(sz, GC_EXTRAS)
#   define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
#   define GC_FREE(p) GC_debug_free(p)
#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \
        GC_debug_register_finalizer(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
        GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
        GC_debug_register_finalizer_no_order(p, f, d, of, od)
#   define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
#   define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
#   define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
#   define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
        GC_general_register_disappearing_link(link, GC_base(obj))
#   define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
# else
#   define GC_MALLOC(sz) GC_malloc(sz)
#   define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
#   define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
#   define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
                        GC_malloc_ignore_off_page(sz)
#   define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
                        GC_malloc_atomic_ignore_off_page(sz)
#   define GC_REALLOC(old, sz) GC_realloc(old, sz)
#   define GC_FREE(p) GC_free(p)
#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \
        GC_register_finalizer(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
        GC_register_finalizer_ignore_self(p, f, d, of, od)
#   define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
        GC_register_finalizer_no_order(p, f, d, of, od)
#   define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
#   define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
#   define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
#   define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
        GC_general_register_disappearing_link(link, obj)
#   define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
# endif
/* The following are included because they are often convenient, and   */
/* reduce the chance for a misspecified size argument.  But calls may  */
/* expand to something syntactically incorrect if t is a complicated   */
/* type expression.                                                    */
# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
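/*
 * Usage sketch (illustrative only; "struct point" is a hypothetical client
 * type): GC_NEW_ATOMIC is appropriate here because the object contains no
 * pointers, while the cell allocated with GC_NEW is traced.
 *
 *   struct point { double x, y; };
 *
 *   struct point *p = GC_NEW_ATOMIC(struct point);
 *   struct point **cell = GC_NEW(struct point *);
 *   p->x = 1.0;
 *   p->y = 2.0;
 *   *cell = p;
 */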
/* Finalization.  Some of these primitives are grossly unsafe.         */
/* The idea is to make them both cheap, and sufficient to build        */
/* a safer layer, closer to PCedar finalization.                       */
/* The interface represents my conclusions from a long discussion      */
/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes,             */
/* Christian Jacobi, and Russ Atkinson.  It's not perfect, and         */
/* probably nobody else agrees with it.     Hans-J. Boehm  3/13/92     */
typedef void (*GC_finalization_proc)
        GC_PROTO((GC_PTR obj, GC_PTR client_data));

GC_API void GC_register_finalizer
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
        /* When obj is no longer accessible, invoke            */
        /* (*fn)(obj, cd).  If a and b are inaccessible, and   */
        /* a points to b (after disappearing links have been   */
        /* made to disappear), then only a will be             */
        /* finalized.  (If this does not create any new        */
        /* pointers to b, then b will be finalized after the   */
        /* next collection.)  Any finalizable object that      */
        /* is reachable from itself by following one or more   */
        /* pointers will not be finalized (or collected).      */
        /* Thus cycles involving finalizable objects should    */
        /* be avoided, or broken by disappearing links.        */
        /* All but the last finalizer registered for an object */
        /* is ignored.                                         */
        /* Finalization may be removed by passing 0 as fn.     */
        /* Finalizers are implicitly unregistered just before  */
        /* they are invoked.                                   */
        /* The old finalizer and client data are stored in     */
        /* *ofn and *ocd.                                      */
        /* Fn is never invoked on an accessible object,        */
        /* provided hidden pointers are converted to real      */
        /* pointers only if the allocation lock is held, and   */
        /* such conversions are not performed by finalization  */
        /* routines.                                           */
        /* If GC_register_finalizer is aborted as a result of  */
        /* a signal, the object may be left with no            */
        /* finalization, even if neither the old nor new       */
        /* finalizer were NULL.                                */
        /* Obj should be the non-NULL starting address of an   */
        /* object allocated by GC_malloc or friends.           */
        /* Note that any garbage collectable object referenced */
        /* by cd will be considered accessible until the       */
        /* finalizer is invoked.                               */
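/*
 * Usage sketch (illustrative only; "struct wrapper", "close_handle_finalizer"
 * and "some_open_descriptor" are hypothetical, and <unistd.h> is assumed for
 * close()): release an OS resource when its wrapper object becomes
 * unreachable.
 *
 *   struct wrapper { int fd; };
 *
 *   static void close_handle_finalizer(GC_PTR obj, GC_PTR client_data)
 *   {
 *       close(((struct wrapper *)obj)->fd);
 *   }
 *
 *   struct wrapper *w = GC_NEW(struct wrapper);
 *   w->fd = some_open_descriptor;
 *   GC_REGISTER_FINALIZER(w, close_handle_finalizer, 0, 0, 0);
 */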
/* Another version of the above follows.  It ignores           */
/* self-cycles, i.e. pointers from a finalizable object to     */
/* itself.  There is a stylistic argument that this is wrong,  */
/* but it's unavoidable for C++, since the compiler may        */
/* silently introduce these.  It's also benign in that specific */
/* case.  And it helps if finalizable objects are split to     */
/* avoid premature reclamation.                                */
/* Note that cd will still be viewed as accessible, even if it */
/* refers to the object itself.                                */
GC_API void GC_register_finalizer_ignore_self
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer_ignore_self
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));

/* Another version of the above.  It ignores all cycles.       */
/* It should probably only be used by Java implementations.    */
/* Note that cd will still be viewed as accessible, even if it */
/* refers to the object itself.                                */
GC_API void GC_register_finalizer_no_order
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
GC_API void GC_debug_register_finalizer_no_order
        GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
                  GC_finalization_proc *ofn, GC_PTR *ocd));
/* The following routine may be used to break cycles between   */
/* finalizable objects, thus causing cyclic finalizable        */
/* objects to be finalized in the correct order.  Standard     */
/* use involves calling GC_register_disappearing_link(&p),     */
/* where p is a pointer that is not followed by finalization   */
/* code, and should not be considered in determining           */
/* finalization order.                                         */
GC_API int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
        /* Link should point to a field of a heap allocated    */
        /* object obj.  *link will be cleared when obj is      */
        /* found to be inaccessible.  This happens BEFORE any  */
        /* finalization code is invoked, and BEFORE any        */
        /* decisions about finalization order are made.        */
        /* This is useful in telling the finalizer that        */
        /* some pointers are not essential for proper          */
        /* finalization.  This may avoid finalization cycles.  */
        /* Note that obj may be resurrected by another         */
        /* finalizer, and thus the clearing of *link may       */
        /* be visible to non-finalization code.                */
        /* There's an argument that an arbitrary action should */
        /* be allowed here, instead of just clearing a pointer. */
        /* But this causes problems if that action alters, or  */
        /* examines connectivity.                              */
        /* Returns 1 if link was already registered, 0         */
        /* otherwise.                                          */
        /* Only exists for backward compatibility.  See below: */
GC_API int GC_general_register_disappearing_link
        GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
        /* A slight generalization of the above.  *link is     */
        /* cleared when obj first becomes inaccessible.  This  */
        /* can be used to implement weak pointers easily and   */
        /* safely.  Typically link will point to a location    */
        /* holding a disguised pointer to obj.  (A pointer     */
        /* inside an "atomic" object is effectively            */
        /* disguised.)  In this way soft                       */
        /* pointers are broken before any object               */
        /* reachable from them are finalized.  Each link       */
        /* may be registered only once, i.e. with one obj      */
        /* value.  This was added after a long email discussion */
        /* with John Ellis.                                    */
        /* Obj must be a pointer to the first word of an object */
        /* we allocated.  It is unsafe to explicitly deallocate */
        /* the object containing link.  Explicitly deallocating */
        /* obj may or may not cause link to eventually be      */
        /* cleared.                                            */
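/*
 * Usage sketch (illustrative only): a simple weak reference.  The pointer
 * is stored in an atomic cell, which effectively disguises it (see above),
 * so the cell alone does not keep obj alive; the disappearing link then
 * clears the cell before obj could be finalized.  Variable names are
 * hypothetical.
 *
 *   GC_PTR obj = GC_MALLOC(64);
 *   GC_PTR *weak_cell = (GC_PTR *)GC_MALLOC_ATOMIC(sizeof(GC_PTR));
 *   *weak_cell = obj;
 *   GC_general_register_disappearing_link(weak_cell, obj);
 *
 * Once obj becomes inaccessible, the collector sets *weak_cell to 0.
 * Reading *weak_cell safely in a multithreaded program should be done
 * under the allocation lock (see GC_call_with_alloc_lock below).
 */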
GC_API int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
        /* Returns 0 if link was not actually registered.      */
        /* Undoes a registration by either of the above two    */
        /* routines.                                           */

/* Returns !=0 if GC_invoke_finalizers has something to do.    */
GC_API int GC_should_invoke_finalizers GC_PROTO((void));

GC_API int GC_invoke_finalizers GC_PROTO((void));
        /* Run finalizers for all objects that are ready to    */
        /* be finalized.  Return the number of finalizers      */
        /* that were run.  Normally this is also called        */
        /* implicitly during some allocations.  If             */
        /* GC_finalize_on_demand is nonzero, it must be called */
        /* explicitly.                                         */
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
/* p may not be a NULL pointer.                                          */
typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
GC_API GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
    /* Returns old warning procedure.   */
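/*
 * Usage sketch (illustrative only; "ignore_warn" is a hypothetical name):
 * suppress collector warnings, keeping the previous handler around in case
 * it should be restored later.
 *
 *   static void ignore_warn(char *msg, GC_word arg)
 *   {
 *       (void)msg;
 *       (void)arg;
 *   }
 *
 *   GC_warn_proc old_proc = GC_set_warn_proc(ignore_warn);
 */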
GC_API GC_word GC_set_free_space_divisor GC_PROTO((GC_word value));
    /* Set free_space_divisor.  See above for definition.      */
    /* Returns old value.                                      */

/* The following is intended to be used by a higher level      */
/* (e.g. Java-like) finalization facility.  It is expected     */
/* that finalization code will arrange for hidden pointers to  */
/* disappear.  Otherwise objects can be accessed after they    */
/* have been collected.                                        */
/* Note that putting pointers in atomic objects or in          */
/* nonpointer slots of "typed" objects is equivalent to        */
/* disguising them in this way, and may have other advantages. */
# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
typedef GC_word GC_hidden_pointer;
# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
/* Converting a hidden pointer to a real pointer requires verifying    */
/* that the object still exists.  This involves acquiring the          */
/* allocator lock to avoid a race with the collector.                  */
# endif /* I_HIDE_POINTERS */
typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
GC_API GC_PTR GC_call_with_alloc_lock
                GC_PROTO((GC_fn_type fn, GC_PTR client_data));
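/*
 * Usage sketch (illustrative only): reveal a hidden pointer while holding
 * the allocation lock, as required above.  It assumes GC_I_HIDE_POINTERS
 * was defined before including this header, and that "hidden" is a
 * GC_hidden_pointer location that is cleared (set to 0) by a disappearing
 * link when the underlying object dies.
 *
 *   static GC_PTR reveal(GC_PTR client_data)
 *   {
 *       GC_hidden_pointer h = *(GC_hidden_pointer *)client_data;
 *       return h != 0 ? REVEAL_POINTER(h) : (GC_PTR)0;
 *   }
 *
 *   GC_PTR p = GC_call_with_alloc_lock(reveal, (GC_PTR)&hidden);
 */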
/* The following routines are primarily intended for use with a        */
/* preprocessor which inserts calls to check C pointer arithmetic.     */
/* They indicate failure by invoking the corresponding _print_proc.    */

/* Check that p and q point to the same object.                */
/* Fail conspicuously if they don't.                           */
/* Returns the first argument.                                 */
/* Succeeds if neither p nor q points to the heap.             */
/* May succeed if both p and q point to between heap objects.  */
GC_API GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));
/* Checked pointer pre- and post- increment operations.  Note that     */
/* the second argument is in units of bytes, not multiples of the      */
/* object size.  This should either be invoked from a macro, or the    */
/* call should be automatically generated.                             */
GC_API GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
GC_API GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));

/* Check that p is visible                                              */
/* to the collector as a possibly pointer-containing location.         */
/* If it isn't, fail conspicuously.                                     */
/* Returns the argument in all cases.  May erroneously succeed         */
/* in hard cases.  (This is intended for debugging use with            */
/* untyped allocations.  The idea is that it should be possible, though */
/* slow, to add such a call to all indirect pointer stores.)           */
/* Currently useless for multithreaded worlds.                         */
GC_API GC_PTR GC_is_visible GC_PROTO((GC_PTR p));

/* Check that if p is a pointer to a heap page, then it points to      */
/* a valid displacement within a heap object.                          */
/* Fail conspicuously if this property does not hold.                  */
/* Uninteresting with GC_all_interior_pointers.                        */
/* Always returns its argument.                                        */
GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));
/* Safer, but slow, pointer addition.  Probably useful mainly with     */
/* a preprocessor.  Useful only for heap pointers.                     */
#ifdef GC_DEBUG
#   define GC_PTR_ADD3(x, n, type_of_result) \
        ((type_of_result)GC_same_obj((x)+(n), (x)))
#   define GC_PRE_INCR3(x, n, type_of_result) \
        ((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)))
#   define GC_POST_INCR2(x, type_of_result) \
        ((type_of_result)GC_post_incr(&(x), sizeof(*x)))
#   ifdef __GNUC__
#       define GC_PTR_ADD(x, n) \
            GC_PTR_ADD3(x, n, typeof(x))
#       define GC_PRE_INCR(x, n) \
            GC_PRE_INCR3(x, n, typeof(x))
#       define GC_POST_INCR(x, n) \
            GC_POST_INCR2(x, typeof(x))
#   else
        /* We can't do this right without typeof, which ANSI   */
        /* decided was not sufficiently useful.  Repeatedly    */
        /* mentioning the arguments seems too dangerous to be  */
        /* useful.  So does not casting the result.            */
#       define GC_PTR_ADD(x, n) ((x)+(n))
#   endif
#else   /* !GC_DEBUG */
#   define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
#   define GC_PTR_ADD(x, n) ((x)+(n))
#   define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
#   define GC_PRE_INCR(x, n) ((x) += (n))
#   define GC_POST_INCR2(x, type_of_result) ((x)++)
#   define GC_POST_INCR(x, n) ((x)++)
#endif  /* GC_DEBUG */
/* Safer assignment of a pointer to a nonstack location.       */
#ifdef GC_DEBUG
# if defined(__STDC__) || defined(__cplusplus)
#   define GC_PTR_STORE(p, q) \
        (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
# else
#   define GC_PTR_STORE(p, q) \
        (*(char **)GC_is_visible(p) = GC_is_valid_displacement(q))
# endif
#else /* !GC_DEBUG */
#   define GC_PTR_STORE(p, q) (*(p) = (q))
#endif
/* Functions called to report pointer checking errors */
GC_API void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR p, GC_PTR q));

GC_API void (*GC_is_valid_displacement_print_proc) GC_PROTO((GC_PTR p));

GC_API void (*GC_is_visible_print_proc) GC_PROTO((GC_PTR p));
/* For pthread support, we generally need to intercept a number of     */
/* thread library calls.  We do that here by macro defining them.      */

#if !defined(GC_USE_LD_WRAP) && \
    (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS))
# include "gc_pthread_redirects.h"
#endif

# if defined(PCR) || defined(GC_SOLARIS_THREADS) || \
     defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
        /* Any flavor of threads except SRC_M3. */

/* This returns a list of objects, linked through their first          */
/* word.  Its use can greatly reduce lock contention problems, since   */
/* the allocation lock can be acquired and released many fewer times.  */
/* lb must be large enough to hold the pointer field.                  */
/* It is used internally by gc_local_alloc.h, which provides a simpler */
/* programming interface on Linux.                                     */
GC_PTR GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(GC_PTR *)(p))    /* Retrieve the next element    */
                                       /* in returned list.            */
extern void GC_thr_init();     /* Needed for Solaris/X86       */

#endif /* THREADS && !SRC_M3 */
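/*
 * Usage sketch (illustrative only; "use_object" is a hypothetical consumer):
 * carve a batch of 32-byte objects out of one GC_malloc_many call and walk
 * the returned list with GC_NEXT, unlinking each object before use.
 *
 *   GC_PTR batch = GC_malloc_many(32);
 *   GC_PTR p;
 *   while (batch != 0) {
 *       p = batch;
 *       batch = GC_NEXT(p);
 *       GC_NEXT(p) = 0;
 *       use_object(p);
 *   }
 */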
#if defined(GC_WIN32_THREADS) && !defined(__CYGWIN32__) && !defined(__CYGWIN__)
# include <windows.h>

/*
 * All threads must be created using GC_CreateThread, so that they will be
 * recorded in the thread table.  For backwards compatibility, this is not
 * technically true if the GC is built as a dynamic library, since it can
 * and does then use DllMain to keep track of thread creations.  But new code
 * should be built to call GC_CreateThread.
 */
GC_API HANDLE WINAPI GC_CreateThread(
    LPSECURITY_ATTRIBUTES lpThreadAttributes,
    DWORD dwStackSize, LPTHREAD_START_ROUTINE lpStartAddress,
    LPVOID lpParameter, DWORD dwCreationFlags, LPDWORD lpThreadId);
# if defined(_WIN32_WCE)
  /*
   * win32_threads.c implements the real WinMain, which will start a new thread
   * to call GC_WinMain after initializing the garbage collector.
   */
  int WINAPI GC_WinMain(
      HINSTANCE hInstance,
      HINSTANCE hPrevInstance,
      LPWSTR lpCmdLine,
      int nCmdShow);

#  define WinMain GC_WinMain
#  define CreateThread GC_CreateThread
# endif /* defined(_WIN32_WCE) */

#endif /* defined(GC_WIN32_THREADS) && !cygwin */
/*
 * If you are planning on putting
 * the collector in a SunOS 5 dynamic library, you need to call GC_INIT()
 * from the statically loaded program section.
 * This circumvents a Solaris 2.X (X<=4) linker bug.
 */
#if defined(sparc) || defined(__sparc)
#   define GC_INIT() { extern end, etext; \
                       GC_noop(&end, &etext); }
#else
# if defined(__CYGWIN32__) && defined(GC_DLL) || defined (_AIX)
    /*
     * Similarly gnu-win32 DLLs need explicit initialization from
     * the main program, as does AIX.
     */
#   define GC_INIT() { GC_add_roots(DATASTART, DATAEND); }
# else
#  if defined(__APPLE__) && defined(__MACH__)
#   define GC_INIT() { GC_init(); }
#  else
#   define GC_INIT()
#  endif
# endif
#endif

#if !defined(_WIN32_WCE) \
    && ((defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \
        || defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__))
  /* win32S may not free all resources on process exit.  */
  /* This explicitly deallocates the heap.               */
  GC_API void GC_win32_free_heap ();
#endif

#if ( defined(_AMIGA) && !defined(GC_AMIGA_MAKINGLIB) )
  /* Allocation really goes through GC_amiga_allocwrapper_do */
# include "gc_amiga_redirects.h"
#endif

#if defined(GC_REDIRECT_TO_LOCAL) && !defined(GC_LOCAL_ALLOC_H)
#  include "gc_local_alloc.h"
#endif
#ifdef __cplusplus
    }  /* end of extern "C" */
#endif