Import boehm-gc snapshot, taken from
[official-gcc.git] / boehm-gc / misc.c
blob 7d3a1e9f866d2cd2c9b8e7ef57c91a34defe486c
1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
16 #include "private/gc_pmark.h"
18 #include <stdio.h>
19 #include <limits.h>
20 #include <stdarg.h>
22 #ifndef MSWINCE
23 # include <signal.h>
24 #endif
26 #ifdef GC_SOLARIS_THREADS
27 # include <sys/syscall.h>
28 #endif
29 #if defined(MSWIN32) || defined(MSWINCE) \
30 || (defined(CYGWIN32) && defined(GC_READ_ENV_FILE))
31 # ifndef WIN32_LEAN_AND_MEAN
32 # define WIN32_LEAN_AND_MEAN 1
33 # endif
34 # define NOSERVICE
35 # include <windows.h>
36 #endif
38 #if defined(UNIX_LIKE) || defined(CYGWIN32) || defined(SYMBIAN)
39 # include <fcntl.h>
40 # include <sys/types.h>
41 # include <sys/stat.h>
42 #endif
44 #ifdef NONSTOP
45 # include <floss.h>
46 #endif
48 #ifdef THREADS
49 # ifdef PCR
50 # include "il/PCR_IL.h"
51 GC_INNER PCR_Th_ML GC_allocate_ml;
52 # elif defined(SN_TARGET_PS3)
53 # include <pthread.h>
54 GC_INNER pthread_mutex_t GC_allocate_ml;
55 # endif
56 /* For other platforms with threads, the lock and possibly */
57 /* GC_lock_holder variables are defined in the thread support code. */
58 #endif /* THREADS */
60 #ifdef DYNAMIC_LOADING
61 /* We need to register the main data segment. Returns TRUE unless */
62 /* this is done implicitly as part of dynamic library registration. */
63 # define GC_REGISTER_MAIN_STATIC_DATA() GC_register_main_static_data()
64 #elif defined(GC_DONT_REGISTER_MAIN_STATIC_DATA)
65 # define GC_REGISTER_MAIN_STATIC_DATA() FALSE
66 #else
67 /* Don't unnecessarily call GC_register_main_static_data() in case */
68 /* dyn_load.c isn't linked in. */
69 # define GC_REGISTER_MAIN_STATIC_DATA() TRUE
70 #endif
72 #ifdef NEED_CANCEL_DISABLE_COUNT
73 __thread unsigned char GC_cancel_disable_count = 0;
74 #endif
76 GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */;
78 GC_INNER GC_bool GC_debugging_started = FALSE;
79 /* defined here so we don't have to load debug_malloc.o */
81 ptr_t GC_stackbottom = 0;
83 #ifdef IA64
84 ptr_t GC_register_stackbottom = 0;
85 #endif
87 int GC_dont_gc = FALSE;
89 int GC_dont_precollect = FALSE;
91 GC_bool GC_quiet = 0; /* used also in pcr_interface.c */
93 #ifndef SMALL_CONFIG
94 int GC_real_print_stats = 0;
95 #endif
97 #ifdef GC_PRINT_BACK_HEIGHT
98 GC_INNER GC_bool GC_print_back_height = TRUE;
99 #else
100 GC_INNER GC_bool GC_print_back_height = FALSE;
101 #endif
103 #ifndef NO_DEBUGGING
104 GC_INNER GC_bool GC_dump_regularly = FALSE;
105 /* Generate regular debugging dumps. */
106 #endif
108 #ifdef KEEP_BACK_PTRS
109 GC_INNER long GC_backtraces = 0;
110 /* Number of random backtraces to generate for each GC. */
111 #endif
113 #ifdef FIND_LEAK
114 int GC_find_leak = 1;
115 #else
116 int GC_find_leak = 0;
117 #endif
119 #ifndef SHORT_DBG_HDRS
120 # ifdef GC_FINDLEAK_DELAY_FREE
121 GC_INNER GC_bool GC_findleak_delay_free = TRUE;
122 # else
123 GC_INNER GC_bool GC_findleak_delay_free = FALSE;
124 # endif
125 #endif /* !SHORT_DBG_HDRS */
127 #ifdef ALL_INTERIOR_POINTERS
128 int GC_all_interior_pointers = 1;
129 #else
130 int GC_all_interior_pointers = 0;
131 #endif
133 #ifdef FINALIZE_ON_DEMAND
134 int GC_finalize_on_demand = 1;
135 #else
136 int GC_finalize_on_demand = 0;
137 #endif
139 #ifdef JAVA_FINALIZATION
140 int GC_java_finalization = 1;
141 #else
142 int GC_java_finalization = 0;
143 #endif
145 /* All accesses to it should be synchronized to avoid data races. */
146 GC_finalizer_notifier_proc GC_finalizer_notifier =
147 (GC_finalizer_notifier_proc)0;
149 #ifdef GC_FORCE_UNMAP_ON_GCOLLECT
150 /* Has no effect unless USE_MUNMAP. */
151 /* Has no effect on implicitly-initiated garbage collections. */
152 GC_INNER GC_bool GC_force_unmap_on_gcollect = TRUE;
153 #else
154 GC_INNER GC_bool GC_force_unmap_on_gcollect = FALSE;
155 #endif
157 #ifndef GC_LARGE_ALLOC_WARN_INTERVAL
158 # define GC_LARGE_ALLOC_WARN_INTERVAL 5
159 #endif
160 GC_INNER long GC_large_alloc_warn_interval = GC_LARGE_ALLOC_WARN_INTERVAL;
161 /* Interval between unsuppressed warnings. */
163 STATIC void * GC_CALLBACK GC_default_oom_fn(
164 size_t bytes_requested GC_ATTR_UNUSED)
166 return(0);
169 /* All accesses to it should be synchronized to avoid data races. */
170 GC_oom_func GC_oom_fn = GC_default_oom_fn;
172 #ifdef CAN_HANDLE_FORK
173 # ifdef HANDLE_FORK
174 GC_INNER int GC_handle_fork = 1;
175 /* The value is examined by GC_thr_init. */
176 # else
177 GC_INNER int GC_handle_fork = FALSE;
178 # endif
180 #elif !defined(HAVE_NO_FORK)
182 /* Same as above but with GC_CALL calling conventions. */
183 GC_API void GC_CALL GC_atfork_prepare(void)
185 # ifdef THREADS
186 ABORT("fork() handling unsupported");
187 # endif
190 GC_API void GC_CALL GC_atfork_parent(void)
192 /* empty */
195 GC_API void GC_CALL GC_atfork_child(void)
197 /* empty */
199 #endif /* !CAN_HANDLE_FORK && !HAVE_NO_FORK */
201 /* Overrides the default automatic handle-fork mode. Has effect only */
202 /* if called before GC_INIT. */
203 GC_API void GC_CALL GC_set_handle_fork(int value GC_ATTR_UNUSED)
205 # ifdef CAN_HANDLE_FORK
206 if (!GC_is_initialized)
207 GC_handle_fork = value >= -1 ? value : 1;
208 /* Map all negative values except for -1 to a positive one. */
209 # elif defined(THREADS) || (defined(DARWIN) && defined(MPROTECT_VDB))
210 if (!GC_is_initialized && value) {
211 # ifndef SMALL_CONFIG
212 GC_init(); /* just to initialize GC_stderr */
213 # endif
214 ABORT("fork() handling unsupported");
216 # else
217 /* No at-fork handler is needed in the single-threaded mode. */
218 # endif
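/*
 * Illustrative usage sketch (GC_INIT from the public gc.h API is assumed
 * here): since the setter only has effect before initialization, a client
 * that intends to fork() would typically call it first:
 *
 *     GC_set_handle_fork(1);   -- request at-fork handling
 *     GC_INIT();
 *     ...
 *     pid = fork();
 */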
221 /* Set things up so that GC_size_map[i] >= granules(i), */
222 /* but not too much bigger */
223 /* and so that size_map contains relatively few distinct entries */
224 /* This was originally stolen from Russ Atkinson's Cedar */
225 /* quantization algorithm (but we precompute it). */
226 STATIC void GC_init_size_map(void)
228 int i;
230 /* Map size 0 to something bigger. */
231 /* This avoids problems at lower levels. */
232 GC_size_map[0] = 1;
233 for (i = 1; i <= GRANULES_TO_BYTES(TINY_FREELISTS-1) - EXTRA_BYTES; i++) {
234 GC_size_map[i] = ROUNDED_UP_GRANULES(i);
235 # ifndef _MSC_VER
236 GC_ASSERT(GC_size_map[i] < TINY_FREELISTS);
237 /* Seems to tickle bug in VC++ 2008 for AMD64 */
238 # endif
240 /* We leave the rest of the array to be filled in on demand. */
243 /* Fill in additional entries in GC_size_map, including the ith one */
244 /* We assume the ith entry is currently 0. */
245 /* Note that a filled in section of the array ending at n always */
246 /* has length at least n/4. */
247 GC_INNER void GC_extend_size_map(size_t i)
249 size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
250 size_t granule_sz = orig_granule_sz;
251 size_t byte_sz = GRANULES_TO_BYTES(granule_sz);
252 /* The size we try to preserve. */
253 /* Close to i, unless this would */
254 /* introduce too many distinct sizes. */
255 size_t smaller_than_i = byte_sz - (byte_sz >> 3);
256 size_t much_smaller_than_i = byte_sz - (byte_sz >> 2);
257 size_t low_limit; /* The lowest indexed entry we */
258 /* initialize. */
259 size_t j;
261 if (GC_size_map[smaller_than_i] == 0) {
262 low_limit = much_smaller_than_i;
263 while (GC_size_map[low_limit] != 0) low_limit++;
264 } else {
265 low_limit = smaller_than_i + 1;
266 while (GC_size_map[low_limit] != 0) low_limit++;
267 granule_sz = ROUNDED_UP_GRANULES(low_limit);
268 granule_sz += granule_sz >> 3;
269 if (granule_sz < orig_granule_sz) granule_sz = orig_granule_sz;
271 /* For these larger sizes, we use an even number of granules. */
272 /* This makes it easier to, for example, construct a 16-byte-aligned */
273 /* allocator even if GRANULE_BYTES is 8. */
274 granule_sz += 1;
275 granule_sz &= ~1;
276 if (granule_sz > MAXOBJGRANULES) {
277 granule_sz = MAXOBJGRANULES;
279 /* If we can fit the same number of larger objects in a block, */
280 /* do so. */
282 size_t number_of_objs = HBLK_GRANULES/granule_sz;
283 granule_sz = HBLK_GRANULES/number_of_objs;
284 granule_sz &= ~1;
286 byte_sz = GRANULES_TO_BYTES(granule_sz);
287 /* We may need one extra byte; */
288 /* don't always fill in GC_size_map[byte_sz] */
289 byte_sz -= EXTRA_BYTES;
291 for (j = low_limit; j <= byte_sz; j++) GC_size_map[j] = granule_sz;
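/*
 * A quick illustration of the quantization above (assuming, purely for the
 * example, GRANULE_BYTES of 16 and EXTRA_BYTES of 0): GC_init_size_map maps
 * requests of 1..16 bytes to 1 granule, 17..32 bytes to 2 granules, and so
 * on, while GC_extend_size_map later coarsens larger sizes (even granule
 * counts, roughly at most 1/8 extra space) so that GC_size_map keeps
 * relatively few distinct entries.
 */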
296 * The following is a gross hack to deal with a problem that can occur
297 * on machines that are sloppy about stack frame sizes, notably SPARC.
298 * Bogus pointers may be written to the stack and not cleared for
299 * a LONG time, because they always fall into holes in stack frames
300 * that are not written. We partially address this by clearing
301 * sections of the stack whenever we get control.
303 # ifdef THREADS
304 # define BIG_CLEAR_SIZE 2048 /* Clear this much now and then. */
305 # define SMALL_CLEAR_SIZE 256 /* Clear this much every time. */
306 # else
307 STATIC word GC_stack_last_cleared = 0; /* GC_no when we last did this */
308 STATIC ptr_t GC_min_sp = NULL;
309 /* Coolest stack pointer value from which */
310 /* we've already cleared the stack. */
311 STATIC ptr_t GC_high_water = NULL;
312 /* "hottest" stack pointer value we have seen */
313 /* recently. Degrades over time. */
314 STATIC word GC_bytes_allocd_at_reset = 0;
315 # define DEGRADE_RATE 50
316 # endif
318 # define CLEAR_SIZE 213 /* Granularity for GC_clear_stack_inner */
320 #if defined(ASM_CLEAR_CODE)
321 void *GC_clear_stack_inner(void *, ptr_t);
322 #else
323 /* Clear the stack up to about limit. Return arg. This function is */
324 /* not static because it could also be erroneously defined in .S */
325 /* file, so this error would be caught by the linker. */
326 void * GC_clear_stack_inner(void *arg, ptr_t limit)
328 volatile word dummy[CLEAR_SIZE];
330 BZERO((/* no volatile */ void *)dummy, sizeof(dummy));
331 if ((word)GC_approx_sp() COOLER_THAN (word)limit) {
332 (void) GC_clear_stack_inner(arg, limit);
334 /* Make sure the recursive call is not a tail call, and the bzero */
335 /* call is not recognized as dead code. */
336 GC_noop1((word)dummy);
337 return(arg);
339 #endif
341 /* Clear some of the inaccessible part of the stack. Returns its */
342 /* argument, so it can be used in a tail call position, hence clearing */
343 /* another frame. */
344 GC_API void * GC_CALL GC_clear_stack(void *arg)
346 ptr_t sp = GC_approx_sp(); /* Hotter than actual sp */
347 # ifdef THREADS
348 word volatile dummy[SMALL_CLEAR_SIZE];
349 static unsigned random_no = 0;
350 /* Should be more random than it is ... */
351 /* Used to occasionally clear a bigger */
352 /* chunk. */
353 # endif
354 ptr_t limit;
356 # define SLOP 400
357 /* Extra bytes we clear every time. This clears our own */
358 /* activation record, and should cause more frequent */
359 /* clearing near the cold end of the stack, a good thing. */
360 # define GC_SLOP 4000
361 /* We make GC_high_water this much hotter than we really saw */
362 /* it, to cover for GC noise etc. above our current frame. */
363 # define CLEAR_THRESHOLD 100000
364 /* We restart the clearing process after this many bytes of */
365 /* allocation. Otherwise very heavily recursive programs */
366 /* with sparse stacks may result in heaps that grow almost */
367 /* without bounds. As the heap gets larger, collection */
368 /* frequency decreases, thus clearing frequency would decrease, */
369 /* thus more junk remains accessible, thus the heap gets */
370 /* larger ... */
371 # ifdef THREADS
372 if (++random_no % 13 == 0) {
373 limit = sp;
374 MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
375 limit = (ptr_t)((word)limit & ~0xf);
376 /* Make it sufficiently aligned for assembly */
377 /* implementations of GC_clear_stack_inner. */
378 return GC_clear_stack_inner(arg, limit);
379 } else {
380 BZERO((void *)dummy, SMALL_CLEAR_SIZE*sizeof(word));
381 return arg;
383 # else
384 if (GC_gc_no > GC_stack_last_cleared) {
385 /* Start things over, so we clear the entire stack again */
386 if (GC_stack_last_cleared == 0) GC_high_water = (ptr_t)GC_stackbottom;
387 GC_min_sp = GC_high_water;
388 GC_stack_last_cleared = GC_gc_no;
389 GC_bytes_allocd_at_reset = GC_bytes_allocd;
391 /* Adjust GC_high_water */
392 MAKE_COOLER(GC_high_water, WORDS_TO_BYTES(DEGRADE_RATE) + GC_SLOP);
393 if ((word)sp HOTTER_THAN (word)GC_high_water) {
394 GC_high_water = sp;
396 MAKE_HOTTER(GC_high_water, GC_SLOP);
397 limit = GC_min_sp;
398 MAKE_HOTTER(limit, SLOP);
399 if ((word)sp COOLER_THAN (word)limit) {
400 limit = (ptr_t)((word)limit & ~0xf);
401 /* Make it sufficiently aligned for assembly */
402 /* implementations of GC_clear_stack_inner. */
403 GC_min_sp = sp;
404 return(GC_clear_stack_inner(arg, limit));
405 } else if (GC_bytes_allocd - GC_bytes_allocd_at_reset > CLEAR_THRESHOLD) {
406 /* Restart clearing process, but limit how much clearing we do. */
407 GC_min_sp = sp;
408 MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
409 if ((word)GC_min_sp HOTTER_THAN (word)GC_high_water)
410 GC_min_sp = GC_high_water;
411 GC_bytes_allocd_at_reset = GC_bytes_allocd;
413 return(arg);
414 # endif
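/*
 * A note on the temperature metaphor used throughout this file: "hotter"
 * means closer to the current stack pointer and "cooler" means closer to
 * the stack base (GC_stackbottom), so on a typical downward-growing stack
 * a hotter address is numerically smaller; HOTTER_THAN/COOLER_THAN and
 * MAKE_HOTTER/MAKE_COOLER (defined in the private headers) compare and
 * adjust pointers accordingly.  This is a descriptive sketch of the
 * intent, not a restatement of the exact macro definitions.
 */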
418 /* Return a pointer to the base address of p, given a pointer to */
419 /* an address within an object. Return 0 otherwise. */
420 GC_API void * GC_CALL GC_base(void * p)
422 ptr_t r;
423 struct hblk *h;
424 bottom_index *bi;
425 hdr *candidate_hdr;
426 ptr_t limit;
428 r = p;
429 if (!EXPECT(GC_is_initialized, TRUE)) return 0;
430 h = HBLKPTR(r);
431 GET_BI(r, bi);
432 candidate_hdr = HDR_FROM_BI(bi, r);
433 if (candidate_hdr == 0) return(0);
434 /* If it's a pointer to the middle of a large object, move it */
435 /* to the beginning. */
436 while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)) {
437 h = FORWARDED_ADDR(h,candidate_hdr);
438 r = (ptr_t)h;
439 candidate_hdr = HDR(h);
441 if (HBLK_IS_FREE(candidate_hdr)) return(0);
442 /* Make sure r points to the beginning of the object */
443 r = (ptr_t)((word)r & ~(WORDS_TO_BYTES(1) - 1));
445 size_t offset = HBLKDISPL(r);
446 word sz = candidate_hdr -> hb_sz;
447 size_t obj_displ = offset % sz;
449 r -= obj_displ;
450 limit = r + sz;
451 if ((word)limit > (word)(h + 1) && sz <= HBLKSIZE) {
452 return(0);
454 if ((word)p >= (word)limit) return(0);
456 return((void *)r);
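/*
 * Illustrative usage sketch (GC_MALLOC from the public gc.h API is
 * assumed here):
 *
 *     static int not_gc_allocated;
 *     char *obj  = (char *)GC_MALLOC(64);
 *     void *base = GC_base(obj + 10);            -- yields obj
 *     void *none = GC_base(&not_gc_allocated);   -- yields 0
 */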
459 /* Return TRUE if and only if p points to somewhere in GC heap. */
460 GC_API int GC_CALL GC_is_heap_ptr(const void *p)
462 bottom_index *bi;
464 GC_ASSERT(GC_is_initialized);
465 GET_BI(p, bi);
466 return HDR_FROM_BI(bi, p) != 0;
469 /* Return the size of an object, given a pointer to its base. */
470 /* (For small objects this also happens to work from interior pointers, */
471 /* but that shouldn't be relied upon.) */
472 GC_API size_t GC_CALL GC_size(const void * p)
474 hdr * hhdr = HDR(p);
476 return hhdr -> hb_sz;
480 /* These getters remain unsynchronized for compatibility (since some */
481 /* clients could call some of them from a GC callback holding the */
482 /* allocator lock). */
483 GC_API size_t GC_CALL GC_get_heap_size(void)
485 /* ignore the memory space returned to OS (i.e. count only the */
486 /* space owned by the garbage collector) */
487 return (size_t)(GC_heapsize - GC_unmapped_bytes);
490 GC_API size_t GC_CALL GC_get_free_bytes(void)
492 /* ignore the memory space returned to OS */
493 return (size_t)(GC_large_free_bytes - GC_unmapped_bytes);
496 GC_API size_t GC_CALL GC_get_unmapped_bytes(void)
498 return (size_t)GC_unmapped_bytes;
501 GC_API size_t GC_CALL GC_get_bytes_since_gc(void)
503 return (size_t)GC_bytes_allocd;
506 GC_API size_t GC_CALL GC_get_total_bytes(void)
508 return (size_t)(GC_bytes_allocd + GC_bytes_allocd_before_gc);
511 #ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
513 /* Return the heap usage information. This is a thread-safe (atomic) */
514 /* alternative to the five getters above. A NULL pointer is allowed */
515 /* for any argument. Returned (filled-in) values are of word type. */
516 GC_API void GC_CALL GC_get_heap_usage_safe(GC_word *pheap_size,
517 GC_word *pfree_bytes, GC_word *punmapped_bytes,
518 GC_word *pbytes_since_gc, GC_word *ptotal_bytes)
520 DCL_LOCK_STATE;
522 LOCK();
523 if (pheap_size != NULL)
524 *pheap_size = GC_heapsize - GC_unmapped_bytes;
525 if (pfree_bytes != NULL)
526 *pfree_bytes = GC_large_free_bytes - GC_unmapped_bytes;
527 if (punmapped_bytes != NULL)
528 *punmapped_bytes = GC_unmapped_bytes;
529 if (pbytes_since_gc != NULL)
530 *pbytes_since_gc = GC_bytes_allocd;
531 if (ptotal_bytes != NULL)
532 *ptotal_bytes = GC_bytes_allocd + GC_bytes_allocd_before_gc;
533 UNLOCK();
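/*
 * Illustrative usage sketch:
 *
 *     GC_word heap_sz, free_b, unmapped_b, since_gc, total;
 *     GC_get_heap_usage_safe(&heap_sz, &free_b, &unmapped_b,
 *                            &since_gc, &total);
 *
 * Any value the caller does not need may simply be requested with a NULL
 * pointer.
 */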
536 GC_INNER word GC_reclaimed_bytes_before_gc = 0;
538 /* Fill in GC statistics provided the destination is large enough. */
539 static void fill_prof_stats(struct GC_prof_stats_s *pstats)
541 pstats->heapsize_full = GC_heapsize;
542 pstats->free_bytes_full = GC_large_free_bytes;
543 pstats->unmapped_bytes = GC_unmapped_bytes;
544 pstats->bytes_allocd_since_gc = GC_bytes_allocd;
545 pstats->allocd_bytes_before_gc = GC_bytes_allocd_before_gc;
546 pstats->non_gc_bytes = GC_non_gc_bytes;
547 pstats->gc_no = GC_gc_no; /* could be -1 */
548 # ifdef PARALLEL_MARK
549 pstats->markers_m1 = (word)GC_markers_m1;
550 # else
551 pstats->markers_m1 = 0; /* one marker */
552 # endif
553 pstats->bytes_reclaimed_since_gc = GC_bytes_found > 0 ?
554 (word)GC_bytes_found : 0;
555 pstats->reclaimed_bytes_before_gc = GC_reclaimed_bytes_before_gc;
558 # include <string.h> /* for memset() */
560 GC_API size_t GC_CALL GC_get_prof_stats(struct GC_prof_stats_s *pstats,
561 size_t stats_sz)
563 struct GC_prof_stats_s stats;
564 DCL_LOCK_STATE;
566 LOCK();
567 fill_prof_stats(stats_sz >= sizeof(stats) ? pstats : &stats);
568 UNLOCK();
570 if (stats_sz == sizeof(stats)) {
571 return sizeof(stats);
572 } else if (stats_sz > sizeof(stats)) {
573 /* Fill in the remaining part with -1. */
574 memset((char *)pstats + sizeof(stats), 0xff, stats_sz - sizeof(stats));
575 return sizeof(stats);
576 } else {
577 BCOPY(&stats, pstats, stats_sz);
578 return stats_sz;
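/*
 * Illustrative usage sketch: passing the caller's sizeof() of the struct
 * keeps the call robust against header/library version skew -- trailing
 * fields unknown to one side are either ignored or filled with (word)-1:
 *
 *     struct GC_prof_stats_s st;
 *     (void)GC_get_prof_stats(&st, sizeof(st));
 */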
582 # ifdef THREADS
583 /* The _unsafe version assumes the caller holds the allocation lock. */
584 GC_API size_t GC_CALL GC_get_prof_stats_unsafe(
585 struct GC_prof_stats_s *pstats,
586 size_t stats_sz)
588 struct GC_prof_stats_s stats;
590 if (stats_sz >= sizeof(stats)) {
591 fill_prof_stats(pstats);
592 if (stats_sz > sizeof(stats))
593 memset((char *)pstats + sizeof(stats), 0xff,
594 stats_sz - sizeof(stats));
595 return sizeof(stats);
596 } else {
597 fill_prof_stats(&stats);
598 BCOPY(&stats, pstats, stats_sz);
599 return stats_sz;
602 # endif /* THREADS */
604 #endif /* !GC_GET_HEAP_USAGE_NOT_NEEDED */
606 #if defined(GC_DARWIN_THREADS) || defined(GC_OPENBSD_THREADS) \
607 || defined(GC_WIN32_THREADS) || (defined(NACL) && defined(THREADS))
608 /* GC does not use signals to suspend and restart threads. */
609 GC_API void GC_CALL GC_set_suspend_signal(int sig GC_ATTR_UNUSED)
611 /* empty */
614 GC_API void GC_CALL GC_set_thr_restart_signal(int sig GC_ATTR_UNUSED)
616 /* empty */
619 GC_API int GC_CALL GC_get_suspend_signal(void)
621 return -1;
624 GC_API int GC_CALL GC_get_thr_restart_signal(void)
626 return -1;
628 #endif /* GC_DARWIN_THREADS || GC_WIN32_THREADS || ... */
630 #if !defined(_MAX_PATH) && (defined(MSWIN32) || defined(MSWINCE) \
631 || defined(CYGWIN32))
632 # define _MAX_PATH MAX_PATH
633 #endif
635 #ifdef GC_READ_ENV_FILE
636 /* This works for Win32/WinCE for now. Really useful only for WinCE. */
637 STATIC char *GC_envfile_content = NULL;
638 /* The content of the GC "env" file with CR and */
639 /* LF replaced with '\0'. NULL if the file is */
640 /* missing or empty. Otherwise, always ends */
641 /* with '\0'. */
642 STATIC unsigned GC_envfile_length = 0;
643 /* Length of GC_envfile_content (if non-NULL). */
645 # ifndef GC_ENVFILE_MAXLEN
646 # define GC_ENVFILE_MAXLEN 0x4000
647 # endif
649 /* The routine initializes GC_envfile_content from the GC "env" file. */
650 STATIC void GC_envfile_init(void)
652 # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
653 HANDLE hFile;
654 char *content;
655 unsigned ofs;
656 unsigned len;
657 DWORD nBytesRead;
658 TCHAR path[_MAX_PATH + 0x10]; /* buffer for path + ext */
659 len = (unsigned)GetModuleFileName(NULL /* hModule */, path,
660 _MAX_PATH + 1);
661 /* If GetModuleFileName() has failed then len is 0. */
662 if (len > 4 && path[len - 4] == (TCHAR)'.') {
663 len -= 4; /* strip executable file extension */
665 BCOPY(TEXT(".gc.env"), &path[len], sizeof(TEXT(".gc.env")));
666 hFile = CreateFile(path, GENERIC_READ,
667 FILE_SHARE_READ | FILE_SHARE_WRITE,
668 NULL /* lpSecurityAttributes */, OPEN_EXISTING,
669 FILE_ATTRIBUTE_NORMAL, NULL /* hTemplateFile */);
670 if (hFile == INVALID_HANDLE_VALUE)
671 return; /* the file is absent or the operation has failed */
672 len = (unsigned)GetFileSize(hFile, NULL);
673 if (len <= 1 || len >= GC_ENVFILE_MAXLEN) {
674 CloseHandle(hFile);
675 return; /* invalid file length - ignoring the file content */
677 /* At this execution point, GC_setpagesize() and GC_init_win32() */
678 /* must already be called (for GET_MEM() to work correctly). */
679 content = (char *)GET_MEM(len + 1);
680 if (content == NULL) {
681 CloseHandle(hFile);
682 return; /* allocation failure */
684 ofs = 0;
685 nBytesRead = (DWORD)-1L;
686 /* Last ReadFile() call should clear nBytesRead on success. */
687 while (ReadFile(hFile, content + ofs, len - ofs + 1, &nBytesRead,
688 NULL /* lpOverlapped */) && nBytesRead != 0) {
689 if ((ofs += nBytesRead) > len)
690 break;
692 CloseHandle(hFile);
693 if (ofs != len || nBytesRead != 0)
694 return; /* read operation failed - ignoring the file content */
695 content[ofs] = '\0';
696 while (ofs-- > 0) {
697 if (content[ofs] == '\r' || content[ofs] == '\n')
698 content[ofs] = '\0';
700 GC_envfile_length = len + 1;
701 GC_envfile_content = content;
702 # endif
705 /* This routine scans GC_envfile_content for the specified */
706 /* environment variable (and returns its value if found). */
707 GC_INNER char * GC_envfile_getenv(const char *name)
709 char *p;
710 char *end_of_content;
711 unsigned namelen;
712 # ifndef NO_GETENV
713 p = getenv(name); /* try the standard getenv() first */
714 if (p != NULL)
715 return *p != '\0' ? p : NULL;
716 # endif
717 p = GC_envfile_content;
718 if (p == NULL)
719 return NULL; /* "env" file is absent (or empty) */
720 namelen = strlen(name);
721 if (namelen == 0) /* a sanity check */
722 return NULL;
723 for (end_of_content = p + GC_envfile_length;
724 p != end_of_content; p += strlen(p) + 1) {
725 if (strncmp(p, name, namelen) == 0 && *(p += namelen) == '=') {
726 p++; /* the match is found; skip '=' */
727 return *p != '\0' ? p : NULL;
729 /* If not matching then skip to the next line. */
731 return NULL; /* no match found */
733 #endif /* GC_READ_ENV_FILE */
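/*
 * For illustration, with GC_READ_ENV_FILE the collector looks for a file
 * named after the executable but with a ".gc.env" extension (e.g.
 * "myapp.gc.env" next to "myapp.exe"); every CR/LF-separated line holds
 * one NAME=VALUE setting, for instance:
 *
 *     GC_PRINT_STATS=1
 *     GC_INITIAL_HEAP_SIZE=16M
 *
 * GC_envfile_getenv() falls back to this content when the ordinary
 * getenv() yields nothing.
 */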
735 GC_INNER GC_bool GC_is_initialized = FALSE;
737 #if (defined(MSWIN32) || defined(MSWINCE)) && defined(THREADS)
738 GC_INNER CRITICAL_SECTION GC_write_cs;
739 #endif
741 STATIC void GC_exit_check(void)
743 if (GC_find_leak) {
744 GC_gcollect();
748 #if defined(UNIX_LIKE) && !defined(NO_DEBUGGING)
749 static void looping_handler(int sig)
751 GC_err_printf("Caught signal %d: looping in handler\n", sig);
752 for (;;) {
753 /* empty */
757 static GC_bool installed_looping_handler = FALSE;
759 static void maybe_install_looping_handler(void)
761 /* Install looping handler before the write fault handler, so we */
762 /* handle write faults correctly. */
763 if (!installed_looping_handler && 0 != GETENV("GC_LOOP_ON_ABORT")) {
764 GC_set_and_save_fault_handler(looping_handler);
765 installed_looping_handler = TRUE;
769 #else /* !UNIX_LIKE */
770 # define maybe_install_looping_handler()
771 #endif
773 #define GC_DEFAULT_STDOUT_FD 1
774 #define GC_DEFAULT_STDERR_FD 2
776 #if !defined(OS2) && !defined(MACOS) && !defined(MSWIN32) && !defined(MSWINCE)
777 STATIC int GC_stdout = GC_DEFAULT_STDOUT_FD;
778 STATIC int GC_stderr = GC_DEFAULT_STDERR_FD;
779 STATIC int GC_log = GC_DEFAULT_STDERR_FD;
780 #endif
782 STATIC word GC_parse_mem_size_arg(const char *str)
784 char *endptr;
785 word result = 0; /* bad value */
786 char ch;
788 if (*str != '\0') {
789 result = (word)STRTOULL(str, &endptr, 10);
790 ch = *endptr;
791 if (ch != '\0') {
792 if (*(endptr + 1) != '\0')
793 return 0;
794 /* Allow k, M or G suffix. */
795 switch (ch) {
796 case 'K':
797 case 'k':
798 result <<= 10;
799 break;
800 case 'M':
801 case 'm':
802 result <<= 20;
803 break;
804 case 'G':
805 case 'g':
806 result <<= 30;
807 break;
808 default:
809 result = 0;
813 return result;
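/*
 * Examples of strings accepted by the parser above (as used for
 * GC_INITIAL_HEAP_SIZE and GC_MAXIMUM_HEAP_SIZE): "1048576" means bytes,
 * "64k" is 64*1024, "16M" is 16*1024*1024, "1G" is 1024*1024*1024; any
 * other trailing characters make it return 0, which the callers treat
 * as a bad value.
 */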
816 #define GC_LOG_STD_NAME "gc.log"
818 GC_API void GC_CALL GC_init(void)
820 /* LOCK(); -- no longer does anything this early. */
821 word initial_heap_sz;
822 IF_CANCEL(int cancel_state;)
824 if (EXPECT(GC_is_initialized, TRUE)) return;
825 # ifdef REDIRECT_MALLOC
827 static GC_bool init_started = FALSE;
828 if (init_started)
829 ABORT("Redirected malloc() called during GC init");
830 init_started = TRUE;
832 # endif
834 # ifdef GC_INITIAL_HEAP_SIZE
835 initial_heap_sz = divHBLKSZ(GC_INITIAL_HEAP_SIZE);
836 # else
837 initial_heap_sz = (word)MINHINCR;
838 # endif
839 DISABLE_CANCEL(cancel_state);
840 /* Note that although we are nominally called with the */
841 /* allocation lock held, the allocation lock is now */
842 /* only really acquired once a second thread is forked.*/
843 /* And the initialization code needs to run before */
844 /* then. Thus we really don't hold any locks, and can */
845 /* in fact safely initialize them here. */
846 # ifdef THREADS
847 GC_ASSERT(!GC_need_to_lock);
848 # ifdef SN_TARGET_PS3
850 pthread_mutexattr_t mattr;
851 pthread_mutexattr_init(&mattr);
852 pthread_mutex_init(&GC_allocate_ml, &mattr);
853 pthread_mutexattr_destroy(&mattr);
855 # endif
856 # endif /* THREADS */
857 # if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
859 # ifndef MSWINCE
860 BOOL (WINAPI *pfn) (LPCRITICAL_SECTION, DWORD) = NULL;
861 HMODULE hK32 = GetModuleHandle(TEXT("kernel32.dll"));
862 if (hK32)
863 pfn = (BOOL (WINAPI *) (LPCRITICAL_SECTION, DWORD))
864 GetProcAddress (hK32,
865 "InitializeCriticalSectionAndSpinCount");
866 if (pfn)
867 pfn(&GC_allocate_ml, 4000);
868 else
869 # endif /* !MSWINCE */
870 /* else */ InitializeCriticalSection (&GC_allocate_ml);
872 # endif /* GC_WIN32_THREADS */
873 # if (defined(MSWIN32) || defined(MSWINCE)) && defined(THREADS)
874 InitializeCriticalSection(&GC_write_cs);
875 # endif
876 GC_setpagesize();
877 # ifdef MSWIN32
878 GC_init_win32();
879 # endif
880 # ifdef GC_READ_ENV_FILE
881 GC_envfile_init();
882 # endif
883 # ifndef SMALL_CONFIG
884 # ifdef GC_PRINT_VERBOSE_STATS
885 /* This is useful for debugging and profiling on platforms with */
886 /* missing getenv() (like WinCE). */
887 GC_real_print_stats = VERBOSE;
888 # else
889 if (0 != GETENV("GC_PRINT_VERBOSE_STATS")) {
890 GC_real_print_stats = VERBOSE;
891 } else if (0 != GETENV("GC_PRINT_STATS")) {
892 GC_real_print_stats = 1;
894 # endif
895 # if defined(UNIX_LIKE) || defined(CYGWIN32) || defined(SYMBIAN)
897 char * file_name = GETENV("GC_LOG_FILE");
898 # ifdef GC_LOG_TO_FILE_ALWAYS
899 if (NULL == file_name)
900 file_name = GC_LOG_STD_NAME;
901 # else
902 if (0 != file_name)
903 # endif
905 int log_d = open(file_name, O_CREAT|O_WRONLY|O_APPEND, 0666);
906 if (log_d < 0) {
907 GC_err_printf("Failed to open %s as log file\n", file_name);
908 } else {
909 char *str;
910 GC_log = log_d;
911 str = GETENV("GC_ONLY_LOG_TO_FILE");
912 # ifdef GC_ONLY_LOG_TO_FILE
913 /* The similar environment variable set to "0" */
914 /* overrides the effect of the macro defined. */
915 if (str != NULL && *str == '0' && *(str + 1) == '\0')
916 # else
917 /* Otherwise setting the environment variable */
918 /* to anything other than "0" prevents stdout/err */
919 /* from being redirected to the log file. */
920 if (str == NULL || (*str == '0' && *(str + 1) == '\0'))
921 # endif
923 GC_stdout = log_d;
924 GC_stderr = log_d;
929 # endif
930 # endif /* !SMALL_CONFIG */
931 # ifndef NO_DEBUGGING
932 if (0 != GETENV("GC_DUMP_REGULARLY")) {
933 GC_dump_regularly = TRUE;
935 # endif
936 # ifdef KEEP_BACK_PTRS
938 char * backtraces_string = GETENV("GC_BACKTRACES");
939 if (0 != backtraces_string) {
940 GC_backtraces = atol(backtraces_string);
941 if (backtraces_string[0] == '\0') GC_backtraces = 1;
944 # endif
945 if (0 != GETENV("GC_FIND_LEAK")) {
946 GC_find_leak = 1;
948 # ifndef SHORT_DBG_HDRS
949 if (0 != GETENV("GC_FINDLEAK_DELAY_FREE")) {
950 GC_findleak_delay_free = TRUE;
952 # endif
953 if (0 != GETENV("GC_ALL_INTERIOR_POINTERS")) {
954 GC_all_interior_pointers = 1;
956 if (0 != GETENV("GC_DONT_GC")) {
957 GC_dont_gc = 1;
959 if (0 != GETENV("GC_PRINT_BACK_HEIGHT")) {
960 GC_print_back_height = TRUE;
962 if (0 != GETENV("GC_NO_BLACKLIST_WARNING")) {
963 GC_large_alloc_warn_interval = LONG_MAX;
966 char * addr_string = GETENV("GC_TRACE");
967 if (0 != addr_string) {
968 # ifndef ENABLE_TRACE
969 WARN("Tracing not enabled: Ignoring GC_TRACE value\n", 0);
970 # else
971 word addr = (word)STRTOULL(addr_string, NULL, 16);
972 if (addr < 0x1000)
973 WARN("Unlikely trace address: %p\n", addr);
974 GC_trace_addr = (ptr_t)addr;
975 # endif
978 # ifdef GC_COLLECT_AT_MALLOC
980 char * string = GETENV("GC_COLLECT_AT_MALLOC");
981 if (0 != string) {
982 size_t min_lb = (size_t)STRTOULL(string, NULL, 10);
983 if (min_lb > 0)
984 GC_dbg_collect_at_malloc_min_lb = min_lb;
987 # endif
988 # ifndef GC_DISABLE_INCREMENTAL
990 char * time_limit_string = GETENV("GC_PAUSE_TIME_TARGET");
991 if (0 != time_limit_string) {
992 long time_limit = atol(time_limit_string);
993 if (time_limit < 5) {
994 WARN("GC_PAUSE_TIME_TARGET environment variable value too small "
995 "or bad syntax: Ignoring\n", 0);
996 } else {
997 GC_time_limit = time_limit;
1001 # endif
1002 # ifndef SMALL_CONFIG
1004 char * full_freq_string = GETENV("GC_FULL_FREQUENCY");
1005 if (full_freq_string != NULL) {
1006 int full_freq = atoi(full_freq_string);
1007 if (full_freq > 0)
1008 GC_full_freq = full_freq;
1011 # endif
1013 char * interval_string = GETENV("GC_LARGE_ALLOC_WARN_INTERVAL");
1014 if (0 != interval_string) {
1015 long interval = atol(interval_string);
1016 if (interval <= 0) {
1017 WARN("GC_LARGE_ALLOC_WARN_INTERVAL environment variable has "
1018 "bad value: Ignoring\n", 0);
1019 } else {
1020 GC_large_alloc_warn_interval = interval;
1025 char * space_divisor_string = GETENV("GC_FREE_SPACE_DIVISOR");
1026 if (space_divisor_string != NULL) {
1027 int space_divisor = atoi(space_divisor_string);
1028 if (space_divisor > 0)
1029 GC_free_space_divisor = (GC_word)space_divisor;
1032 # ifdef USE_MUNMAP
1034 char * string = GETENV("GC_UNMAP_THRESHOLD");
1035 if (string != NULL) {
1036 if (*string == '0' && *(string + 1) == '\0') {
1037 /* "0" is used to disable unmapping. */
1038 GC_unmap_threshold = 0;
1039 } else {
1040 int unmap_threshold = atoi(string);
1041 if (unmap_threshold > 0)
1042 GC_unmap_threshold = unmap_threshold;
1047 char * string = GETENV("GC_FORCE_UNMAP_ON_GCOLLECT");
1048 if (string != NULL) {
1049 if (*string == '0' && *(string + 1) == '\0') {
1050 /* "0" is used to turn off the mode. */
1051 GC_force_unmap_on_gcollect = FALSE;
1052 } else {
1053 GC_force_unmap_on_gcollect = TRUE;
1058 char * string = GETENV("GC_USE_ENTIRE_HEAP");
1059 if (string != NULL) {
1060 if (*string == '0' && *(string + 1) == '\0') {
1061 /* "0" is used to turn off the mode. */
1062 GC_use_entire_heap = FALSE;
1063 } else {
1064 GC_use_entire_heap = TRUE;
1068 # endif
1069 maybe_install_looping_handler();
1070 /* Adjust normal object descriptor for extra allocation. */
1071 if (ALIGNMENT > GC_DS_TAGS && EXTRA_BYTES != 0) {
1072 GC_obj_kinds[NORMAL].ok_descriptor = ((word)(-ALIGNMENT) | GC_DS_LENGTH);
1074 GC_exclude_static_roots_inner(beginGC_arrays, endGC_arrays);
1075 GC_exclude_static_roots_inner(beginGC_obj_kinds, endGC_obj_kinds);
1076 # ifdef SEPARATE_GLOBALS
1077 GC_exclude_static_roots_inner(beginGC_objfreelist, endGC_objfreelist);
1078 GC_exclude_static_roots_inner(beginGC_aobjfreelist, endGC_aobjfreelist);
1079 # endif
1080 # if defined(USE_PROC_FOR_LIBRARIES) && defined(GC_LINUX_THREADS)
1081 WARN("USE_PROC_FOR_LIBRARIES + GC_LINUX_THREADS performs poorly.\n", 0);
1082 /* If thread stacks are cached, they tend to be scanned in */
1083 /* entirety as part of the root set. This will grow them to */
1084 /* maximum size, and is generally not desirable. */
1085 # endif
1086 # if defined(SEARCH_FOR_DATA_START)
1087 GC_init_linux_data_start();
1088 # endif
1089 # if defined(NETBSD) && defined(__ELF__)
1090 GC_init_netbsd_elf();
1091 # endif
1092 # if !defined(THREADS) || defined(GC_PTHREADS) \
1093 || defined(GC_WIN32_THREADS) || defined(GC_SOLARIS_THREADS)
1094 if (GC_stackbottom == 0) {
1095 GC_stackbottom = GC_get_main_stack_base();
1096 # if (defined(LINUX) || defined(HPUX)) && defined(IA64)
1097 GC_register_stackbottom = GC_get_register_stack_base();
1098 # endif
1099 } else {
1100 # if (defined(LINUX) || defined(HPUX)) && defined(IA64)
1101 if (GC_register_stackbottom == 0) {
1102 WARN("GC_register_stackbottom should be set with GC_stackbottom\n", 0);
1103 /* The following may fail, since we may rely on */
1104 /* alignment properties that may not hold with a user set */
1105 /* GC_stackbottom. */
1106 GC_register_stackbottom = GC_get_register_stack_base();
1108 # endif
1110 # endif
1111 GC_STATIC_ASSERT(sizeof (ptr_t) == sizeof(word));
1112 GC_STATIC_ASSERT(sizeof (signed_word) == sizeof(word));
1113 GC_STATIC_ASSERT(sizeof (struct hblk) == HBLKSIZE);
1114 # ifndef THREADS
1115 GC_ASSERT(!((word)GC_stackbottom HOTTER_THAN (word)GC_approx_sp()));
1116 # endif
1117 # if !defined(_AUX_SOURCE) || defined(__GNUC__)
1118 GC_STATIC_ASSERT((word)(-1) > (word)0);
1119 /* word should be unsigned */
1120 # endif
1121 /* We no longer check for ((void*)(-1) > NULL) since all pointers */
1122 /* are explicitly cast to word in every less-greater comparison. */
1123 GC_STATIC_ASSERT((signed_word)(-1) < (signed_word)0);
1124 # ifndef GC_DISABLE_INCREMENTAL
1125 if (GC_incremental || 0 != GETENV("GC_ENABLE_INCREMENTAL")) {
1126 /* For GWW_VDB on Win32, this needs to happen before any */
1127 /* heap memory is allocated. */
1128 GC_dirty_init();
1129 GC_ASSERT(GC_bytes_allocd == 0);
1130 GC_incremental = TRUE;
1132 # endif
1134 /* Add initial guess of root sets. Do this first, since sbrk(0) */
1135 /* might be used. */
1136 if (GC_REGISTER_MAIN_STATIC_DATA()) GC_register_data_segments();
1137 GC_init_headers();
1138 GC_bl_init();
1139 GC_mark_init();
1141 char * sz_str = GETENV("GC_INITIAL_HEAP_SIZE");
1142 if (sz_str != NULL) {
1143 initial_heap_sz = GC_parse_mem_size_arg(sz_str);
1144 if (initial_heap_sz <= MINHINCR * HBLKSIZE) {
1145 WARN("Bad initial heap size %s - ignoring it.\n", sz_str);
1147 initial_heap_sz = divHBLKSZ(initial_heap_sz);
1151 char * sz_str = GETENV("GC_MAXIMUM_HEAP_SIZE");
1152 if (sz_str != NULL) {
1153 word max_heap_sz = GC_parse_mem_size_arg(sz_str);
1154 if (max_heap_sz < initial_heap_sz * HBLKSIZE) {
1155 WARN("Bad maximum heap size %s - ignoring it.\n", sz_str);
1157 if (0 == GC_max_retries) GC_max_retries = 2;
1158 GC_set_max_heap_size(max_heap_sz);
1161 if (!GC_expand_hp_inner(initial_heap_sz)) {
1162 GC_err_printf("Can't start up: not enough memory\n");
1163 EXIT();
1164 } else {
1165 GC_requested_heapsize += initial_heap_sz;
1167 if (GC_all_interior_pointers)
1168 GC_initialize_offsets();
1169 GC_register_displacement_inner(0L);
1170 # if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC)
1171 if (!GC_all_interior_pointers) {
1172 /* TLS ABI uses pointer-sized offsets for dtv. */
1173 GC_register_displacement_inner(sizeof(void *));
1175 # endif
1176 GC_init_size_map();
1177 # ifdef PCR
1178 if (PCR_IL_Lock(PCR_Bool_false, PCR_allSigsBlocked, PCR_waitForever)
1179 != PCR_ERes_okay) {
1180 ABORT("Can't lock load state");
1181 } else if (PCR_IL_Unlock() != PCR_ERes_okay) {
1182 ABORT("Can't unlock load state");
1184 PCR_IL_Unlock();
1185 GC_pcr_install();
1186 # endif
1187 GC_is_initialized = TRUE;
1188 # if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
1189 GC_thr_init();
1190 # endif
1191 COND_DUMP;
1192 /* Get black list set up and/or incremental GC started */
1193 if (!GC_dont_precollect || GC_incremental) GC_gcollect_inner();
1194 # ifdef STUBBORN_ALLOC
1195 GC_stubborn_init();
1196 # endif
1197 if (GC_find_leak) {
1198 /* This is to give us at least one chance to detect leaks. */
1199 /* This may report some very benign leaks, but ... */
1200 atexit(GC_exit_check);
1203 /* The rest of this again assumes we don't really hold */
1204 /* the allocation lock. */
1205 # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
1206 /* Make sure marker threads are started and thread local */
1207 /* allocation is initialized, in case we didn't get */
1208 /* called from GC_init_parallel. */
1209 GC_init_parallel();
1210 # endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
1212 # if defined(DYNAMIC_LOADING) && defined(DARWIN)
1213 /* This must be called WITHOUT the allocation lock held */
1214 /* and before any threads are created. */
1215 GC_init_dyld();
1216 # endif
1217 RESTORE_CANCEL(cancel_state);
1220 GC_API void GC_CALL GC_enable_incremental(void)
1222 # if !defined(GC_DISABLE_INCREMENTAL) && !defined(KEEP_BACK_PTRS)
1223 DCL_LOCK_STATE;
1224 /* If we are keeping back pointers, the GC itself dirties all */
1225 /* pages on which objects have been marked, making */
1226 /* incremental GC pointless. */
1227 if (!GC_find_leak && 0 == GETENV("GC_DISABLE_INCREMENTAL")) {
1228 LOCK();
1229 if (!GC_incremental) {
1230 GC_setpagesize();
1231 /* if (GC_no_win32_dlls) goto out; Should be win32S test? */
1232 maybe_install_looping_handler(); /* Before write fault handler! */
1233 GC_incremental = TRUE;
1234 if (!GC_is_initialized) {
1235 GC_init();
1236 } else {
1237 GC_dirty_init();
1239 if (GC_dirty_maintained && !GC_dont_gc) {
1240 /* Can't easily do it if GC_dont_gc. */
1241 if (GC_bytes_allocd > 0) {
1242 /* There may be unmarked reachable objects. */
1243 GC_gcollect_inner();
1245 /* else we're OK in assuming everything's */
1246 /* clean since nothing can point to an */
1247 /* unmarked object. */
1248 GC_read_dirty();
1251 UNLOCK();
1252 return;
1254 # endif
1255 GC_init();
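/*
 * Illustrative note: the same effect can be obtained without a code change
 * by setting the GC_ENABLE_INCREMENTAL environment variable before
 * start-up (examined in GC_init() above); incremental mode is deliberately
 * skipped when leak finding is active, when the GC_DISABLE_INCREMENTAL
 * variable is set, or when the collector is built with KEEP_BACK_PTRS.
 */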
1258 #if defined(THREADS) && (!defined(PARALLEL_MARK) || !defined(CAN_HANDLE_FORK))
1259 GC_API void GC_CALL GC_start_mark_threads(void)
1261 /* No action since parallel markers are disabled (or no POSIX fork). */
1262 GC_ASSERT(I_DONT_HOLD_LOCK());
1264 #endif
1266 #if defined(MSWIN32) || defined(MSWINCE)
1268 # if defined(_MSC_VER) && defined(_DEBUG) && !defined(MSWINCE)
1269 # include <crtdbg.h>
1270 # endif
1272 STATIC HANDLE GC_log = 0;
1274 void GC_deinit(void)
1276 # ifdef THREADS
1277 if (GC_is_initialized) {
1278 DeleteCriticalSection(&GC_write_cs);
1280 # endif
1283 # ifdef THREADS
1284 # ifdef PARALLEL_MARK
1285 # define IF_NEED_TO_LOCK(x) if (GC_parallel || GC_need_to_lock) x
1286 # else
1287 # define IF_NEED_TO_LOCK(x) if (GC_need_to_lock) x
1288 # endif
1289 # else
1290 # define IF_NEED_TO_LOCK(x)
1291 # endif /* !THREADS */
1293 STATIC HANDLE GC_CreateLogFile(void)
1295 HANDLE hFile;
1296 TCHAR *logPath;
1297 BOOL appendToFile = FALSE;
1298 # if !defined(NO_GETENV_WIN32) || !defined(OLD_WIN32_LOG_FILE)
1299 TCHAR pathBuf[_MAX_PATH + 0x10]; /* buffer for path + ext */
1301 logPath = pathBuf;
1302 # endif
1304 /* Use GetEnvironmentVariable instead of GETENV() for unicode support. */
1305 # ifndef NO_GETENV_WIN32
1306 if (GetEnvironmentVariable(TEXT("GC_LOG_FILE"), pathBuf,
1307 _MAX_PATH + 1) - 1U < (DWORD)_MAX_PATH) {
1308 appendToFile = TRUE;
1309 } else
1310 # endif
1311 /* else */ {
1312 /* Env var not found or its value too long. */
1313 # ifdef OLD_WIN32_LOG_FILE
1314 logPath = TEXT(GC_LOG_STD_NAME);
1315 # else
1316 int len = (int)GetModuleFileName(NULL /* hModule */, pathBuf,
1317 _MAX_PATH + 1);
1318 /* If GetModuleFileName() has failed then len is 0. */
1319 if (len > 4 && pathBuf[len - 4] == (TCHAR)'.') {
1320 len -= 4; /* strip executable file extension */
1322 BCOPY(TEXT("." GC_LOG_STD_NAME), &pathBuf[len],
1323 sizeof(TEXT("." GC_LOG_STD_NAME)));
1324 # endif
1327 hFile = CreateFile(logPath, GENERIC_WRITE, FILE_SHARE_READ,
1328 NULL /* lpSecurityAttributes */,
1329 appendToFile ? OPEN_ALWAYS : CREATE_ALWAYS,
1330 GC_print_stats == VERBOSE ? FILE_ATTRIBUTE_NORMAL :
1331 /* immediately flush writes unless very verbose */
1332 FILE_ATTRIBUTE_NORMAL | FILE_FLAG_WRITE_THROUGH,
1333 NULL /* hTemplateFile */);
1334 # ifndef NO_GETENV_WIN32
1335 if (appendToFile && hFile != INVALID_HANDLE_VALUE) {
1336 LONG posHigh = 0;
1337 (void)SetFilePointer(hFile, 0, &posHigh, FILE_END);
1338 /* Seek to file end (ignoring any error) */
1340 # endif
1341 return hFile;
1344 STATIC int GC_write(const char *buf, size_t len)
1346 BOOL tmp;
1347 DWORD written;
1348 # if (defined(THREADS) && defined(GC_ASSERTIONS)) \
1349 || !defined(GC_PRINT_VERBOSE_STATS)
1350 static GC_bool inside_write = FALSE;
1351 /* to prevent infinite recursion at abort. */
1352 if (inside_write)
1353 return -1;
1354 # endif
1356 if (len == 0)
1357 return 0;
1358 IF_NEED_TO_LOCK(EnterCriticalSection(&GC_write_cs));
1359 # if defined(THREADS) && defined(GC_ASSERTIONS)
1360 if (GC_write_disabled) {
1361 inside_write = TRUE;
1362 ABORT("Assertion failure: GC_write called with write_disabled");
1364 # endif
1365 if (GC_log == INVALID_HANDLE_VALUE) {
1366 IF_NEED_TO_LOCK(LeaveCriticalSection(&GC_write_cs));
1367 return -1;
1368 } else if (GC_log == 0) {
1369 GC_log = GC_CreateLogFile();
1370 /* Ignore open log failure if the collector is built with */
1371 /* print_stats always set on. */
1372 # ifndef GC_PRINT_VERBOSE_STATS
1373 if (GC_log == INVALID_HANDLE_VALUE) {
1374 inside_write = TRUE;
1375 ABORT("Open of log file failed");
1377 # endif
1379 tmp = WriteFile(GC_log, buf, (DWORD)len, &written, NULL);
1380 if (!tmp)
1381 DebugBreak();
1382 # if defined(_MSC_VER) && defined(_DEBUG)
1383 # ifdef MSWINCE
1384 /* There is no CrtDbgReport() in WinCE */
1386 WCHAR wbuf[1024];
1387 /* Always use Unicode variant of OutputDebugString() */
1388 wbuf[MultiByteToWideChar(CP_ACP, 0 /* dwFlags */,
1389 buf, len, wbuf,
1390 sizeof(wbuf) / sizeof(wbuf[0]) - 1)] = 0;
1391 OutputDebugStringW(wbuf);
1393 # else
1394 _CrtDbgReport(_CRT_WARN, NULL, 0, NULL, "%.*s", len, buf);
1395 # endif
1396 # endif
1397 IF_NEED_TO_LOCK(LeaveCriticalSection(&GC_write_cs));
1398 return tmp ? (int)written : -1;
1401 /* FIXME: This is pretty ugly ... */
1402 # define WRITE(f, buf, len) GC_write(buf, len)
1404 #elif defined(OS2) || defined(MACOS)
1405 STATIC FILE * GC_stdout = NULL;
1406 STATIC FILE * GC_stderr = NULL;
1407 STATIC FILE * GC_log = NULL;
1409 /* Initialize GC_log (and friends) passed to GC_write(). */
1410 STATIC void GC_set_files(void)
1412 if (GC_stdout == NULL) {
1413 GC_stdout = stdout;
1415 if (GC_stderr == NULL) {
1416 GC_stderr = stderr;
1418 if (GC_log == NULL) {
1419 GC_log = stderr;
1423 GC_INLINE int GC_write(FILE *f, const char *buf, size_t len)
1425 int res = fwrite(buf, 1, len, f);
1426 fflush(f);
1427 return res;
1430 # define WRITE(f, buf, len) (GC_set_files(), GC_write(f, buf, len))
1432 #else
1433 # if !defined(AMIGA) && !defined(__CC_ARM)
1434 # include <unistd.h>
1435 # endif
1437 STATIC int GC_write(int fd, const char *buf, size_t len)
1439 # if defined(ECOS) || defined(NOSYS)
1440 # ifdef ECOS
1441 /* FIXME: This seems to be defined nowhere at present. */
1442 /* _Jv_diag_write(buf, len); */
1443 # else
1444 /* No writing. */
1445 # endif
1446 return len;
1447 # else
1448 int bytes_written = 0;
1449 int result;
1450 IF_CANCEL(int cancel_state;)
1452 DISABLE_CANCEL(cancel_state);
1453 while ((size_t)bytes_written < len) {
1454 # ifdef GC_SOLARIS_THREADS
1455 result = syscall(SYS_write, fd, buf + bytes_written,
1456 len - bytes_written);
1457 # else
1458 result = write(fd, buf + bytes_written, len - bytes_written);
1459 # endif
1460 if (-1 == result) {
1461 RESTORE_CANCEL(cancel_state);
1462 return(result);
1464 bytes_written += result;
1466 RESTORE_CANCEL(cancel_state);
1467 return(bytes_written);
1468 # endif
1471 # define WRITE(f, buf, len) GC_write(f, buf, len)
1472 #endif /* !MSWIN32 && !OS2 && !MACOS */
1474 #ifdef GC_ANDROID_LOG
1475 # include <android/log.h>
1477 # ifndef GC_ANDROID_LOG_TAG
1478 # define GC_ANDROID_LOG_TAG "BDWGC"
1479 # endif
1480 #endif
1482 #define BUFSZ 1024
1484 #ifdef NO_VSNPRINTF
1485 /* In case this function is missing (e.g., in DJGPP v2.0.3). */
1486 # define vsnprintf(buf, bufsz, format, args) vsprintf(buf, format, args)
1487 #elif defined(_MSC_VER)
1488 # ifdef MSWINCE
1489 /* _vsnprintf is deprecated in WinCE */
1490 # define vsnprintf StringCchVPrintfA
1491 # else
1492 # define vsnprintf _vsnprintf
1493 # endif
1494 #endif
1496 /* A version of printf that is unlikely to call malloc, and is thus safer */
1497 /* to call from the collector in case malloc has been bound to GC_malloc. */
1498 /* Floating point arguments and formats should be avoided, since FP */
1499 /* conversion is more likely to allocate memory. */
1500 /* Assumes that no more than BUFSZ-1 characters are written at once. */
1501 #define GC_PRINTF_FILLBUF(buf, format) { \
1502 va_list args; \
1503 va_start(args, format); \
1504 (buf)[sizeof(buf) - 1] = 0x15; /* guard */ \
1505 (void)vsnprintf(buf, sizeof(buf) - 1, format, args); \
1506 va_end(args); \
1507 if ((buf)[sizeof(buf) - 1] != 0x15) \
1508 ABORT("GC_printf clobbered stack"); \
1511 void GC_printf(const char *format, ...)
1513 char buf[BUFSZ + 1];
1515 # ifdef GC_ANDROID_LOG
1516 GC_PRINTF_FILLBUF(buf, format);
1517 __android_log_write(ANDROID_LOG_DEBUG, GC_ANDROID_LOG_TAG, buf);
1518 if (GC_stdout == GC_DEFAULT_STDOUT_FD)
1519 return; /* skip duplicate write to stdout */
1520 # endif
1521 if (!GC_quiet) {
1522 # ifndef GC_ANDROID_LOG
1523 GC_PRINTF_FILLBUF(buf, format);
1524 # endif
1525 if (WRITE(GC_stdout, buf, strlen(buf)) < 0)
1526 ABORT("write to stdout failed");
1530 void GC_err_printf(const char *format, ...)
1532 char buf[BUFSZ + 1];
1534 GC_PRINTF_FILLBUF(buf, format);
1535 GC_err_puts(buf);
1538 #ifndef GC_ANDROID_LOG
1540 void GC_log_printf(const char *format, ...)
1542 char buf[BUFSZ + 1];
1544 GC_PRINTF_FILLBUF(buf, format);
1545 if (WRITE(GC_log, buf, strlen(buf)) < 0)
1546 ABORT("write to GC log failed");
1549 # define GC_warn_printf GC_err_printf
1551 #else
1553 # define GC_LOG_PRINTF_IMPL(loglevel, fileLogCond, format) \
1555 char buf[BUFSZ + 1]; \
1556 GC_PRINTF_FILLBUF(buf, format); \
1557 __android_log_write(loglevel, GC_ANDROID_LOG_TAG, buf); \
1558 if (GC_log != GC_DEFAULT_STDERR_FD && (fileLogCond) \
1559 && WRITE(GC_log, buf, strlen(buf)) < 0) \
1560 ABORT("write to GC log file failed"); \
1563 void GC_log_printf(const char *format, ...)
1565 GC_LOG_PRINTF_IMPL(ANDROID_LOG_DEBUG, TRUE, format);
1568 GC_INNER void GC_stats_log_printf(const char *format, ...)
1570 GC_LOG_PRINTF_IMPL(ANDROID_LOG_INFO, GC_real_print_stats != 0, format);
1573 GC_INNER void GC_verbose_log_printf(const char *format, ...)
1575 GC_LOG_PRINTF_IMPL(ANDROID_LOG_VERBOSE, GC_real_print_stats == VERBOSE,
1576 format);
1579 STATIC void GC_warn_printf(const char *format, ...)
1581 char buf[BUFSZ + 1];
1583 GC_PRINTF_FILLBUF(buf, format);
1584 __android_log_write(ANDROID_LOG_WARN, GC_ANDROID_LOG_TAG, buf);
1585 if (GC_real_print_stats && GC_stderr != GC_DEFAULT_STDERR_FD
1586 && WRITE(GC_stderr, buf, strlen(buf)) < 0)
1587 ABORT("write to stderr failed");
1590 #endif /* GC_ANDROID_LOG */
1592 void GC_err_puts(const char *s)
1594 # ifdef GC_ANDROID_LOG
1595 __android_log_write(ANDROID_LOG_ERROR, GC_ANDROID_LOG_TAG, s);
1596 if (GC_stderr == GC_DEFAULT_STDERR_FD)
1597 return; /* skip duplicate write to stderr */
1598 # endif
1599 if (WRITE(GC_stderr, s, strlen(s)) < 0) ABORT("write to stderr failed");
1602 STATIC void GC_CALLBACK GC_default_warn_proc(char *msg, GC_word arg)
1604 /* TODO: Add assertion that arg complies with msg (format). */
1605 GC_warn_printf(msg, arg);
1608 GC_INNER GC_warn_proc GC_current_warn_proc = GC_default_warn_proc;
1610 /* This is recommended for production code (release). */
1611 GC_API void GC_CALLBACK GC_ignore_warn_proc(char *msg, GC_word arg)
1613 if (GC_print_stats) {
1614 /* Don't ignore warnings if stats printing is on. */
1615 GC_default_warn_proc(msg, arg);
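/*
 * Illustrative usage sketch: a release build that wants to silence
 * collector warnings (except while statistics printing is enabled)
 * installs the predefined no-op handler:
 *
 *     GC_set_warn_proc(GC_ignore_warn_proc);
 */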
1619 GC_API void GC_CALL GC_set_warn_proc(GC_warn_proc p)
1621 DCL_LOCK_STATE;
1622 GC_ASSERT(p != 0);
1623 # ifdef GC_WIN32_THREADS
1624 # ifdef CYGWIN32
1625 /* Need explicit GC_INIT call */
1626 GC_ASSERT(GC_is_initialized);
1627 # else
1628 if (!GC_is_initialized) GC_init();
1629 # endif
1630 # endif
1631 LOCK();
1632 GC_current_warn_proc = p;
1633 UNLOCK();
1636 GC_API GC_warn_proc GC_CALL GC_get_warn_proc(void)
1638 GC_warn_proc result;
1639 DCL_LOCK_STATE;
1640 LOCK();
1641 result = GC_current_warn_proc;
1642 UNLOCK();
1643 return(result);
1646 #if !defined(PCR) && !defined(SMALL_CONFIG)
1647 /* Print (or display) a message before abnormal exit (including */
1648 /* abort). Invoked from ABORT(msg) macro (where msg is non-NULL) */
1649 /* and from EXIT() macro (msg is NULL in that case). */
1650 STATIC void GC_CALLBACK GC_default_on_abort(const char *msg)
1652 GC_find_leak = FALSE; /* disable at-exit GC_gcollect() */
1654 if (msg != NULL) {
1655 # if defined(MSWIN32)
1656 # ifndef DONT_USE_USER32_DLL
1657 /* Use static binding to "user32.dll". */
1658 (void)MessageBoxA(NULL, msg, "Fatal error in GC",
1659 MB_ICONERROR | MB_OK);
1660 # else
1661 /* This simplifies linking - resolve "MessageBoxA" at run-time. */
1662 HINSTANCE hU32 = LoadLibrary(TEXT("user32.dll"));
1663 if (hU32) {
1664 FARPROC pfn = GetProcAddress(hU32, "MessageBoxA");
1665 if (pfn)
1666 (void)(*(int (WINAPI *)(HWND, LPCSTR, LPCSTR, UINT))pfn)(
1667 NULL /* hWnd */, msg, "Fatal error in GC",
1668 MB_ICONERROR | MB_OK);
1669 (void)FreeLibrary(hU32);
1671 # endif
1672 /* Also duplicate msg to GC log file. */
1673 # endif
1674 /* Avoid calling GC_err_printf() here, as GC_on_abort() could be */
1675 /* called from it. Note 1: this is not an atomic output. */
1676 /* Note 2: possible write errors are ignored. */
1678 # if defined(THREADS) && defined(GC_ASSERTIONS) \
1679 && (defined(MSWIN32) || defined(MSWINCE))
1680 if (!GC_write_disabled)
1681 # endif
1683 if (WRITE(GC_stderr, (void *)msg, strlen(msg)) >= 0)
1684 (void)WRITE(GC_stderr, (void *)("\n"), 1);
1686 # ifdef GC_ANDROID_LOG
1687 __android_log_assert("*" /* cond */, GC_ANDROID_LOG_TAG, "%s\n", msg);
1688 # endif
1691 # if !defined(NO_DEBUGGING) && !defined(GC_ANDROID_LOG)
1692 if (GETENV("GC_LOOP_ON_ABORT") != NULL) {
1693 /* In many cases it's easier to debug a running process. */
1694 /* It's arguably nicer to sleep, but that makes it harder */
1695 /* to look at the thread if the debugger doesn't know much */
1696 /* about threads. */
1697 for(;;) {
1698 /* Empty */
1701 # endif
1704 GC_abort_func GC_on_abort = GC_default_on_abort;
1706 GC_API void GC_CALL GC_set_abort_func(GC_abort_func fn)
1708 DCL_LOCK_STATE;
1709 GC_ASSERT(fn != 0);
1710 LOCK();
1711 GC_on_abort = fn;
1712 UNLOCK();
1715 GC_API GC_abort_func GC_CALL GC_get_abort_func(void)
1717 GC_abort_func fn;
1718 DCL_LOCK_STATE;
1719 LOCK();
1720 fn = GC_on_abort;
1721 UNLOCK();
1722 return fn;
1724 #endif /* !SMALL_CONFIG */
1726 GC_API void GC_CALL GC_enable(void)
1728 DCL_LOCK_STATE;
1729 LOCK();
1730 GC_dont_gc--;
1731 UNLOCK();
1734 GC_API void GC_CALL GC_disable(void)
1736 DCL_LOCK_STATE;
1737 LOCK();
1738 GC_dont_gc++;
1739 UNLOCK();
1742 GC_API int GC_CALL GC_is_disabled(void)
1744 return GC_dont_gc != 0;
1747 /* Helper procedures for new kind creation. */
1748 GC_API void ** GC_CALL GC_new_free_list_inner(void)
1750 void *result = GC_INTERNAL_MALLOC((MAXOBJGRANULES+1)*sizeof(ptr_t),
1751 PTRFREE);
1752 if (result == 0) ABORT("Failed to allocate freelist for new kind");
1753 BZERO(result, (MAXOBJGRANULES+1)*sizeof(ptr_t));
1754 return result;
1757 GC_API void ** GC_CALL GC_new_free_list(void)
1759 void *result;
1760 DCL_LOCK_STATE;
1761 LOCK();
1762 result = GC_new_free_list_inner();
1763 UNLOCK();
1764 return result;
1767 GC_API unsigned GC_CALL GC_new_kind_inner(void **fl, GC_word descr,
1768 int adjust, int clear)
1770 unsigned result = GC_n_kinds++;
1772 if (GC_n_kinds > MAXOBJKINDS) ABORT("Too many kinds");
1773 GC_obj_kinds[result].ok_freelist = fl;
1774 GC_obj_kinds[result].ok_reclaim_list = 0;
1775 GC_obj_kinds[result].ok_descriptor = descr;
1776 GC_obj_kinds[result].ok_relocate_descr = adjust;
1777 GC_obj_kinds[result].ok_init = clear;
1778 # ifdef ENABLE_DISCLAIM
1779 GC_obj_kinds[result].ok_mark_unconditionally = FALSE;
1780 GC_obj_kinds[result].ok_disclaim_proc = 0;
1781 # endif
1782 return result;
1785 GC_API unsigned GC_CALL GC_new_kind(void **fl, GC_word descr, int adjust,
1786 int clear)
1788 unsigned result;
1789 DCL_LOCK_STATE;
1790 LOCK();
1791 result = GC_new_kind_inner(fl, descr, adjust, clear);
1792 UNLOCK();
1793 return result;
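/*
 * Illustrative usage sketch for the kind-creation helpers above: a kind
 * equivalent to NORMAL (whole object scanned conservatively, cleared on
 * allocation), allocated through GC_generic_malloc(), which is assumed
 * here from the public gc_mark.h header:
 *
 *     void **fl = GC_new_free_list();
 *     // descr = 0 | GC_DS_LENGTH, adjust = 1, clear = 1
 *     unsigned k = GC_new_kind(fl, 0 | GC_DS_LENGTH, 1, 1);
 *     void *obj = GC_generic_malloc(72, k);
 */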
1796 GC_API unsigned GC_CALL GC_new_proc_inner(GC_mark_proc proc)
1798 unsigned result = GC_n_mark_procs++;
1800 if (GC_n_mark_procs > MAX_MARK_PROCS) ABORT("Too many mark procedures");
1801 GC_mark_procs[result] = proc;
1802 return result;
1805 GC_API unsigned GC_CALL GC_new_proc(GC_mark_proc proc)
1807 unsigned result;
1808 DCL_LOCK_STATE;
1809 LOCK();
1810 result = GC_new_proc_inner(proc);
1811 UNLOCK();
1812 return result;
1815 GC_API void * GC_CALL GC_call_with_alloc_lock(GC_fn_type fn, void *client_data)
1817 void * result;
1818 DCL_LOCK_STATE;
1820 # ifdef THREADS
1821 LOCK();
1822 /* FIXME - This looks wrong!! */
1823 SET_LOCK_HOLDER();
1824 # endif
1825 result = (*fn)(client_data);
1826 # ifdef THREADS
1827 # ifndef GC_ASSERTIONS
1828 UNSET_LOCK_HOLDER();
1829 # endif /* o.w. UNLOCK() does it implicitly */
1830 UNLOCK();
1831 # endif
1832 return(result);
1835 GC_API void * GC_CALL GC_call_with_stack_base(GC_stack_base_func fn, void *arg)
1837 struct GC_stack_base base;
1838 void *result;
1840 base.mem_base = (void *)&base;
1841 # ifdef IA64
1842 base.reg_base = (void *)GC_save_regs_in_stack();
1843 /* Unnecessarily flushes register stack, */
1844 /* but that probably doesn't hurt. */
1845 # endif
1846 result = fn(&base, arg);
1847 /* Strongly discourage the compiler from treating the above */
1848 /* as a tail call. */
1849 GC_noop1((word)(&base));
1850 return result;
1853 #ifndef THREADS
1855 GC_INNER ptr_t GC_blocked_sp = NULL;
1856 /* NULL value means we are not inside GC_do_blocking() call. */
1857 # ifdef IA64
1858 STATIC ptr_t GC_blocked_register_sp = NULL;
1859 # endif
1861 GC_INNER struct GC_traced_stack_sect_s *GC_traced_stack_sect = NULL;
1863 /* This is nearly the same as in win32_threads.c */
1864 GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
1865 void * client_data)
1867 struct GC_traced_stack_sect_s stacksect;
1868 GC_ASSERT(GC_is_initialized);
1870 /* Adjust our stack base value (this could happen if */
1871 /* GC_get_main_stack_base() is unimplemented or broken for */
1872 /* the platform). */
1873 if ((word)GC_stackbottom HOTTER_THAN (word)(&stacksect))
1874 GC_stackbottom = (ptr_t)(&stacksect);
1876 if (GC_blocked_sp == NULL) {
1877 /* We are not inside GC_do_blocking() - do nothing more. */
1878 return fn(client_data);
1881 /* Setup new "stack section". */
1882 stacksect.saved_stack_ptr = GC_blocked_sp;
1883 # ifdef IA64
1884 /* This is the same as in GC_call_with_stack_base(). */
1885 stacksect.backing_store_end = GC_save_regs_in_stack();
1886 /* Unnecessarily flushes register stack, */
1887 /* but that probably doesn't hurt. */
1888 stacksect.saved_backing_store_ptr = GC_blocked_register_sp;
1889 # endif
1890 stacksect.prev = GC_traced_stack_sect;
1891 GC_blocked_sp = NULL;
1892 GC_traced_stack_sect = &stacksect;
1894 client_data = fn(client_data);
1895 GC_ASSERT(GC_blocked_sp == NULL);
1896 GC_ASSERT(GC_traced_stack_sect == &stacksect);
1898 /* Restore original "stack section". */
1899 GC_traced_stack_sect = stacksect.prev;
1900 # ifdef IA64
1901 GC_blocked_register_sp = stacksect.saved_backing_store_ptr;
1902 # endif
1903 GC_blocked_sp = stacksect.saved_stack_ptr;
1905 return client_data; /* result */
1908 /* This is nearly the same as in win32_threads.c */
1909 STATIC void GC_do_blocking_inner(ptr_t data, void * context GC_ATTR_UNUSED)
1911 struct blocking_data * d = (struct blocking_data *) data;
1912 GC_ASSERT(GC_is_initialized);
1913 GC_ASSERT(GC_blocked_sp == NULL);
1914 # ifdef SPARC
1915 GC_blocked_sp = GC_save_regs_in_stack();
1916 # else
1917 GC_blocked_sp = (ptr_t) &d; /* save approx. sp */
1918 # endif
1919 # ifdef IA64
1920 GC_blocked_register_sp = GC_save_regs_in_stack();
1921 # endif
1923 d -> client_data = (d -> fn)(d -> client_data);
1925 # ifdef SPARC
1926 GC_ASSERT(GC_blocked_sp != NULL);
1927 # else
1928 GC_ASSERT(GC_blocked_sp == (ptr_t) &d);
1929 # endif
1930 GC_blocked_sp = NULL;
1931 }
1933 #endif /* !THREADS */
1935 /* Wrapper for functions that are likely to block (or, at least, do not */
1936 /* allocate garbage collected memory and/or manipulate pointers to the */
1937 /* garbage collected heap) for an appreciable length of time. */
1938 /* In the single threaded case, GC_do_blocking() (together */
1939 /* with GC_call_with_gc_active()) might be used to make stack scanning */
1940 /* more precise (i.e. scan only stack frames of functions that allocate */
1941 /* garbage collected memory and/or manipulate pointers to the garbage */
1942 /* collected heap). */
1943 GC_API void * GC_CALL GC_do_blocking(GC_fn_type fn, void * client_data)
1944 {
1945 struct blocking_data my_data;
1947 my_data.fn = fn;
1948 my_data.client_data = client_data;
1949 GC_with_callee_saves_pushed(GC_do_blocking_inner, (ptr_t)(&my_data));
1950 return my_data.client_data; /* result */
1951 }
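/* Editor's illustrative sketch (not part of the original misc.c): the       */
/* intended pairing of GC_do_blocking() and GC_call_with_gc_active()         */
/* described in the comment above.  do_blocking_io and touch_gc_heap are     */
/* hypothetical client names.                                                */
#if 0
#include "gc.h"

static void *GC_CALLBACK touch_gc_heap(void *data)
{
  return GC_MALLOC(64);   /* safe: the GC is "active" again inside here */
}

static void *GC_CALLBACK do_blocking_io(void *data)
{
  /* ... read()/select()/etc.; no GC heap access in this region ...  */
  /* If the GC heap must be touched, re-enter GC-active state first: */
  void *obj = GC_call_with_gc_active(touch_gc_heap, data);
  /* ... more blocking work ... */
  return obj;
}

/* Caller:  result = GC_do_blocking(do_blocking_io, arg);  */
#endif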
1953 #if !defined(NO_DEBUGGING)
1954 GC_API void GC_CALL GC_dump(void)
1955 {
1956 GC_printf("***Static roots:\n");
1957 GC_print_static_roots();
1958 GC_printf("\n***Heap sections:\n");
1959 GC_print_heap_sects();
1960 GC_printf("\n***Free blocks:\n");
1961 GC_print_hblkfreelist();
1962 GC_printf("\n***Blocks in use:\n");
1963 GC_print_block_list();
1964 }
1965 #endif /* !NO_DEBUGGING */
1967 /* Getter functions for the public Read-only variables. */
1969 /* GC_get_gc_no() is unsynchronized and should be typically called */
1970 /* inside the context of GC_call_with_alloc_lock() to prevent data */
1971 /* races (on multiprocessors). */
1972 GC_API GC_word GC_CALL GC_get_gc_no(void)
1973 {
1974 return GC_gc_no;
1975 }
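/* Editor's illustrative sketch (not part of the original misc.c): reading   */
/* GC_gc_no race-free under the allocation lock, as the comment above        */
/* suggests.  read_gc_no is a hypothetical helper name.                      */
#if 0
#include "gc.h"

static void *GC_CALLBACK read_gc_no(void *client_data)
{
  *(GC_word *)client_data = GC_get_gc_no();
  return NULL;
}

/* Caller:  GC_word n;  (void)GC_call_with_alloc_lock(read_gc_no, &n);  */
#endif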
1977 #ifdef THREADS
1978 GC_API int GC_CALL GC_get_parallel(void)
1979 {
1980 /* GC_parallel is initialized at start-up. */
1981 return GC_parallel;
1982 }
1983 #endif
1985 /* Setter and getter functions for the public R/W function variables. */
1986 /* These functions are synchronized (like GC_set_warn_proc() and */
1987 /* GC_get_warn_proc()). */
1989 GC_API void GC_CALL GC_set_oom_fn(GC_oom_func fn)
1990 {
1991 GC_ASSERT(fn != 0);
1992 DCL_LOCK_STATE;
1993 LOCK();
1994 GC_oom_fn = fn;
1995 UNLOCK();
1996 }
1998 GC_API GC_oom_func GC_CALL GC_get_oom_fn(void)
1999 {
2000 GC_oom_func fn;
2001 DCL_LOCK_STATE;
2002 LOCK();
2003 fn = GC_oom_fn;
2004 UNLOCK();
2005 return fn;
2006 }
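/* Editor's illustrative sketch (not part of the original misc.c): installing*/
/* a custom out-of-memory handler via the synchronized setter above.         */
/* my_oom_handler is a hypothetical name; returning NULL simply lets the     */
/* failed allocation return NULL to its caller.                              */
#if 0
#include <stdio.h>
#include "gc.h"

static void *GC_CALLBACK my_oom_handler(size_t bytes_requested)
{
  fprintf(stderr, "GC: out of memory while requesting %lu bytes\n",
          (unsigned long)bytes_requested);
  return NULL;
}

/* After GC_INIT():  GC_set_oom_fn(my_oom_handler);  */
#endif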
2008 GC_API void GC_CALL GC_set_on_heap_resize(GC_on_heap_resize_proc fn)
2009 {
2010 /* fn may be 0 (means no event notifier). */
2011 DCL_LOCK_STATE;
2012 LOCK();
2013 GC_on_heap_resize = fn;
2014 UNLOCK();
2015 }
2017 GC_API GC_on_heap_resize_proc GC_CALL GC_get_on_heap_resize(void)
2018 {
2019 GC_on_heap_resize_proc fn;
2020 DCL_LOCK_STATE;
2021 LOCK();
2022 fn = GC_on_heap_resize;
2023 UNLOCK();
2024 return fn;
2025 }
2027 GC_API void GC_CALL GC_set_finalizer_notifier(GC_finalizer_notifier_proc fn)
2028 {
2029 /* fn may be 0 (means no finalizer notifier). */
2030 DCL_LOCK_STATE;
2031 LOCK();
2032 GC_finalizer_notifier = fn;
2033 UNLOCK();
2034 }
2036 GC_API GC_finalizer_notifier_proc GC_CALL GC_get_finalizer_notifier(void)
2037 {
2038 GC_finalizer_notifier_proc fn;
2039 DCL_LOCK_STATE;
2040 LOCK();
2041 fn = GC_finalizer_notifier;
2042 UNLOCK();
2043 return fn;
2044 }
2046 /* Setter and getter functions for the public numeric R/W variables. */
2047 /* It is safe to call these functions even before GC_INIT(). */
2048 /* These functions are unsynchronized and should be typically called */
2049 /* inside the context of GC_call_with_alloc_lock() (if called after */
2050 /* GC_INIT()) to prevent data races (unless it is guaranteed the */
2051 /* collector is not multi-threaded at that execution point). */
2053 GC_API void GC_CALL GC_set_find_leak(int value)
2054 {
2055 /* value is of boolean type. */
2056 GC_find_leak = value;
2057 }
2059 GC_API int GC_CALL GC_get_find_leak(void)
2060 {
2061 return GC_find_leak;
2062 }
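/* Editor's illustrative sketch (not part of the original misc.c): since the */
/* numeric/boolean knobs above and below may be set before GC_INIT() without */
/* locking, a leak-checking configuration might look like this.  The chosen  */
/* values are examples only.                                                 */
#if 0
#include "gc.h"

int main(void)
{
  GC_set_find_leak(1);              /* report unreachable, never-freed objects */
  GC_set_all_interior_pointers(0);  /* recognize only pointers to object starts */
  GC_set_free_space_divisor(3);     /* heap growth policy (3 is the usual default) */
  GC_INIT();
  /* ... application code ... */
  return 0;
}
#endif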
2064 GC_API void GC_CALL GC_set_all_interior_pointers(int value)
2065 {
2066 DCL_LOCK_STATE;
2068 GC_all_interior_pointers = value ? 1 : 0;
2069 if (GC_is_initialized) {
2070 /* It is not recommended to change GC_all_interior_pointers value */
2071 /* after GC is initialized but it seems GC could work correctly */
2072 /* even after switching the mode. */
2073 LOCK();
2074 GC_initialize_offsets(); /* NOTE: this resets manual offsets as well */
2075 if (!GC_all_interior_pointers)
2076 GC_bl_init_no_interiors();
2077 UNLOCK();
2078 }
2079 }
2081 GC_API int GC_CALL GC_get_all_interior_pointers(void)
2082 {
2083 return GC_all_interior_pointers;
2084 }
2086 GC_API void GC_CALL GC_set_finalize_on_demand(int value)
2087 {
2088 GC_ASSERT(value != -1);
2089 /* value is of boolean type. */
2090 GC_finalize_on_demand = value;
2091 }
2093 GC_API int GC_CALL GC_get_finalize_on_demand(void)
2094 {
2095 return GC_finalize_on_demand;
2096 }
2098 GC_API void GC_CALL GC_set_java_finalization(int value)
2099 {
2100 GC_ASSERT(value != -1);
2101 /* value is of boolean type. */
2102 GC_java_finalization = value;
2103 }
2105 GC_API int GC_CALL GC_get_java_finalization(void)
2106 {
2107 return GC_java_finalization;
2108 }
2110 GC_API void GC_CALL GC_set_dont_expand(int value)
2111 {
2112 GC_ASSERT(value != -1);
2113 /* value is of boolean type. */
2114 GC_dont_expand = value;
2115 }
2117 GC_API int GC_CALL GC_get_dont_expand(void)
2118 {
2119 return GC_dont_expand;
2120 }
2122 GC_API void GC_CALL GC_set_no_dls(int value)
2123 {
2124 GC_ASSERT(value != -1);
2125 /* value is of boolean type. */
2126 GC_no_dls = value;
2127 }
2129 GC_API int GC_CALL GC_get_no_dls(void)
2130 {
2131 return GC_no_dls;
2132 }
2134 GC_API void GC_CALL GC_set_non_gc_bytes(GC_word value)
2135 {
2136 GC_non_gc_bytes = value;
2137 }
2139 GC_API GC_word GC_CALL GC_get_non_gc_bytes(void)
2140 {
2141 return GC_non_gc_bytes;
2142 }
2144 GC_API void GC_CALL GC_set_free_space_divisor(GC_word value)
2145 {
2146 GC_ASSERT(value > 0);
2147 GC_free_space_divisor = value;
2148 }
2150 GC_API GC_word GC_CALL GC_get_free_space_divisor(void)
2151 {
2152 return GC_free_space_divisor;
2153 }
2155 GC_API void GC_CALL GC_set_max_retries(GC_word value)
2156 {
2157 GC_ASSERT(value != ~(GC_word)0);
2158 GC_max_retries = value;
2159 }
2161 GC_API GC_word GC_CALL GC_get_max_retries(void)
2162 {
2163 return GC_max_retries;
2164 }
2166 GC_API void GC_CALL GC_set_dont_precollect(int value)
2167 {
2168 GC_ASSERT(value != -1);
2169 /* value is of boolean type. */
2170 GC_dont_precollect = value;
2171 }
2173 GC_API int GC_CALL GC_get_dont_precollect(void)
2174 {
2175 return GC_dont_precollect;
2176 }
2178 GC_API void GC_CALL GC_set_full_freq(int value)
2179 {
2180 GC_ASSERT(value >= 0);
2181 GC_full_freq = value;
2182 }
2184 GC_API int GC_CALL GC_get_full_freq(void)
2185 {
2186 return GC_full_freq;
2187 }
2189 GC_API void GC_CALL GC_set_time_limit(unsigned long value)
2190 {
2191 GC_ASSERT(value != (unsigned long)-1L);
2192 GC_time_limit = value;
2193 }
2195 GC_API unsigned long GC_CALL GC_get_time_limit(void)
2196 {
2197 return GC_time_limit;
2198 }
2200 GC_API void GC_CALL GC_set_force_unmap_on_gcollect(int value)
2201 {
2202 GC_force_unmap_on_gcollect = (GC_bool)value;
2203 }
2205 GC_API int GC_CALL GC_get_force_unmap_on_gcollect(void)
2206 {
2207 return (int)GC_force_unmap_on_gcollect;
2208 }