/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 16, 1996 2:30 pm PST */
# ifndef GC_PRIVATE_H
# define GC_PRIVATE_H

#if defined(mips) && defined(SYSTYPE_BSD) && defined(sony_news)
    /* sony RISC NEWS, NEWSOS 4 */
#   define BSD_TIME
/*  typedef long ptrdiff_t;   -- necessary on some really old systems */
#endif

#if defined(mips) && defined(SYSTYPE_BSD43)
    /* MIPS RISCOS 4 */
#   define BSD_TIME
#endif

#ifdef BSD_TIME
#   include <sys/types.h>
#   include <sys/time.h>
#   include <sys/resource.h>
#endif /* BSD_TIME */

# ifndef GC_H
#   include "gc.h"
# endif

typedef GC_word word;
typedef GC_signed_word signed_word;

# ifndef GCCONFIG_H
#   include "gcconfig.h"
# endif

# ifndef HEADERS_H
#   include "gc_hdrs.h"
# endif

typedef int GC_bool;
# define TRUE 1
# define FALSE 0

typedef char * ptr_t;   /* A generic pointer to which we can add      */
                        /* byte displacements.                        */
                        /* Preferably identical to caddr_t, if it     */
                        /* exists.                                    */
#if defined(__STDC__)
#   include <stdlib.h>
#   if !(defined( sony_news ) )
#       include <stddef.h>
#   endif
#   define VOLATILE volatile
#else
#   ifdef MSWIN32
#       include <stdlib.h>
#   endif
#   define VOLATILE
#endif

#define CONST GC_CONST

#if 0 /* was once defined for AMIGA */
#   define GC_FAR __far
#else
#   define GC_FAR
#endif
/*********************************/
/*                               */
/* Definitions for conservative  */
/* collector                     */
/*                               */
/*********************************/

/*********************************/
/*                               */
/* Easily changeable parameters  */
/*                               */
/*********************************/
#define STUBBORN_ALLOC  /* Define stubborn allocation primitives */
#if defined(SRC_M3) || defined(SMALL_CONFIG)
# undef STUBBORN_ALLOC
#endif

/* #define ALL_INTERIOR_POINTERS */
                    /* Forces all pointers into the interior of an     */
                    /* object to be considered valid.  Also causes the */
                    /* sizes of all objects to be inflated by at least */
                    /* one byte.  This should suffice to guarantee     */
                    /* that in the presence of a compiler that does    */
                    /* not perform garbage-collector-unsafe            */
                    /* optimizations, all portable, strictly ANSI      */
                    /* conforming C programs can be safely used        */
                    /* with malloc replaced by GC_malloc and free      */
                    /* calls removed.  There are several disadvantages: */
                    /* 1. There are probably no interesting, portable, */
                    /*    strictly ANSI conforming C programs.         */
                    /* 2. This option makes it hard for the collector  */
                    /*    to allocate space that is not ``pointed to'' */
                    /*    by integers, etc.  Under SunOS 4.X with a    */
                    /*    statically linked libc, we empirically       */
                    /*    observed that it would be difficult to       */
                    /*    allocate individual objects larger than 100K. */
                    /*    Even if only smaller objects are allocated,  */
                    /*    more swap space is likely to be needed.      */
                    /*    Fortunately, much of this will never be      */
                    /*    touched.                                     */
                    /* If you can easily avoid using this option, do.  */
                    /* If not, try to keep individual objects small.   */
#define PRINTSTATS  /* Print garbage collection statistics            */
                    /* For less verbose output, undefine in reclaim.c */

#define PRINTTIMES  /* Print the amount of time consumed by each      */
                    /* garbage collection.                            */

#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
                    /* whether the objects are atomic or composite, and */
                    /* whether or not the block was found to be empty  */
                    /* during the reclaim phase.  Typically generates  */
                    /* about one screenful per garbage collection.     */
#undef PRINTBLOCKS

#ifdef SILENT
#  ifdef PRINTSTATS
#    undef PRINTSTATS
#  endif
#  ifdef PRINTTIMES
#    undef PRINTTIMES
#  endif
#  ifdef PRINTBLOCKS
#    undef PRINTBLOCKS
#  endif
#endif

#if defined(PRINTSTATS) && !defined(GATHERSTATS)
#   define GATHERSTATS
#endif

#ifdef FINALIZE_ON_DEMAND
#   define GC_INVOKE_FINALIZERS()
#else
#   define GC_INVOKE_FINALIZERS() (void)GC_invoke_finalizers()
#endif
#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
                    /* free lists are actually maintained.  This applies  */
                    /* only to the top level routines in misc.c, not to   */
                    /* user generated code that calls GC_allocobj and     */
                    /* GC_allocaobj directly.                             */
                    /* Slows down average programs slightly.  May however */
                    /* substantially reduce fragmentation if allocation   */
                    /* request sizes are widely scattered.                */
                    /* May save significant amounts of space for obj_map  */
                    /* entries.                                           */

#ifndef OLD_BLOCK_ALLOC
   /* Macros controlling large block allocation strategy. */
#  define EXACT_FIRST   /* Make a complete pass through the large object */
                        /* free list before splitting a block.            */
#  define PRESERVE_LAST /* Do not divide last allocated heap segment     */
                        /* unless we would otherwise need to expand the   */
                        /* heap.                                          */
#endif

/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
#   define MERGE_SIZES
# endif

#if defined(ALL_INTERIOR_POINTERS) && !defined(DONT_ADD_BYTE_AT_END)
# define ADD_BYTE_AT_END
#endif
# ifndef LARGE_CONFIG
#   define MINHINCR 16  /* Minimum heap increment, in blocks of HBLKSIZE */
                        /* Must be multiple of largest page size.        */
#   define MAXHINCR 512 /* Maximum heap increment, in blocks             */
# else
#   define MINHINCR 64
#   define MAXHINCR 4096
# endif

# define TIME_LIMIT 50  /* We try to keep pause times from exceeding */
                        /* this by much.  In milliseconds.            */

# define BL_LIMIT GC_black_list_spacing
                        /* If we need a block of N bytes, and we have */
                        /* a block of N + BL_LIMIT bytes available,   */
                        /* and N > BL_LIMIT,                          */
                        /* but all possible positions in it are       */
                        /* blacklisted, we just use it anyway (and    */
                        /* print a warning, if warnings are enabled). */
                        /* This risks subsequently leaking the block  */
                        /* due to a false reference.  But not using   */
                        /* the block risks unreasonable immediate     */
                        /* heap growth.                               */
/*********************************/
/*                               */
/* Stack saving for debugging    */
/*                               */
/*********************************/

#ifdef SAVE_CALL_CHAIN

/*
 * Number of frames and arguments to save in objects allocated by
 * debugging allocator.
 */
#   define NFRAMES 6    /* Number of frames to save.  Should be even */
                        /* for alignment reasons.                    */
#   define NARGS 2      /* Number of arguments to save for each call. */

#   define NEED_CALLINFO

/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers.  Ignore my frame and my caller's frame.                */
void GC_save_callers (/* struct callinfo info[NFRAMES] */);

void GC_print_callers (/* struct callinfo info[NFRAMES] */);

#else

# ifdef GC_ADD_CALLER
#   define NFRAMES 1
#   define NARGS 0
#   define NEED_CALLINFO
# endif

#endif

#ifdef NEED_CALLINFO
    struct callinfo {
        word ci_pc;
#       if NARGS > 0
            word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
#       endif
#       if defined(ALIGN_DOUBLE) && (NFRAMES * (NARGS + 1)) % 2 == 1
            /* Likely alignment problem. */
            word ci_dummy;
#       endif
    };
#endif
/*********************************/
/*                               */
/* OS interface routines         */
/*                               */
/*********************************/

#ifdef BSD_TIME
#   undef CLOCK_TYPE
#   undef GET_TIME
#   undef MS_TIME_DIFF
#   define CLOCK_TYPE struct timeval
#   define GET_TIME(x) { struct rusage rusage; \
                         getrusage (RUSAGE_SELF, &rusage); \
                         x = rusage.ru_utime; }
#   define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
                              + (double) (a.tv_usec - b.tv_usec) / 1000.0)
#else /* !BSD_TIME */
#   include <time.h>
#   if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
      clock_t clock();  /* Not in time.h, where it belongs */
#   endif
#   if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
#     include <machine/limits.h>
#     define CLOCKS_PER_SEC CLK_TCK
#   endif
#   if !defined(CLOCKS_PER_SEC)
#     define CLOCKS_PER_SEC 1000000
/*
 * This is technically a bug in the implementation.  ANSI requires that
 * CLOCKS_PER_SEC be defined.  But at least under SunOS 4.1.1, it isn't.
 * Also note that the combination of ANSI C and POSIX is incredibly gross
 * here.  The type clock_t is used by both clock() and times().  But on
 * some machines these use different notions of a clock tick; CLOCKS_PER_SEC
 * seems to apply only to clock().  Hence we use it here.  On many machines,
 * including SunOS, clock() actually uses units of microseconds (which are
 * not really clock ticks).
 */
#   endif
#   define CLOCK_TYPE clock_t
#   define GET_TIME(x) x = clock()
#   define MS_TIME_DIFF(a,b) ((unsigned long) \
                (1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
#endif /* !BSD_TIME */
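
/* Illustrative sketch (not collector code): both clock paths above  */
/* are used in matched pairs, e.g. to report the duration of a       */
/* collection phase in milliseconds.  MS_TIME_DIFF takes the later   */
/* time as its first argument.                                       */
#if 0
    CLOCK_TYPE start_time, done_time;
    unsigned long ms;

    GET_TIME(start_time);
    /* ... perform a collection ... */
    GET_TIME(done_time);
    ms = (unsigned long)MS_TIME_DIFF(done_time, start_time);
#endif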
/* We use bzero and bcopy internally.  They may not be available. */
# if defined(SPARC) && defined(SUNOS4)
#   define BCOPY_EXISTS
# endif
# if defined(M68K) && defined(AMIGA)
#   define BCOPY_EXISTS
# endif
# if defined(M68K) && defined(NEXT)
#   define BCOPY_EXISTS
# endif
# if defined(VAX)
#   define BCOPY_EXISTS
# endif
# if defined(AMIGA)
#   include <string.h>
#   define BCOPY_EXISTS
# endif

# ifndef BCOPY_EXISTS
#   include <string.h>
#   define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
#   define BZERO(x,n) memset(x, 0, (size_t)(n))
# else
#   define BCOPY(x,y,n) bcopy((char *)(x),(char *)(y),(int)(n))
#   define BZERO(x,n) bzero((char *)(x),(int)(n))
# endif
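
/* Illustrative sketch (hypothetical buffers, not collector code):   */
/* BCOPY takes its arguments in bcopy() order, source first, unlike  */
/* memcpy().                                                          */
#if 0
    char src[16], dst[16];

    BCOPY(src, dst, sizeof src);    /* copy src into dst     */
    BZERO(src, sizeof src);         /* then clear the source */
#endif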
/* HBLKSIZE aligned allocation.  0 is taken to mean failure;    */
/* space is assumed to be cleared.                               */
/* In the case of USE_MMAP, the argument must also be a         */
/* physical page size.                                           */
/* GET_MEM is currently not assumed to retrieve 0 filled space, */
/* though we should perhaps take advantage of the case in which */
/* it does.                                                      */
# ifdef PCR
    char * real_malloc();
#   define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + GC_page_size) \
                                  + GC_page_size-1)
# else
#   ifdef OS2
      void * os2_alloc(size_t bytes);
#     define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)bytes \
                                    + GC_page_size) \
                                    + GC_page_size-1)
#   else
#     if defined(AMIGA) || defined(NEXT) || defined(MACOSX) || defined(DOS4GW)
#       define GET_MEM(bytes) HBLKPTR((size_t) \
                                      calloc(1, (size_t)bytes + GC_page_size) \
                                      + GC_page_size-1)
#     else
#       ifdef MSWIN32
          extern ptr_t GC_win32_get_mem();
#         define GET_MEM(bytes) (struct hblk *)GC_win32_get_mem(bytes)
#       else
#         ifdef MACOS
#           if defined(USE_TEMPORARY_MEMORY)
              extern Ptr GC_MacTemporaryNewPtr(size_t size,
                                               Boolean clearMemory);
#             define GET_MEM(bytes) HBLKPTR( \
                        GC_MacTemporaryNewPtr(bytes + GC_page_size, true) \
                        + GC_page_size-1)
#           else
#             define GET_MEM(bytes) HBLKPTR( \
                        NewPtrClear(bytes + GC_page_size) + GC_page_size-1)
#           endif
#         else
            extern ptr_t GC_unix_get_mem();
#           define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
#         endif
#       endif
#     endif
#   endif
# endif
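
/* The GET_MEM variants above (except those that obtain aligned      */
/* memory directly from the OS) all use the same trick:              */
/* over-allocate by GC_page_size, then round up to an HBLKSIZE       */
/* boundary with HBLKPTR (defined below).  An illustrative sketch,   */
/* assuming HBLKSIZE <= GC_page_size:                                */
#if 0
    word bytes = 4 * HBLKSIZE;              /* hypothetical request */
    ptr_t raw = (ptr_t)malloc((size_t)bytes + GC_page_size);
    struct hblk *h = HBLKPTR(raw + GC_page_size - 1);
        /* h is HBLKSIZE-aligned, lies within the padded region, and */
        /* is followed by at least "bytes" usable bytes.             */
#endif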
/*
 * Mutual exclusion between allocator/collector routines.
 * Needed if there is more than one allocator thread.
 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
 * dirty way that is acceptable for a few instructions, e.g. by
 * inhibiting preemption.  This is assumed to have succeeded only
 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
 * If signals cannot be tolerated with the FASTLOCK held, then
 * FASTLOCK should disable signals.  The code executed under
 * FASTLOCK is otherwise immune to interruption, provided it is
 * not restarted.
 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
 * (There is currently no equivalent for FASTLOCK.)
 */
# ifdef THREADS
#  ifdef PCR_OBSOLETE   /* Faster, but broken with multiple lwp's */
#    include "th/PCR_Th.h"
#    include "th/PCR_ThCrSec.h"
     extern struct PCR_Th_MLRep GC_allocate_ml;
#    define DCL_LOCK_STATE PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() PCR_ThCrSec_EnterSys()
     /* Here we cheat (a lot): */
#    define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
                /* TRUE if nobody currently holds the lock */
#    define FASTUNLOCK() PCR_ThCrSec_ExitSys()
#  endif
#  ifdef PCR
#    include <base/PCR_Base.h>
#    include <th/PCR_Th.h>
     extern PCR_Th_ML GC_allocate_ml;
#    define DCL_LOCK_STATE \
         PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
#    define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
#    define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
#    define FASTUNLOCK() {\
        if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
#  endif
#  ifdef SRC_M3
     extern word RT0u__inCritical;
#    define LOCK() RT0u__inCritical++
#    define UNLOCK() RT0u__inCritical--
#  endif
#  ifdef SOLARIS_THREADS
#    include <thread.h>
#    include <signal.h>
     extern mutex_t GC_allocate_ml;
#    define LOCK() mutex_lock(&GC_allocate_ml);
#    define UNLOCK() mutex_unlock(&GC_allocate_ml);
#  endif
#  ifdef LINUX_THREADS
#    include <pthread.h>
#    if defined(I386)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          /* Note: the "xchg" instruction does not need a "lock" prefix */
          __asm__ __volatile__("xchgl %0, %1"
                : "=r"(oldval), "=m"(*(addr))
                : "0"(1), "m"(*(addr)));
          return oldval;
       }
#    else
#     if defined(POWERPC)
       inline static int GC_test_and_set(volatile unsigned int *addr) {
          int oldval;
          int temp = 1; /* locked value */

          __asm__ __volatile__(
               "1:\tlwarx %0,0,%3\n"   /* load and reserve               */
               "\tcmpwi %0, 0\n"       /* if load is                     */
               "\tbne 2f\n"            /*   non-zero, return already set */
               "\tstwcx. %2,0,%1\n"    /* else store conditional         */
               "\tbne- 1b\n"           /* retry if lost reservation      */
               "2:\t\n"                /* oldval is zero if we set       */
              : "=&r"(oldval), "=p"(addr)
              : "r"(temp), "1"(addr)
              : "memory");
          return (int)oldval;
       }
#     else
#      ifdef ALPHA
         inline static int GC_test_and_set(volatile unsigned int *addr)
         {
           unsigned long oldvalue;
           unsigned long temp;

           __asm__ __volatile__(
                "1:     ldl_l %0,%1\n"
                "       and %0,%3,%2\n"
                "       bne %2,2f\n"
                "       xor %0,%3,%0\n"
                "       stl_c %0,%1\n"
                "       beq %0,3f\n"
                "       mb\n"
                "2:\n"
                ".section .text2,\"ax\"\n"
                "3:     br 1b\n"
                ".previous"
                :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
                :"Ir" (1), "m" (*addr));

           return oldvalue;
         }
#      else
        --> Need implementation of GC_test_and_set()
#      endif
#     endif
#    endif

     inline static void GC_clear(volatile unsigned int *addr) {
       *(addr) = 0;
     }

     extern volatile unsigned int GC_allocate_lock;
        /* This is not a mutex because mutexes that obey the (optional)     */
        /* POSIX scheduling rules are subject to convoys in high contention */
        /* applications.  This is basically a spin lock.                    */
     extern pthread_t GC_lock_holder;
     extern void GC_lock(void);
        /* Allocation lock holder.  Only set if acquired by client through */
        /* GC_call_with_alloc_lock.                                        */
#    define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#    define NO_THREAD (pthread_t)(-1)
#    define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#    define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
#    define LOCK() \
                { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
#    define UNLOCK() \
                GC_clear(&GC_allocate_lock)
     extern GC_bool GC_collecting;
#    define ENTER_GC() \
                { \
                GC_collecting = 1; \
                }
#    define EXIT_GC() GC_collecting = 0;
#  endif /* LINUX_THREADS */
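
/* The resulting spin-lock protocol, as an illustrative sketch of what */
/* the LOCK/UNLOCK definitions above expand to: GC_test_and_set        */
/* atomically stores 1 in the lock word and returns its previous       */
/* value, so a zero return means the lock was acquired.                */
#if 0
    if (GC_test_and_set(&GC_allocate_lock)) {
        GC_lock();                      /* contended: wait in GC_lock() */
    }
    /* ... critical section ... */
    GC_clear(&GC_allocate_lock);        /* release */
#endif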
#  if defined(HPUX_THREADS)
#    include <pthread.h>
     extern pthread_mutex_t GC_allocate_ml;
#    define LOCK() pthread_mutex_lock(&GC_allocate_ml)
#    define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
#  endif
#  if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
     /* This may also eventually be appropriate for HPUX_THREADS */
#    include <pthread.h>
#    ifndef HPUX_THREADS
        /* This probably should never be included, but I can't test */
        /* on Irix anymore.                                         */
#       include <mutex.h>
#    endif

#    ifndef HPUX_THREADS
#      if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
        || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
#        define GC_test_and_set(addr, v) test_and_set(addr,v)
#      else
#        define GC_test_and_set(addr, v) __test_and_set(addr,v)
#      endif
#    else
       /* I couldn't find a way to do this inline on HP/UX */
#    endif
     extern unsigned long GC_allocate_lock;
        /* This is not a mutex because mutexes that obey the (optional)     */
        /* POSIX scheduling rules are subject to convoys in high contention */
        /* applications.  This is basically a spin lock.                    */
     extern pthread_t GC_lock_holder;
     extern void GC_lock(void);
        /* Allocation lock holder.  Only set if acquired by client through */
        /* GC_call_with_alloc_lock.                                        */
#    define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
#    define NO_THREAD (pthread_t)(-1)
#    define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
#    define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
#    ifdef HPUX_THREADS
#      define LOCK() { if (!GC_test_and_clear(&GC_allocate_lock)) GC_lock(); }
       /* The following is INCORRECT, since the memory model is too weak. */
#      define UNLOCK() { GC_noop1(&GC_allocate_lock); \
                        *(volatile unsigned long *)(&GC_allocate_lock) = 1; }
#    else
#      define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
#      if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64)) \
          && defined(_COMPILER_VERSION) && _COMPILER_VERSION >= 700
#        define UNLOCK() __lock_release(&GC_allocate_lock)
#      else
         /* The function call in the following should prevent the */
         /* compiler from moving assignments to below the UNLOCK. */
         /* This is probably not necessary for ucode or gcc 2.8.  */
         /* It may be necessary for Ragnarok and future gcc       */
         /* versions.                                             */
#        define UNLOCK() { GC_noop1(&GC_allocate_lock); \
                        *(volatile unsigned long *)(&GC_allocate_lock) = 0; }
#      endif
#    endif
     extern GC_bool GC_collecting;
#    define ENTER_GC() \
                { \
                GC_collecting = 1; \
                }
#    define EXIT_GC() GC_collecting = 0;
#  endif /* IRIX_THREADS || IRIX_JDK_THREADS */
#  ifdef WIN32_THREADS
#    include <windows.h>
     GC_API CRITICAL_SECTION GC_allocate_ml;
#    define LOCK() EnterCriticalSection(&GC_allocate_ml);
#    define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
#  endif
#  ifndef SET_LOCK_HOLDER
#    define SET_LOCK_HOLDER()
#    define UNSET_LOCK_HOLDER()
#    define I_HOLD_LOCK() FALSE
                /* Used on platforms where locks can be reacquired, */
                /* so it doesn't matter if we lie.                  */
#  endif
# else
#    define LOCK()
#    define UNLOCK()
# endif
# ifndef SET_LOCK_HOLDER
#   define SET_LOCK_HOLDER()
#   define UNSET_LOCK_HOLDER()
#   define I_HOLD_LOCK() FALSE
                /* Used on platforms where locks can be reacquired, */
                /* so it doesn't matter if we lie.                  */
# endif
# ifndef ENTER_GC
#   define ENTER_GC()
#   define EXIT_GC()
# endif

# ifndef DCL_LOCK_STATE
#   define DCL_LOCK_STATE
# endif
# ifndef FASTLOCK
#   define FASTLOCK() LOCK()
#   define FASTLOCK_SUCCEEDED() TRUE
#   define FASTUNLOCK() UNLOCK()
# endif
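
/* An illustrative sketch of the FASTLOCK protocol described earlier:  */
/* FASTUNLOCK must be invoked whether or not the fast acquisition      */
/* succeeded, and the slow path falls back to the full lock.           */
#if 0
    DCL_LOCK_STATE;

    FASTLOCK();
    if (!FASTLOCK_SUCCEEDED()) {
        FASTUNLOCK();
        LOCK();
        /* ... critical section ... */
        UNLOCK();
    } else {
        /* ... critical section ... */
        FASTUNLOCK();
    }
#endif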
/* Delay any interrupts or signals that may abort this thread.  Data  */
/* structures are in a consistent state outside this pair of calls.   */
/* ANSI C allows both to be empty (though the standard isn't very     */
/* clear on that point).  Standard malloc implementations are usually */
/* neither interruptable nor thread-safe, and thus correspond to      */
/* empty definitions.                                                 */
# ifdef PCR
#   define DISABLE_SIGNALS() \
                PCR_Th_SetSigMask(PCR_allSigsBlocked,&GC_old_sig_mask)
#   define ENABLE_SIGNALS() \
                PCR_Th_SetSigMask(&GC_old_sig_mask, NIL)
# else
#   if defined(SRC_M3) || defined(AMIGA) || defined(SOLARIS_THREADS) \
        || defined(MSWIN32) || defined(MACOS) || defined(DJGPP) \
        || defined(NO_SIGNALS) || defined(IRIX_THREADS) \
        || defined(IRIX_JDK_THREADS) || defined(LINUX_THREADS)
                        /* Also useful for debugging. */
        /* Should probably use thr_sigsetmask for SOLARIS_THREADS. */
#     define DISABLE_SIGNALS()
#     define ENABLE_SIGNALS()
#   else
#     define DISABLE_SIGNALS() GC_disable_signals()
        void GC_disable_signals();
#     define ENABLE_SIGNALS() GC_enable_signals()
        void GC_enable_signals();
#   endif
# endif
/*
 * Stop and restart mutator threads.
 */
# ifdef PCR
#   include "th/PCR_ThCtl.h"
#   define STOP_WORLD() \
        PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
                                   PCR_allSigsBlocked, \
                                   PCR_waitForever)
#   define START_WORLD() \
        PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
                                   PCR_allSigsBlocked, \
                                   PCR_waitForever);
# else
#   if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
        || defined(IRIX_THREADS) || defined(LINUX_THREADS) \
        || defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
      void GC_stop_world();
      void GC_start_world();
#     define STOP_WORLD() GC_stop_world()
#     define START_WORLD() GC_start_world()
#   else
#     define STOP_WORLD()
#     define START_WORLD()
#   endif
# endif
/* Abandon ship */
# ifdef PCR
#   define ABORT(s) PCR_Base_Panic(s)
# else
#   ifdef SMALL_CONFIG
#     define ABORT(msg) abort();
#   else
      GC_API void GC_abort();
#     define ABORT(msg) GC_abort(msg);
#   endif
# endif

/* Exit abnormally, but without making a mess (e.g. out of memory) */
# ifdef PCR
#   define EXIT() PCR_Base_Exit(1,PCR_waitForever)
# else
#   define EXIT() (void)exit(1)
# endif

/* Print warning message, e.g. almost out of memory. */
# define WARN(msg,arg) (*GC_current_warn_proc)(msg, (GC_word)(arg))
extern GC_warn_proc GC_current_warn_proc;
/*********************************/
/*                               */
/* Word-size-dependent defines   */
/*                               */
/*********************************/

#if CPP_WORDSZ == 32
#  define WORDS_TO_BYTES(x) ((x)<<2)
#  define BYTES_TO_WORDS(x) ((x)>>2)
#  define LOGWL ((word)5)              /* log[2] of CPP_WORDSZ */
#  define modWORDSZ(n) ((n) & 0x1f)    /* n mod size of word   */
#  if ALIGNMENT != 4
#    define UNALIGNED
#  endif
#endif

#if CPP_WORDSZ == 64
#  define WORDS_TO_BYTES(x) ((x)<<3)
#  define BYTES_TO_WORDS(x) ((x)>>3)
#  define LOGWL ((word)6)              /* log[2] of CPP_WORDSZ */
#  define modWORDSZ(n) ((n) & 0x3f)    /* n mod size of word   */
#  if ALIGNMENT != 8
#    define UNALIGNED
#  endif
#endif

#define WORDSZ ((word)CPP_WORDSZ)
#define SIGNB ((word)1 << (WORDSZ-1))
#define BYTES_PER_WORD ((word)(sizeof (word)))
#define ONES ((word)(-1))
#define divWORDSZ(n) ((n) >> LOGWL)    /* divide n by size of word */
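
/* Worked example (32-bit case, illustrative): for bit index n = 70,  */
/* divWORDSZ(70) = 70 >> 5 = 2 selects the word, and modWORDSZ(70)    */
/* = 70 & 0x1f = 6 selects the bit within that word, so:              */
#if 0
    word bits[3] = {0, 0, 0};

    bits[divWORDSZ(70)] |= (word)1 << modWORDSZ(70);
        /* sets bit 6 of bits[2] */
#endif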
/*********************/
/*                   */
/*  Size Parameters  */
/*                   */
/*********************/

/* heap block size, bytes.  Should be power of 2 */

#ifndef HBLKSIZE
# ifdef SMALL_CONFIG
#   define CPP_LOG_HBLKSIZE 10
# else
#   if CPP_WORDSZ == 32
#     define CPP_LOG_HBLKSIZE 12
#   else
#     define CPP_LOG_HBLKSIZE 13
#   endif
# endif
#else
# if HBLKSIZE == 512
#   define CPP_LOG_HBLKSIZE 9
# endif
# if HBLKSIZE == 1024
#   define CPP_LOG_HBLKSIZE 10
# endif
# if HBLKSIZE == 2048
#   define CPP_LOG_HBLKSIZE 11
# endif
# if HBLKSIZE == 4096
#   define CPP_LOG_HBLKSIZE 12
# endif
# if HBLKSIZE == 8192
#   define CPP_LOG_HBLKSIZE 13
# endif
# if HBLKSIZE == 16384
#   define CPP_LOG_HBLKSIZE 14
# endif
# ifndef CPP_LOG_HBLKSIZE
    --> fix HBLKSIZE
# endif
# undef HBLKSIZE
#endif
# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
# define LOG_HBLKSIZE ((word)CPP_LOG_HBLKSIZE)
# define HBLKSIZE ((word)CPP_HBLKSIZE)
/* max size objects supported by freelist (larger objects may be */
/* allocated, but less efficiently)                              */

#define CPP_MAXOBJSZ BYTES_TO_WORDS(CPP_HBLKSIZE/2)
#define MAXOBJSZ ((word)CPP_MAXOBJSZ)

# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)

# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
        /* Equivalent to subtracting 2 hblk pointers.     */
        /* We do it this way because a compiler should    */
        /* find it hard to use an integer division        */
        /* instead of a shift.  The bundled SunOS 4.1     */
        /* compiler otherwise sometimes pessimizes the    */
        /* subtraction to involve a call to .div.         */

# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))

# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))

# define HBLKDISPL(objptr) (((word) (objptr)) & (HBLKSIZE-1))
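
/* Illustrative example: with HBLKSIZE = 4096, an address such as     */
/* 0x1234567 decomposes into HBLKPTR -> 0x1234000 (the containing     */
/* block) and HBLKDISPL -> 0x567 (the byte offset within it):         */
#if 0
    ptr_t p = (ptr_t)0x1234567;         /* hypothetical address  */
    struct hblk *h = HBLKPTR(p);        /* block containing p    */
    word displ = HBLKDISPL(p);          /* p == (ptr_t)h + displ */
#endif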
/* Round up byte allocation requests to integral number of words, etc. */
# ifdef ADD_BYTE_AT_END
#   define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1))
#   ifdef ALIGN_DOUBLE
#     define ALIGNED_WORDS(n) (BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2)) & ~1)
#   else
#     define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n)
#   endif
#   define SMALL_OBJ(bytes) ((bytes) < WORDS_TO_BYTES(MAXOBJSZ))
#   define ADD_SLOP(bytes) ((bytes)+1)
# else
#   define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1))
#   ifdef ALIGN_DOUBLE
#     define ALIGNED_WORDS(n) \
        (BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2) - 1) & ~1)
#   else
#     define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n)
#   endif
#   define SMALL_OBJ(bytes) ((bytes) <= WORDS_TO_BYTES(MAXOBJSZ))
#   define ADD_SLOP(bytes) (bytes)
# endif
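
/* Worked example (32-bit words, no ADD_BYTE_AT_END, illustrative):   */
/* a 10 byte request gives ROUNDED_UP_WORDS(10) = (10 + 3) >> 2 = 3   */
/* words; with ALIGN_DOUBLE, ALIGNED_WORDS(10)                        */
/* = ((10 + 7) >> 2) & ~1 = 4 words, preserving doubleword alignment. */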
/*
 * Hash table representation of sets of pages.  This assumes it is
 * OK to add spurious entries to sets.
 * Used by black-listing code, and perhaps by dirty bit maintenance code.
 */

# ifdef LARGE_CONFIG
#   define LOG_PHT_ENTRIES 17
# else
#   define LOG_PHT_ENTRIES 14   /* Collisions are likely if heap grows */
                                /* to more than 16K hblks = 64MB.      */
                                /* Each hash table occupies 2K bytes.  */
# endif
# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
typedef word page_hash_table[PHT_SIZE];

# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))

# define get_pht_entry_from_index(bl, index) \
                (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
# define set_pht_entry_from_index(bl, index) \
                (bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
# define clear_pht_entry_from_index(bl, index) \
                (bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
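
/* Typical use, as an illustrative sketch (note_page is hypothetical): */
/* hash the page address, then test or set its bit.  Collisions make   */
/* distinct pages share a bit, which merely adds spurious entries.     */
#if 0
static void note_page(page_hash_table pht, ptr_t addr)
{
    word index = PHT_HASH(addr);

    if (!get_pht_entry_from_index(pht, index)) {
        set_pht_entry_from_index(pht, index);
    }
}
#endif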
/********************************************/
/*                                          */
/*    H e a p   B l o c k s                 */
/*                                          */
/********************************************/

/* heap block header */
#define HBLKMASK (HBLKSIZE-1)

#define BITS_PER_HBLK (HBLKSIZE * 8)

#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/CPP_WORDSZ)
           /* upper bound                                   */
           /* We allocate 1 bit/word.  Only the first word  */
           /* in each object is actually marked.            */

# ifdef ALIGN_DOUBLE
#   define MARK_BITS_SZ (((MARK_BITS_PER_HBLK + 2*CPP_WORDSZ - 1) \
                          / (2*CPP_WORDSZ))*2)
# else
#   define MARK_BITS_SZ ((MARK_BITS_PER_HBLK + CPP_WORDSZ - 1)/CPP_WORDSZ)
# endif
           /* Upper bound on number of mark words per heap block */
struct hblkhdr {
    word hb_sz;  /* If in use, size in words, of objects in the block. */
                 /* If free, the size in bytes of the whole block.     */
    struct hblk * hb_next;      /* Link field for hblk free list         */
                                /* and for lists of chunks waiting to be */
                                /* reclaimed.                            */
    struct hblk * hb_prev;      /* Backwards link for free list. */
    word hb_descr;              /* object descriptor for marking.  See */
                                /* mark.h.                             */
    char* hb_map;       /* A pointer to a pointer validity map of the block. */
                        /* See GC_obj_map.                                   */
                        /* Valid for all blocks with headers.                */
                        /* Free blocks point to GC_invalid_map.              */
    unsigned char hb_obj_kind;
                        /* Kind of objects in the block.  Each kind  */
                        /* identifies a mark procedure and a set of  */
                        /* list headers.  Sometimes called regions.  */
    unsigned char hb_flags;
#       define IGNORE_OFF_PAGE 1        /* Ignore pointers that do not */
                                        /* point to the first page of  */
                                        /* this object.                */
#       define WAS_UNMAPPED 2   /* This is a free block, which has  */
                                /* been unmapped from the address   */
                                /* space.                           */
                                /* GC_remap must be invoked on it   */
                                /* before it can be reallocated.    */
                                /* Only set with USE_MUNMAP.        */
    unsigned short hb_last_reclaimed;
                                /* Value of GC_gc_no when block was     */
                                /* last allocated or swept.  May wrap.  */
                                /* For a free block, this is maintained */
                                /* only for USE_MUNMAP, and indicates   */
                                /* when the header was allocated, or    */
                                /* when the size of the block last      */
                                /* changed.                             */
    word hb_marks[MARK_BITS_SZ];
                        /* Bit i in the array refers to the           */
                        /* object starting at the ith word (header    */
                        /* INCLUDED) in the heap block.               */
                        /* The lsb of word 0 is numbered 0.           */
                        /* Unused bits are invalid, and are           */
                        /* occasionally set, e.g. for uncollectable   */
                        /* objects.                                   */
};
/* heap block body */

# define DISCARD_WORDS 0
        /* Number of words to be dropped at the beginning of each block */
        /* Must be a multiple of WORDSZ.  May reasonably be nonzero     */
        /* on machines that don't guarantee longword alignment of       */
        /* pointers, so that the number of false hits is minimized.     */
        /* 0 and WORDSZ are probably the only reasonable values.        */

# define BODY_SZ ((HBLKSIZE-WORDS_TO_BYTES(DISCARD_WORDS))/sizeof(word))

struct hblk {
#   if (DISCARD_WORDS != 0)
        word garbage[DISCARD_WORDS];
#   endif
    word hb_body[BODY_SZ];
};

# define HDR_WORDS ((word)DISCARD_WORDS)
# define HDR_BYTES ((word)WORDS_TO_BYTES(DISCARD_WORDS))

# define OBJ_SZ_TO_BLOCKS(sz) \
    divHBLKSZ(HDR_BYTES + WORDS_TO_BYTES(sz) + HBLKSIZE-1)
    /* Size of block (in units of HBLKSIZE) needed to hold objects of */
    /* given sz (in words).                                           */
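
/* Worked example (HBLKSIZE = 4096, 32-bit words, DISCARD_WORDS = 0,  */
/* illustrative): an object of sz = 2000 words needs                  */
/* WORDS_TO_BYTES(2000) = 8000 bytes, so OBJ_SZ_TO_BLOCKS(2000)       */
/* = divHBLKSZ(8000 + 4095) = 2 heap blocks.                          */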
/* Object free list link */
# define obj_link(p) (*(ptr_t *)(p))
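
/* Free lists are threaded through the first word of each object, as  */
/* in this illustrative sketch of a pop (GC_objfreelist is defined    */
/* further below):                                                    */
#if 0
    ptr_t head = GC_objfreelist[sz];            /* sz in words */

    if (head != 0) {
        GC_objfreelist[sz] = obj_link(head);    /* pop the head object */
        obj_link(head) = 0;                     /* unlink before use   */
    }
#endif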
/* The type of mark procedures.  This really belongs in gc_mark.h. */
/* But we put it here, so that we can avoid scanning the mark proc */
/* table.                                                          */
typedef struct ms_entry * (*mark_proc)(/* word * addr, mark_stack_ptr,
                                          mark_stack_limit, env */);
# define LOG_MAX_MARK_PROCS 6
# define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)
/* Root sets.  Logically private to mark_rts.c.  But we don't want the */
/* tables scanned, so we put them here.                                */
/* MAX_ROOT_SETS is the maximum number of ranges that can be           */
/* registered as static roots.                                         */
# ifdef LARGE_CONFIG
#   define MAX_ROOT_SETS 4096
# else
#   ifdef PCR
#     define MAX_ROOT_SETS 1024
#   else
#     ifdef MSWIN32
#       define MAX_ROOT_SETS 512
            /* Under NT, we add only written pages, which can result */
            /* in many small root sets.                              */
#     else
#       define MAX_ROOT_SETS 64
#     endif
#   endif
# endif

# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
/* Maximum number of segments that can be excluded from root sets. */
/*
 * Data structure for excluded static roots.
 */
struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
struct roots {
        ptr_t r_start;
        ptr_t r_end;
#       ifndef MSWIN32
          struct roots * r_next;
#       endif
        GC_bool r_tmp;
                /* Delete before registering new dynamic libraries */
};

#ifndef MSWIN32
    /* Size of hash table index to roots. */
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
#endif
/* Lists of all heap blocks and free lists      */
/* as well as other random data structures      */
/* that should not be scanned by the            */
/* collector.                                   */
/* These are grouped together in a struct       */
/* so that they can be easily skipped by the    */
/* GC_mark routine.                             */
/* The ordering is weird to make GC_malloc      */
/* faster by keeping the important fields       */
/* sufficiently close together that a           */
/* single load of a base register will do.      */
/* Scalars that could easily appear to          */
/* be pointers are also put here.               */
/* The main fields should precede any           */
/* conditionally included fields, so that       */
/* gc_inl.h will work even if a different set   */
/* of macros is defined when the client is      */
/* compiled.                                    */
struct _GC_arrays {
  word _heapsize;
  word _max_heapsize;
  ptr_t _last_heap_addr;
  ptr_t _prev_heap_addr;
  word _large_free_bytes;
        /* Total bytes contained in blocks on large object free */
        /* list.                                                 */
  word _words_allocd_before_gc;
        /* Number of words allocated before this */
        /* collection cycle.                     */
  word _words_allocd;
        /* Number of words allocated during this collection cycle */
  word _words_wasted;
        /* Number of words wasted due to internal fragmentation */
        /* in large objects, or due to dropping blacklisted     */
        /* blocks, since last gc.  Approximate.                 */
  word _words_finalized;
        /* Approximate number of words in objects (and headers) */
        /* that became ready for finalization in the last       */
        /* collection.                                          */
  word _non_gc_bytes_at_gc;
        /* Number of explicitly managed bytes of storage */
        /* at last collection.                           */
  word _mem_freed;
        /* Number of explicitly deallocated words of memory */
        /* since last collection.                           */
  mark_proc _mark_procs[MAX_MARK_PROCS];
        /* Table of user-defined mark procedures.  There is */
        /* a small number of these, which can be referenced */
        /* by DS_PROC mark descriptors.  See gc_mark.h.     */
  ptr_t _objfreelist[MAXOBJSZ+1];
                          /* free list for objects */
  ptr_t _aobjfreelist[MAXOBJSZ+1];
                          /* free list for atomic objs */

  ptr_t _uobjfreelist[MAXOBJSZ+1];
                          /* uncollectable but traced objs      */
                          /* objects on this and auobjfreelist  */
                          /* are always marked, except during   */
                          /* garbage collections.               */
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t _auobjfreelist[MAXOBJSZ+1];
# endif
                          /* uncollectable but traced objs */

# ifdef GATHERSTATS
    word _composite_in_use;
                /* Number of words in accessible composite */
                /* objects.                                */
    word _atomic_in_use;
                /* Number of words in accessible atomic */
                /* objects.                              */
# endif
# ifdef USE_MUNMAP
    word _unmapped_bytes;
# endif
# ifdef MERGE_SIZES
    unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
        /* Number of words to allocate for a given allocation request in */
        /* bytes.                                                        */
# endif
# ifdef STUBBORN_ALLOC
    ptr_t _sobjfreelist[MAXOBJSZ+1];
# endif
                          /* free list for immutable objects */
  ptr_t _obj_map[MAXOBJSZ+1];
                       /* If not NIL, then a pointer to a map of valid   */
                       /* object addresses. _obj_map[sz][i] is j if the  */
                       /* address block_start+i is a valid pointer       */
                       /* to an object at                                */
                       /* block_start+i&~3 - WORDS_TO_BYTES(j).          */
                       /* (If ALL_INTERIOR_POINTERS is defined, then     */
                       /* instead ((short *)(hb_map[sz]))[i] is j if     */
                       /* block_start+WORDS_TO_BYTES(i) is in the        */
                       /* interior of an object starting at              */
                       /* block_start+WORDS_TO_BYTES(i-j)).              */
                       /* It is OBJ_INVALID if                           */
                       /* block_start+WORDS_TO_BYTES(i) is not           */
                       /* valid as a pointer to an object.               */
                       /* We assume all values of j <= OBJ_INVALID.      */
                       /* The zeroth entry corresponds to large objects. */
#   ifdef ALL_INTERIOR_POINTERS
#       define map_entry_type short
#       define OBJ_INVALID 0x7fff
#       define MAP_ENTRY(map, bytes) \
                (((map_entry_type *)(map))[BYTES_TO_WORDS(bytes)])
#       define MAP_ENTRIES BYTES_TO_WORDS(HBLKSIZE)
#       define MAP_SIZE (MAP_ENTRIES * sizeof(map_entry_type))
#       define OFFSET_VALID(displ) TRUE
#       define CPP_MAX_OFFSET (HBLKSIZE - HDR_BYTES - 1)
#       define MAX_OFFSET ((word)CPP_MAX_OFFSET)
#   else
#       define map_entry_type char
#       define OBJ_INVALID 0x7f
#       define MAP_ENTRY(map, bytes) \
                (map)[bytes]
#       define MAP_ENTRIES HBLKSIZE
#       define MAP_SIZE MAP_ENTRIES
#       define CPP_MAX_OFFSET (WORDS_TO_BYTES(OBJ_INVALID) - 1)
#       define MAX_OFFSET ((word)CPP_MAX_OFFSET)
#       define VALID_OFFSET_SZ \
          (CPP_MAX_OFFSET > WORDS_TO_BYTES(CPP_MAXOBJSZ)? \
           CPP_MAX_OFFSET+1 \
           : WORDS_TO_BYTES(CPP_MAXOBJSZ)+1)
        char _valid_offsets[VALID_OFFSET_SZ];
                                /* GC_valid_offsets[i] == TRUE ==> i */
                                /* is registered as a displacement.  */
#       define OFFSET_VALID(displ) GC_valid_offsets[displ]
        char _modws_valid_offsets[sizeof(word)];
                                /* GC_valid_offsets[i] ==>                */
                                /* GC_modws_valid_offsets[i%sizeof(word)] */
#   endif
# ifdef STUBBORN_ALLOC
    page_hash_table _changed_pages;
        /* Stubborn object pages that were changed since last call to */
        /* GC_read_changed.                                           */
    page_hash_table _prev_changed_pages;
        /* Stubborn object pages that were changed before last call to */
        /* GC_read_changed.                                            */
# endif
# if defined(PROC_VDB) || defined(MPROTECT_VDB)
    page_hash_table _grungy_pages; /* Pages that were dirty at last */
                                   /* GC_read_dirty.                */
# endif
# ifdef MPROTECT_VDB
    VOLATILE page_hash_table _dirty_pages;
                        /* Pages dirtied since last GC_read_dirty. */
# endif
# ifdef PROC_VDB
    page_hash_table _written_pages;     /* Pages ever dirtied */
# endif
# ifdef LARGE_CONFIG
#   if CPP_WORDSZ > 32
#     define MAX_HEAP_SECTS 4096        /* overflows at roughly 64 GB      */
#   else
#     define MAX_HEAP_SECTS 768         /* Separately added heap sections. */
#   endif
# else
#   define MAX_HEAP_SECTS 256
# endif
  struct HeapSect {
      ptr_t hs_start; word hs_bytes;
  } _heap_sects[MAX_HEAP_SECTS];
# ifdef MSWIN32
    ptr_t _heap_bases[MAX_HEAP_SECTS];
                /* Start address of memory regions obtained from kernel. */
# endif
  struct roots _static_roots[MAX_ROOT_SETS];
# ifndef MSWIN32
    struct roots * _root_index[RT_SIZE];
# endif
  struct exclusion _excl_table[MAX_EXCLUSIONS];
  /* Block header index; see gc_headers.h */
  bottom_index * _all_nils;
  bottom_index * _top_index [TOP_SZ];
#ifdef SAVE_CALL_CHAIN
  struct callinfo _last_stack[NFRAMES]; /* Stack at last garbage collection. */
                                        /* Useful for debugging mysterious   */
                                        /* object disappearances.            */
                                        /* In the multithreaded case, we     */
                                        /* currently only save the calling   */
                                        /* stack.                            */
#endif
};
GC_API GC_FAR struct _GC_arrays GC_arrays;

# define GC_objfreelist GC_arrays._objfreelist
# define GC_aobjfreelist GC_arrays._aobjfreelist
# define GC_uobjfreelist GC_arrays._uobjfreelist
# ifdef ATOMIC_UNCOLLECTABLE
#   define GC_auobjfreelist GC_arrays._auobjfreelist
# endif
# define GC_sobjfreelist GC_arrays._sobjfreelist
# define GC_valid_offsets GC_arrays._valid_offsets
# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
# ifdef STUBBORN_ALLOC
#   define GC_changed_pages GC_arrays._changed_pages
#   define GC_prev_changed_pages GC_arrays._prev_changed_pages
# endif
# define GC_obj_map GC_arrays._obj_map
# define GC_last_heap_addr GC_arrays._last_heap_addr
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_allocd GC_arrays._words_allocd
# define GC_words_wasted GC_arrays._words_wasted
# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
# define GC_mark_procs GC_arrays._mark_procs
# define GC_heapsize GC_arrays._heapsize
# define GC_max_heapsize GC_arrays._max_heapsize
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
# ifdef USE_MUNMAP
#   define GC_unmapped_bytes GC_arrays._unmapped_bytes
# endif
# ifdef MSWIN32
#   define GC_heap_bases GC_arrays._heap_bases
# endif
# define GC_static_roots GC_arrays._static_roots
# define GC_root_index GC_arrays._root_index
# define GC_excl_table GC_arrays._excl_table
# define GC_all_nils GC_arrays._all_nils
# define GC_top_index GC_arrays._top_index
# if defined(PROC_VDB) || defined(MPROTECT_VDB)
#   define GC_grungy_pages GC_arrays._grungy_pages
# endif
# ifdef MPROTECT_VDB
#   define GC_dirty_pages GC_arrays._dirty_pages
# endif
# ifdef PROC_VDB
#   define GC_written_pages GC_arrays._written_pages
# endif
# ifdef GATHERSTATS
#   define GC_composite_in_use GC_arrays._composite_in_use
#   define GC_atomic_in_use GC_arrays._atomic_in_use
# endif
# ifdef MERGE_SIZES
#   define GC_size_map GC_arrays._size_map
# endif

# define beginGC_arrays ((ptr_t)(&GC_arrays))
# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))
/* Object kinds: */
# define MAXOBJKINDS 16

extern struct obj_kind {
   ptr_t *ok_freelist;  /* Array of free list headers for this kind of  */
                        /* object.  Point either to GC_arrays or to     */
                        /* storage allocated with GC_scratch_alloc.     */
   struct hblk **ok_reclaim_list;
                        /* List headers for lists of blocks waiting to be */
                        /* swept.                                         */
   word ok_descriptor;  /* Descriptor template for objects in this */
                        /* block.                                   */
   GC_bool ok_relocate_descr;
                        /* Add object size in bytes to descriptor    */
                        /* template to obtain descriptor.  Otherwise */
                        /* template is used as is.                   */
   GC_bool ok_init;   /* Clear objects before putting them on the free list. */
} GC_obj_kinds[MAXOBJKINDS];

# define endGC_obj_kinds (((ptr_t)(&GC_obj_kinds)) + (sizeof GC_obj_kinds))

# define end_gc_area ((ptr_t)endGC_arrays == (ptr_t)(&GC_obj_kinds) ? \
                        endGC_obj_kinds : endGC_arrays)
1287 /* Predefined kinds: */
1288 # define PTRFREE 0
1289 # define NORMAL 1
1290 # define UNCOLLECTABLE 2
1291 # ifdef ATOMIC_UNCOLLECTABLE
1292 # define AUNCOLLECTABLE 3
1293 # define STUBBORN 4
1294 # define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE)
1295 # else
1296 # define STUBBORN 3
1297 # define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
1298 # endif
1300 extern int GC_n_kinds;
1302 GC_API word GC_fo_entries;
1304 extern word GC_n_heap_sects; /* Number of separately added heap */
1305 /* sections. */
1307 extern word GC_page_size;
1309 # ifdef MSWIN32
1310 extern word GC_n_heap_bases; /* See GC_heap_bases. */
1311 # endif
extern word GC_total_stack_black_listed;
                        /* Number of bytes on stack blacklist. */

extern word GC_black_list_spacing;
                        /* Average number of bytes between blacklisted */
                        /* blocks.  Approximate.                       */
                        /* Counts only blocks that are                 */
                        /* "stack-blacklisted", i.e. that are          */
                        /* problematic in the interior of an object.   */

extern char * GC_invalid_map;
                        /* Pointer to the nowhere valid hblk map */
                        /* Blocks pointing to this map are free. */

extern struct hblk * GC_hblkfreelist[];
                        /* List of completely empty heap blocks */
                        /* Linked through hb_next field of      */
                        /* header structure associated with     */
                        /* block.                               */

extern GC_bool GC_is_initialized;       /* GC_init() has been run. */

extern GC_bool GC_objects_are_marked;   /* There are marked objects in */
                                        /* the heap.                   */

#ifndef SMALL_CONFIG
  extern GC_bool GC_incremental;
                        /* Using incremental/generational collection. */
#else
# define GC_incremental TRUE
                        /* Hopefully allow optimizer to remove some code. */
#endif

extern GC_bool GC_dirty_maintained;
                        /* Dirty bits are being maintained,   */
                        /* either for incremental collection, */
                        /* or to limit the root set.          */

extern word GC_root_size;       /* Total size of registered root sections */

extern GC_bool GC_debugging_started;    /* GC_debug_malloc has been called. */

extern ptr_t GC_least_plausible_heap_addr;
extern ptr_t GC_greatest_plausible_heap_addr;
                        /* Bounds on the heap.  Guaranteed valid.    */
                        /* Likely to include future heap expansion.  */

/* Operations */
# ifndef abs
#   define abs(x) ((x) < 0? (-(x)) : (x))
# endif
/* Marks are in a reserved area in                          */
/* each heap block.  Each word has one mark bit associated  */
/* with it.  Only those corresponding to the beginning of an */
/* object are used.                                         */

/* Mark bit operations */

/*
 * Retrieve, set, clear the mark bit corresponding
 * to the nth word in a given heap block.
 *
 * (Recall that bit n corresponds to object beginning at word n
 * relative to the beginning of the block, including unused words)
 */

# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
                            >> (modWORDSZ(n))) & (word)1)
# define set_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
                                |= (word)1 << modWORDSZ(n)

# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
                                &= ~((word)1 << modWORDSZ(n))
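
/* Illustrative usage sketch: mark the object beginning at word n of  */
/* the block described by hhdr, if it is not already marked.          */
#if 0
    if (!mark_bit_from_hdr(hhdr, n)) {
        set_mark_bit_from_hdr(hhdr, n);
        /* ... push the object's contents onto the mark stack ... */
    }
#endif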
/* Important internal collector routines */

ptr_t GC_approx_sp();

GC_bool GC_should_collect();
#ifdef PRESERVE_LAST
    GC_bool GC_in_last_heap_sect(/* ptr_t */);
        /* In last added heap section?  If so, avoid breaking up. */
#endif
void GC_apply_to_all_blocks(/*fn, client_data*/);
                        /* Invoke fn(hbp, client_data) for each */
                        /* allocated heap block.                */
struct hblk * GC_next_used_block(/* struct hblk * h */);
                        /* Return first in-use block >= h */
struct hblk * GC_prev_block(/* struct hblk * h */);
                        /* Return last block <= h.  Returned block     */
                        /* is managed by GC, but may or may not be in  */
                        /* use.                                        */
void GC_mark_init();
void GC_clear_marks();  /* Clear mark bits for all heap objects. */
void GC_invalidate_mark_state();        /* Tell the marker that marked   */
                                        /* objects may point to unmarked */
                                        /* ones, and roots may point to  */
                                        /* unmarked objects.             */
                                        /* Reset mark stack.             */
void GC_mark_from_mark_stack(); /* Mark from everything on the mark stack. */
                                /* Return after about one page's worth of  */
                                /* work.                                   */
GC_bool GC_mark_stack_empty();
GC_bool GC_mark_some(/* cold_gc_frame */);
                        /* Perform about one page's worth of marking */
                        /* work of whatever kind is needed.  Returns */
                        /* quickly if no collection is in progress.  */
                        /* Return TRUE if mark phase finished.       */
void GC_initiate_gc();          /* initiate collection.                  */
                                /* If the mark state is invalid, this    */
                                /* becomes a full collection.  Otherwise */
                                /* it's partial.                         */
void GC_push_all(/*b,t*/);      /* Push everything in a range */
                                /* onto mark stack.           */
void GC_push_dirty(/*b,t*/);    /* Push all possibly changed */
                                /* subintervals of [b,t) onto */
                                /* mark stack.                */
#ifndef SMALL_CONFIG
  void GC_push_conditional(/* ptr_t b, ptr_t t, GC_bool all*/);
#else
# define GC_push_conditional(b, t, all) GC_push_all(b, t)
#endif
                                /* Do either of the above, depending */
                                /* on the third arg.                 */
void GC_push_all_stack(/*b,t*/);    /* As above, but consider        */
                                    /* interior pointers as valid    */
void GC_push_all_eager(/*b,t*/);    /* Same as GC_push_all_stack, but */
                                    /* ensures that stack is scanned  */
                                    /* immediately, not just scheduled */
                                    /* for scanning.                   */
#ifndef THREADS
  void GC_push_all_stack_partially_eager(/* bottom, top, cold_gc_frame */);
                        /* Similar to GC_push_all_eager, but only the */
                        /* part hotter than cold_gc_frame is scanned  */
                        /* immediately.  Needed to ensure that callee- */
                        /* save registers are not missed.              */
#else
  /* In the threads case, we push part of the current thread stack    */
  /* with GC_push_all_eager when we push the registers.  This gets the */
  /* callee-save registers that may disappear.  The remainder of the  */
  /* stacks are scheduled for scanning in *GC_push_other_roots, which */
  /* is thread-package-specific.                                      */
#endif
void GC_push_current_stack(/* ptr_t cold_gc_frame */);
                        /* Push enough of the current stack eagerly to */
                        /* ensure that callee-save registers saved in  */
                        /* GC frames are scanned.                      */
                        /* In the non-threads case, schedule entire    */
                        /* stack for scanning.                         */
void GC_push_roots(/* GC_bool all, ptr_t cold_gc_frame */);
                        /* Push all or dirty roots. */
extern void (*GC_push_other_roots)();
                        /* Push system or application specific roots   */
                        /* onto the mark stack.  In some environments  */
                        /* (e.g. threads environments) this is         */
                        /* predefined to be non-zero.  A client        */
                        /* supplied replacement should also call the   */
                        /* original function.                          */
extern void (*GC_start_call_back)(/* void */);
                        /* Called at start of full collections.    */
                        /* Not called if 0.  Called with allocation */
                        /* lock held.                                */
                        /* 0 by default.                             */
void GC_push_regs();    /* Push register contents onto mark stack.   */
                        /* If NURSERY is defined, the default push   */
                        /* action can be overridden with GC_push_proc */
void GC_remark();       /* Mark from all marked objects.  Used */
                        /* only if we had to drop something.   */

# ifdef NURSERY
    extern void (*GC_push_proc)(ptr_t);
# endif
# if defined(MSWIN32)
  void __cdecl GC_push_one();
# else
  void GC_push_one(/*p*/);      /* If p points to an object, mark it   */
                                /* and push contents on the mark stack */
# endif
void GC_push_one_checked(/*p*/); /* Ditto, omits plausibility test */
void GC_push_marked(/* struct hblk h, hdr * hhdr */);
                /* Push contents of all marked objects in h onto */
                /* mark stack.                                   */
#ifdef SMALL_CONFIG
# define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
#else
  struct hblk * GC_push_next_marked_dirty(/* h */);
                /* Invoke GC_push_marked on next dirty block above h. */
                /* Return a pointer just past the end of this block.  */
#endif /* !SMALL_CONFIG */
struct hblk * GC_push_next_marked(/* h */);
                /* Ditto, but also mark from clean pages. */
struct hblk * GC_push_next_marked_uncollectable(/* h */);
                /* Ditto, but mark only from uncollectable pages. */
GC_bool GC_stopped_mark(); /* Stop world and mark from all roots */
                           /* and rescuers.                      */
void GC_clear_hdr_marks(/* hhdr */);  /* Clear the mark bits in a header */
void GC_set_hdr_marks(/* hhdr */);  /* Set the mark bits in a header */
void GC_add_roots_inner();
GC_bool GC_is_static_root(/* ptr_t p */);
                /* Is the address p in one of the registered static */
                /* root sections?                                   */
void GC_register_dynamic_libraries();
                /* Add dynamic library data sections to the root set. */
/* Machine dependent startup routines */
ptr_t GC_get_stack_base();
void GC_register_data_segments();

/* Black listing: */
void GC_bl_init();
# ifndef ALL_INTERIOR_POINTERS
    void GC_add_to_black_list_normal(/* bits, maybe source */);
                        /* Register bits as a possible future false */
                        /* reference from the heap or static data   */
#   ifdef PRINT_BLACK_LIST
#     define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
                        GC_add_to_black_list_normal(bits, source)
#   else
#     define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
                        GC_add_to_black_list_normal(bits)
#   endif
# else
#   ifdef PRINT_BLACK_LIST
#     define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
                        GC_add_to_black_list_stack(bits, source)
#   else
#     define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
                        GC_add_to_black_list_stack(bits)
#   endif
# endif

void GC_add_to_black_list_stack(/* bits, maybe source */);
struct hblk * GC_is_black_listed(/* h, len */);
                        /* If there are likely to be false references */
                        /* to a block starting at h of the indicated  */
                        /* length, then return the next plausible     */
                        /* starting location for h that might avoid   */
                        /* these false references.                    */
void GC_promote_black_lists();
                        /* Declare an end to a black listing phase. */
void GC_unpromote_black_lists();
                        /* Approximately undo the effect of the above. */
                        /* This actually loses some information, but   */
                        /* only in a reasonably safe way.              */
word GC_number_stack_black_listed(/*struct hblk *start, struct hblk *endp1 */);
                        /* Return the number of (stack) blacklisted */
                        /* blocks in the range for statistical      */
                        /* purposes.                                */
ptr_t GC_scratch_alloc(/*bytes*/);
                                /* GC internal memory allocation for   */
                                /* small objects.  Deallocation is not */
                                /* possible.                           */

/* Heap block layout maps: */
void GC_invalidate_map(/* hdr */);
                                /* Remove the object map associated */
                                /* with the block.  This identifies */
                                /* the block as invalid to the mark */
                                /* routines.                        */
GC_bool GC_add_map_entry(/*sz*/);
                                /* Add a heap block map for objects of */
                                /* size sz to obj_map.                 */
                                /* Return FALSE on failure.            */
void GC_register_displacement_inner(/*offset*/);
                                /* Version of GC_register_displacement */
                                /* that assumes lock is already held   */
                                /* and signals are already disabled.   */

/* hblk allocation: */
void GC_new_hblk(/*size_in_words, kind*/);
                                /* Allocate a new heap block, and build */
                                /* a free list in it.                   */
struct hblk * GC_allochblk(/*size_in_words, kind*/);
                                /* Allocate a heap block, clear it if */
                                /* for composite objects, inform      */
                                /* the marker that block is valid     */
                                /* for objects of indicated size.     */
                                /* sz < 0 ==> atomic.                 */
void GC_freehblk();             /* Deallocate a heap block and mark it */
                                /* as invalid.                         */
1598 /* Misc GC: */
1599 void GC_init_inner();
1600 GC_bool GC_expand_hp_inner();
1601 void GC_start_reclaim(/*abort_if_found*/);
1602 /* Restore unmarked objects to free */
1603 /* lists, or (if abort_if_found is */
1604 /* TRUE) report them. */
1605 /* Sweeping of small object pages is */
1606 /* largely deferred. */
1607 void GC_continue_reclaim(/*size, kind*/);
1608 /* Sweep pages of the given size and */
1609 /* kind, as long as possible, and */
1610 /* as long as the corr. free list is */
1611 /* empty. */
void GC_reclaim_or_delete_all();
                        /* Arrange for all reclaim lists to be         */
                        /* empty.  Judiciously choose between          */
                        /* sweeping and discarding each page.          */
GC_bool GC_reclaim_all(/* GC_stop_func f */);
                        /* Reclaim all blocks.  Abort (in a            */
                        /* consistent state) if f returns TRUE.        */
GC_bool GC_block_empty(/* hhdr */);  /* Block completely unmarked?     */
GC_bool GC_never_stop_func();        /* Returns FALSE.                 */
GC_bool GC_try_to_collect_inner(/* GC_stop_func f */);
                        /* Collect; caller must have acquired          */
                        /* lock and disabled signals.                  */
                        /* Collection is aborted if f returns          */
                        /* TRUE.  Returns TRUE if it completes         */
                        /* successfully.                               */
# define GC_gcollect_inner() \
	(void) GC_try_to_collect_inner(GC_never_stop_func)
void GC_finish_collection();
                        /* Finish collection.  Mark bits are           */
                        /* consistent and lock is still held.          */
GC_bool GC_collect_or_expand(/* needed_blocks */);
                        /* Collect, or expand the heap, in an          */
                        /* attempt to make the indicated number        */
                        /* of free blocks available.  Should be        */
                        /* called until the blocks are available       */
                        /* or until it fails by returning FALSE.       */
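
#if 0
/* Illustrative sketch only: the retry discipline described above.    */
/* enough_free_blocks is a hypothetical availability check.           */
static GC_bool ensure_free_blocks(word needed_blocks)
{
    while (!enough_free_blocks(needed_blocks)) {
        if (!GC_collect_or_expand(needed_blocks)) {
            return FALSE;     /* collection and expansion both failed */
        }
    }
    return TRUE;
}
#endif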
GC_API void GC_init();  /* Initialize collector.                       */
void GC_collect_a_little_inner(/* int n */);
                        /* Do n units worth of garbage                 */
                        /* collection work, if appropriate.            */
                        /* A unit is an amount appropriate for         */
                        /* HBLKSIZE bytes of allocation.               */
ptr_t GC_generic_malloc(/* bytes, kind */);
                        /* Allocate an object of the given             */
                        /* kind.  By default, there are only           */
                        /* a few kinds: composite (normal),            */
                        /* atomic (pointer-free), uncollectable,       */
                        /* etc.  We claim it's possible for            */
                        /* clever client code that understands         */
                        /* GC internals to add more, e.g. to           */
                        /* communicate object layout info              */
                        /* to the collector.                           */
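
#if 0
/* Illustrative sketch only: allocating objects of explicit kinds,    */
/* assuming the NORMAL/PTRFREE kind numbers defined elsewhere in      */
/* this header.  The sizes are hypothetical.                          */
static void kind_example(void)
{
    ptr_t scanned = GC_generic_malloc(64, NORMAL);
                                /* may contain pointers; is scanned   */
    ptr_t atomic  = GC_generic_malloc(64, PTRFREE);
                                /* never scanned for pointers         */
}
#endif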
ptr_t GC_generic_malloc_ignore_off_page(/* bytes, kind */);
                        /* As above, but pointers past the             */
                        /* first page of the resulting object          */
                        /* are ignored.                                */
ptr_t GC_generic_malloc_inner(/* bytes, kind */);
                        /* Ditto, but assumes the lock is held         */
                        /* and signals are disabled.                   */
ptr_t GC_generic_malloc_words_small GC_PROTO((size_t words, int kind));
                        /* As above, but size in units of words.       */
                        /* Bypasses MERGE_SIZES.  Assumes              */
                        /* words <= MAXOBJSZ.                          */
ptr_t GC_generic_malloc_inner_ignore_off_page(/* bytes, kind */);
                        /* Allocate an object, where                   */
                        /* the client guarantees that there            */
                        /* will always be a pointer to the             */
                        /* beginning of the object while the           */
                        /* object is live.                             */
ptr_t GC_allocobj(/* sz_in_words, kind */);
                        /* Make the indicated                          */
                        /* free list nonempty, and return its          */
                        /* head.                                       */

void GC_init_headers();
GC_bool GC_install_header(/* h */);
                        /* Install a header for block h.               */
                        /* Return FALSE on failure.                    */
GC_bool GC_install_counts(/* h, sz */);
                        /* Set up forwarding counts for block          */
                        /* h of size sz.                               */
                        /* Return FALSE on failure.                    */
void GC_remove_header(/* h */);
                        /* Remove the header for block h.              */
void GC_remove_counts(/* h, sz */);
                        /* Remove forwarding counts for h.             */
hdr * GC_find_header(/* p */);  /* Debugging only.                     */

void GC_finalize();     /* Perform all indicated finalization actions  */
                        /* on unmarked objects.                        */
                        /* Unreachable finalizable objects are         */
                        /* enqueued for processing by                  */
                        /* GC_invoke_finalizers.                       */
                        /* Invoked with the allocation lock held.      */

void GC_add_to_heap(/* p, bytes */);
                        /* Add an HBLKSIZE-aligned chunk to the heap.  */

void GC_print_obj(/* ptr_t p */);
                        /* p points somewhere inside an object with    */
                        /* debugging info.  Print a human readable     */
                        /* description of the object to stderr.        */
ptr_t GC_debug_object_start(/* ptr_t p */);
                        /* p points to the start of an object that     */
                        /* may have debug info at its head.  Return    */
                        /* the start of the real data.                 */
extern void (*GC_check_heap)();
                        /* Check that all objects in the heap with     */
                        /* debugging info are intact.  Print           */
                        /* descriptions of any that are not.           */
extern void (*GC_print_heap_obj)(/* ptr_t p */);
                        /* If possible, print a detailed               */
                        /* description of the object                   */
                        /* referred to by p.                           */

/* Memory unmapping: */
#ifdef USE_MUNMAP
void GC_unmap_old(void);
void GC_merge_unmapped(void);
void GC_unmap(ptr_t start, word bytes);
void GC_remap(ptr_t start, word bytes);
void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2);
#endif
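
#if 0
/* Illustrative sketch only: a plausible ordering of the unmapping    */
/* hooks above at the end of a collection cycle.  The ordering is an  */
/* assumption here, not something this header specifies.              */
static void unmap_example(void)
{
    GC_unmap_old();      /* return long-idle free blocks to the OS    */
    GC_merge_unmapped(); /* coalesce adjacent unmapped sections       */
}
#endif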

/* Virtual dirty bit implementation:          */
/* Each implementation exports the following: */
void GC_read_dirty();   /* Retrieve dirty bits.                        */
GC_bool GC_page_was_dirty(/* struct hblk * h */);
                        /* Read retrieved dirty bits.                  */
GC_bool GC_page_was_ever_dirty(/* struct hblk * h */);
                        /* Could the page contain valid heap pointers? */
void GC_is_fresh(/* struct hblk * h, word number_of_blocks */);
                        /* Assert the region currently contains no     */
                        /* valid pointers.                             */
void GC_write_hint(/* struct hblk * h */);
                        /* h is about to be written.                   */
void GC_dirty_init();
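
#if 0
/* Illustrative sketch only: how an incremental collection might      */
/* consult the interface above.  first and n_blocks are hypothetical. */
static void rescan_dirty_pages(struct hblk * first, word n_blocks)
{
    struct hblk * h;

    GC_read_dirty();            /* snapshot the dirty bits            */
    for (h = first; h < first + n_blocks; h++) {
        if (GC_page_was_dirty(h)) {
            /* h may have acquired new pointers since the last mark   */
            /* phase and must be rescanned.                           */
        }
    }
}
#endif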

/* Slow/general mark bit manipulation: */
GC_API GC_bool GC_is_marked();
void GC_clear_mark_bit();
void GC_set_mark_bit();

/* Stubborn objects: */
void GC_read_changed();               /* Analogous to GC_read_dirty.      */
GC_bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty.  */
void GC_clean_changing_list();  /* Collect obsolete changing list entries. */
void GC_stubborn_init();

/* Debugging print routines: */
void GC_print_block_list();
void GC_print_hblkfreelist();
void GC_print_heap_sects();
void GC_print_static_roots();
void GC_dump();

#ifdef KEEP_BACK_PTRS
  void GC_store_back_pointer(ptr_t source, ptr_t dest);
  void GC_marked_for_finalization(ptr_t dest);
# define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
# define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
#else
# define GC_STORE_BACK_PTR(source, dest)
# define GC_MARKED_FOR_FINALIZATION(dest)
#endif
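
#if 0
/* Illustrative sketch only: a mark-time call site for the macros     */
/* above.  With KEEP_BACK_PTRS undefined they expand to nothing, so   */
/* the call site costs nothing.  current and target are hypothetical. */
static void back_ptr_example(ptr_t current, ptr_t target)
{
    /* current: object being scanned; target: pointer found in it.    */
    GC_STORE_BACK_PTR(current, target);
}
#endif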

/* Make arguments appear live to compiler: */
# ifdef __WATCOMC__
    void GC_noop(void*, ...);
# else
    GC_API void GC_noop();
# endif

void GC_noop1(/* word arg */);

/* Logging and diagnostic output: */
GC_API void GC_printf GC_PROTO((char * format, long, long, long, long, long, long));
                        /* A version of printf that doesn't            */
                        /* allocate, is restricted to long             */
                        /* arguments, and (unfortunately, for          */
                        /* portability's sake) doesn't use varargs.    */
                        /* Restricted to 6 args and 1K total           */
                        /* output length.                              */
                        /* (We use sprintf.  Hopefully that            */
                        /* doesn't allocate for long arguments.)       */
# define GC_printf0(f) GC_printf(f, 0l, 0l, 0l, 0l, 0l, 0l)
# define GC_printf1(f,a) GC_printf(f, (long)(a), 0l, 0l, 0l, 0l, 0l)
# define GC_printf2(f,a,b) GC_printf(f, (long)(a), (long)(b), 0l, 0l, 0l, 0l)
# define GC_printf3(f,a,b,c) GC_printf(f, (long)(a), (long)(b), (long)(c), \
					    0l, 0l, 0l)
# define GC_printf4(f,a,b,c,d) GC_printf(f, (long)(a), (long)(b), (long)(c), \
					    (long)(d), 0l, 0l)
# define GC_printf5(f,a,b,c,d,e) GC_printf(f, (long)(a), (long)(b), (long)(c), \
					    (long)(d), (long)(e), 0l)
# define GC_printf6(f,a,b,c,d,e,g) GC_printf(f, (long)(a), (long)(b), (long)(c), \
					    (long)(d), (long)(e), (long)(g))
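
#if 0
/* Illustrative sketch only: typical use of the fixed-arity wrappers  */
/* above.  The message text is made up; GC_heapsize is the heap size  */
/* field maintained elsewhere in this header.                         */
static void printf_example(void)
{
    GC_printf0("Starting full collection\n");
    GC_printf1("Heap size = %lu bytes\n", (unsigned long)GC_heapsize);
}
#endif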

void GC_err_printf(/* format, a, b, c, d, e, f */);
                        /* Ditto, but writes to stderr.                */
# define GC_err_printf0(f) GC_err_puts(f)
# define GC_err_printf1(f,a) GC_err_printf(f, (long)(a), 0l, 0l, 0l, 0l, 0l)
# define GC_err_printf2(f,a,b) GC_err_printf(f, (long)(a), (long)(b), 0l, 0l, 0l, 0l)
# define GC_err_printf3(f,a,b,c) GC_err_printf(f, (long)(a), (long)(b), (long)(c), \
						0l, 0l, 0l)
# define GC_err_printf4(f,a,b,c,d) GC_err_printf(f, (long)(a), (long)(b), \
						(long)(c), (long)(d), 0l, 0l)
# define GC_err_printf5(f,a,b,c,d,e) GC_err_printf(f, (long)(a), (long)(b), \
						(long)(c), (long)(d), \
						(long)(e), 0l)
# define GC_err_printf6(f,a,b,c,d,e,g) GC_err_printf(f, (long)(a), (long)(b), \
						(long)(c), (long)(d), \
						(long)(e), (long)(g))

void GC_err_puts(/* char *s */);
                        /* Write s to stderr, don't buffer, don't add  */
                        /* newlines, don't ...                         */

# ifdef GC_ASSERTIONS
#   define GC_ASSERT(expr) \
	do { \
	  if (!(expr)) { \
	    GC_err_printf2("Assertion failure: %s:%ld\n", \
			   __FILE__, (long)__LINE__); \
	    ABORT("assertion failure"); \
	  } \
	} while (0)
# else
#   define GC_ASSERT(expr)
# endif
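
#if 0
/* Illustrative sketch only: GC_ASSERT guards internal invariants and */
/* compiles away unless GC_ASSERTIONS is defined.  h is hypothetical; */
/* HBLKSIZE is defined elsewhere in this header.                      */
static void assert_example(struct hblk * h)
{
    GC_ASSERT((word)h % HBLKSIZE == 0);  /* heap blocks are aligned    */
}
#endif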

# endif /* GC_PRIVATE_H */