/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# if defined(LINUX) && !defined(POWERPC)
#   include <linux/version.h>
#   if (LINUX_VERSION_CODE <= 0x10400)
      /* Ugly hack to get struct sigcontext_struct definition.  Required */
      /* for some early 1.3.X releases.  Will hopefully go away soon.    */
      /* In some later Linux releases, asm/sigcontext.h may have to      */
      /* be included instead.                                            */
#     include <asm/signal.h>
#   else
      /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
      /* struct sigcontext.  libc6 (glibc2) uses "struct sigcontext" in     */
      /* prototypes, so we have to include the top-level sigcontext.h to    */
      /* make sure the former gets defined to be the latter if appropriate. */
#     include <features.h>
#     if 2 <= __GLIBC__
#       if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
          /* glibc 2.1 no longer has sigcontext.h.  But signal.h */
          /* has the right declaration for glibc 2.1.            */
#         include <sigcontext.h>
#       endif /* 0 == __GLIBC_MINOR__ */
#     else /* not 2 <= __GLIBC__ */
        /* libc5 doesn't have <sigcontext.h>: go directly with the kernel   */
        /* one.  Check LINUX_VERSION_CODE to see which we should reference. */
#       include <asm/sigcontext.h>
#     endif /* 2 <= __GLIBC__ */
#   endif
# endif
# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS)
#   include <sys/types.h>
#   if !defined(MSWIN32) && !defined(SUNOS4)

/* Blatantly OS dependent routines, except for those that are related  */
/* to dynamic loading.                                                  */

# if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
#   define NEED_FIND_LIMIT
# endif

# if defined(IRIX_THREADS) || defined(HPUX_THREADS)
#   define NEED_FIND_LIMIT
# endif

# if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
#   define NEED_FIND_LIMIT
# endif

# if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
#   define NEED_FIND_LIMIT
# endif

# if defined(LINUX) && \
     (defined(POWERPC) || defined(SPARC) || defined(ALPHA) || defined(IA64))
#   define NEED_FIND_LIMIT
# endif

#ifdef NEED_FIND_LIMIT
#   include <machine/trap.h>

#   include <proto/exec.h>
#   include <proto/dos.h>
#   include <dos/dosextens.h>
#   include <workbench/startup.h>

#   define WIN32_LEAN_AND_MEAN
#   include <windows.h>

#   include <Processes.h>

#   include <sys/uio.h>
#   include <malloc.h>   /* for locking */

#   include <sys/types.h>
#   include <sys/mman.h>
#   include <sys/stat.h>

#   include <sys/siginfo.h>

#   define setjmp(env) sigsetjmp(env, 1)
#   define longjmp(env, val) siglongjmp(env, val)
#   define jmp_buf sigjmp_buf
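        /* sigsetjmp(env, 1) also saves the signal mask, so the        */
        /* corresponding siglongjmp restores it when the fault         */
        /* handlers below jump back out of signal context.             */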
  /* Apparently necessary for djgpp 2.01.  May cause problems with */
  /* other versions.                                                */
  typedef long unsigned int caddr_t;
135 # include "il/PCR_IL.h"
136 # include "th/PCR_ThCtl.h"
137 # include "mm/PCR_MM.h"
140 #if !defined(NO_EXECUTE_PERMISSION)
141 # define OPT_PROT_EXEC PROT_EXEC
143 # define OPT_PROT_EXEC 0
#if defined(SEARCH_FOR_DATA_START)
  /* The following doesn't work if the GC is in a dynamic library.   */
  /* The I386 case can be handled without a search.  The Alpha case  */
  /* used to be handled differently as well, but the rules changed   */
  /* for recent Linux versions.  This seems to be the easiest way to */
  /* cover all versions.                                             */

  extern char * GC_copyright[];  /* Any data symbol would do. */

  void GC_init_linux_data_start()
  {
    extern ptr_t GC_find_limit();

    GC_data_start = GC_find_limit((ptr_t)GC_copyright, FALSE);
  }
#endif
# ifndef ECOS_GC_MEMORY_SIZE
#   define ECOS_GC_MEMORY_SIZE (448 * 1024)
# endif /* ECOS_GC_MEMORY_SIZE */

  // setjmp() function, as described in ANSI para 7.6.1.1
# define setjmp( __env__ )  hal_setjmp( __env__ )

  // FIXME: This is a simple way of allocating memory which is
  // compatible with ECOS early releases.  Later releases use a more
  // sophisticated means of allocating memory than this simple static
  // allocator, but this method is at least bound to work.
  static char memory[ECOS_GC_MEMORY_SIZE];
  static char *brk = memory;

  static void *tiny_sbrk(ptrdiff_t increment)
  {
    void *p = brk;

    brk += increment;

    if (brk > memory + sizeof memory) {
      brk -= increment;
      return NULL;
    }

    return p;
  }
# define sbrk tiny_sbrk
# if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */

    unsigned short magic_number;
    unsigned short padding[29];

#define E_MAGIC(x)      (x).magic_number
#define EMAGIC          0x5A4D
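                        /* "MZ", the DOS header signature, read as a   */
                        /* little-endian 16-bit value.                 */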
#define E_LFANEW(x)     (x).new_exe_offset

    unsigned char magic_number[2];
    unsigned char byte_order;
    unsigned char word_order;
    unsigned long exe_format_level;
    unsigned long padding1[13];
    unsigned long object_table_offset;
    unsigned long object_count;
    unsigned long padding2[31];

#define E32_MAGIC1(x)   (x).magic_number[0]
#define E32MAGIC1       'L'
#define E32_MAGIC2(x)   (x).magic_number[1]
#define E32MAGIC2       'X'
#define E32_BORDER(x)   (x).byte_order
#define E32_WORDER(x)   (x).word_order
#define E32_CPU(x)      (x).cpu
#define E32_OBJTAB(x)   (x).object_table_offset
#define E32_OBJCNT(x)   (x).object_count

    unsigned long pagemap;
    unsigned long mapsize;
    unsigned long reserved;

#define O32_FLAGS(x)    (x).flags
#define OBJREAD         0x0001L
#define OBJWRITE        0x0002L
#define OBJINVALID      0x0080L
#define O32_SIZE(x)     (x).size
#define O32_BASE(x)     (x).base

# else  /* IBM's compiler */

/* A kludge to get around what appears to be a header file bug */
# define WORD  unsigned short
# define DWORD unsigned long

# endif /* __IBMC__ */

# define INCL_DOSEXCEPTIONS
# define INCL_DOSPROCESS
# define INCL_DOSERRORS
# define INCL_DOSMODULEMGR
# define INCL_DOSMEMMGR
/* Disable and enable signals during nontrivial allocations */

void GC_disable_signals(void)
{
    ULONG nest;

    DosEnterMustComplete(&nest);
    if (nest != 1) ABORT("nested GC_disable_signals");
}

void GC_enable_signals(void)
{
    ULONG nest;

    DosExitMustComplete(&nest);
    if (nest != 0) ABORT("GC_enable_signals");
}
# if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
     && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
     && !defined(NO_SIGSET)

#   if defined(sigmask) && !defined(UTS4)
        /* Use the traditional BSD interface */
#       define SIGSET_T int
#       define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
#       define SIG_FILL(set)  (set) = 0x7fffffff
            /* Setting the leading bit appears to provoke a bug in some  */
            /* longjmp implementations.  Most systems appear not to have */
            /* a signal 32.                                               */
#       define SIGSETMASK(old, new) (old) = sigsetmask(new)
#   else
        /* Use POSIX/SYSV interface */
#       define SIGSET_T sigset_t
#       define SIG_DEL(set, signal) sigdelset(&(set), (signal))
#       define SIG_FILL(set) sigfillset(&set)
#       define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
#   endif
static GC_bool mask_initialized = FALSE;

static SIGSET_T new_mask;

static SIGSET_T old_mask;

static SIGSET_T dummy;

#if defined(PRINTSTATS) && !defined(THREADS)
# define CHECK_SIGNALS
  int GC_sig_disabled = 0;
#endif
void GC_disable_signals()
{
    if (!mask_initialized) {
        SIG_FILL(new_mask);

        SIG_DEL(new_mask, SIGSEGV);
        SIG_DEL(new_mask, SIGILL);
        SIG_DEL(new_mask, SIGQUIT);
#       ifdef SIGBUS
            SIG_DEL(new_mask, SIGBUS);
#       endif
#       ifdef SIGIOT
            SIG_DEL(new_mask, SIGIOT);
#       endif
#       ifdef SIGEMT
            SIG_DEL(new_mask, SIGEMT);
#       endif
#       ifdef SIGTRAP
            SIG_DEL(new_mask, SIGTRAP);
#       endif
        mask_initialized = TRUE;
    }
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 0) ABORT("Nested disables");
        GC_sig_disabled++;
#   endif
    SIGSETMASK(old_mask, new_mask);
}
void GC_enable_signals()
{
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 1) ABORT("Unmatched enable");
        GC_sig_disabled--;
#   endif
    SIGSETMASK(dummy, old_mask);
}

/* Ivan Demakov: simplest way (to me) */
#if defined (DOS4GW) || defined (NO_SIGSET)
  void GC_disable_signals() { }
  void GC_enable_signals() { }
#endif
/* Find the page size */

void GC_setpagesize()
{
    SYSTEM_INFO sysinfo;

    GetSystemInfo(&sysinfo);
    GC_page_size = sysinfo.dwPageSize;
}

# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
     || defined(USE_MUNMAP)
    void GC_setpagesize()
    {
        GC_page_size = GETPAGESIZE();
    }
# else
    /* It's acceptable to fake it. */
    void GC_setpagesize()
    {
        GC_page_size = HBLKSIZE;
    }
# endif
/*
 * Find the base of the stack.
 * Used only in single-threaded environment.
 * With threads, GC_mark_roots needs to know how to do this.
 * Called with allocator lock held.
 */
# define is_writable(prot) ((prot) == PAGE_READWRITE \
                            || (prot) == PAGE_WRITECOPY \
                            || (prot) == PAGE_EXECUTE_READWRITE \
                            || (prot) == PAGE_EXECUTE_WRITECOPY)
/* Return the number of bytes that are writable starting at p.  */
/* The pointer p is assumed to be page aligned.                 */
/* If base is not 0, *base becomes the beginning of the         */
/* allocation region containing p.                              */
word GC_get_writable_length(ptr_t p, ptr_t *base)
{
    MEMORY_BASIC_INFORMATION buf;
    word result;
    word protect;

    result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
    if (base != 0) *base = (ptr_t)(buf.AllocationBase);
    protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
    if (!is_writable(protect)) {
        return(0);
    }
    if (buf.State != MEM_COMMIT) return(0);
    return(buf.RegionSize);
}

ptr_t GC_get_stack_base()
{
    int dummy;
    ptr_t sp = (ptr_t)(&dummy);
    ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
    word size = GC_get_writable_length(trunc_sp, 0);

    return(trunc_sp + size);
}
ptr_t GC_get_stack_base()
{
    PTIB ptib;
    PPIB ppib;

    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        GC_err_printf0("DosGetInfoBlocks failed\n");
        ABORT("DosGetInfoBlocks failed\n");
    }
    return((ptr_t)(ptib -> tib_pstacklimit));
}
ptr_t GC_get_stack_base()
{
    struct Process *proc = (struct Process *)SysBase->ThisTask;

    /* Reference: Amiga Guru Book Pages: 42,567,574 */
    if (proc->pr_Task.tc_Node.ln_Type == NT_PROCESS
        && proc->pr_CLI != NULL) {
        /* first ULONG is StackSize */
        /* longPtr = proc->pr_ReturnAddr; ... */

        return (char *)proc->pr_ReturnAddr + sizeof(ULONG);
    } else {
        return (char *)proc->pr_Task.tc_SPUpper;
    }
}
490 #if 0 /* old version */
491 ptr_t
GC_get_stack_base()
493 extern struct WBStartup
*_WBenchMsg
;
497 struct Process
*proc
;
498 struct CommandLineInterface
*cli
;
501 if ((task
= FindTask(0)) == 0) {
502 GC_err_puts("Cannot find own task structure\n");
503 ABORT("task missing");
505 proc
= (struct Process
*)task
;
506 cli
= BADDR(proc
->pr_CLI
);
508 if (_WBenchMsg
!= 0 || cli
== 0) {
509 size
= (char *)task
->tc_SPUpper
- (char *)task
->tc_SPLower
;
511 size
= cli
->cli_DefaultStack
* 4;
513 return (ptr_t
)(__base
+ GC_max(size
, __stack
));
517 # else /* !AMIGA, !OS2, ... */
519 # ifdef NEED_FIND_LIMIT
520 /* Some tools to implement HEURISTIC2 */
521 # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
522 /* static */ jmp_buf GC_jmp_buf
;
525 void GC_fault_handler(sig
)
528 longjmp(GC_jmp_buf
, 1);
532 typedef void (*handler
)(int);
534 typedef void (*handler
)();
537 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
538 static struct sigaction old_segv_act
;
539 # if defined(_sigargs) || defined(HPUX) /* !Irix6.x */
540 static struct sigaction old_bus_act
;
543 static handler old_segv_handler
, old_bus_handler
;
546 void GC_setup_temporary_fault_handler()
549 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
550 struct sigaction act
;
552 act
.sa_handler
= GC_fault_handler
;
553 act
.sa_flags
= SA_RESTART
| SA_NODEFER
;
554 /* The presence of SA_NODEFER represents yet another gross */
555 /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
556 /* interact correctly with -lthread. We hide the confusion */
557 /* by making sure that signal handling doesn't affect the */
560 (void) sigemptyset(&act
.sa_mask
);
        /* Older versions have a bug related to retrieving and */
        /* setting a handler at the same time.                  */
564 (void) sigaction(SIGSEGV
, 0, &old_segv_act
);
565 (void) sigaction(SIGSEGV
, &act
, 0);
567 (void) sigaction(SIGSEGV
, &act
, &old_segv_act
);
568 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
570 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
571 /* Pthreads doesn't exist under Irix 5.x, so we */
572 /* don't have to worry in the threads case. */
573 (void) sigaction(SIGBUS
, &act
, &old_bus_act
);
575 # endif /* IRIX_THREADS */
577 old_segv_handler
= signal(SIGSEGV
, GC_fault_handler
);
579 old_bus_handler
= signal(SIGBUS
, GC_fault_handler
);
585 void GC_reset_fault_handler()
588 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
589 (void) sigaction(SIGSEGV
, &old_segv_act
, 0);
590 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
592 (void) sigaction(SIGBUS
, &old_bus_act
, 0);
595 (void) signal(SIGSEGV
, old_segv_handler
);
597 (void) signal(SIGBUS
, old_bus_handler
);
  /* Return the first nonaddressable location > p (up) or       */
  /* the smallest location q s.t. [q,p] is addressable (!up).   */
  ptr_t GC_find_limit(p, up)
  ptr_t p;
  GC_bool up;
  {
    static VOLATILE ptr_t result;
                /* Needs to be static, since otherwise it may not be */
                /* preserved across the longjmp.  Can safely be      */
                /* static since it's only called once, with the      */
                /* allocation lock held.                             */

    GC_setup_temporary_fault_handler();
    if (setjmp(GC_jmp_buf) == 0) {
        result = (ptr_t)(((word)(p))
                          & ~(MIN_PAGE_SIZE-1));
        for (;;) {
            if (up) {
                result += MIN_PAGE_SIZE;
            } else {
                result -= MIN_PAGE_SIZE;
            }
            GC_noop1((word)(*result));
        }
    }
    GC_reset_fault_handler();
    if (!up) {
        result += MIN_PAGE_SIZE;
    }
    return(result);
  }
#ifdef LINUX_STACKBOTTOM

#include <sys/types.h>
#include <sys/stat.h>

# define STAT_SKIP 27   /* Number of fields preceding startstack */
                        /* field in /proc/self/stat              */
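                        /* (startstack is the 28th whitespace-separated */
                        /* field of the stat line; see proc(5).  The    */
                        /* code below skips 27 fields and parses the    */
                        /* next number.)                                 */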
ptr_t GC_linux_stack_base(void)
{
  /* We read the stack base value from /proc/self/stat.  We do this */
  /* using direct I/O system calls in order to avoid calling malloc */
  /* in case REDIRECT_MALLOC is defined.                            */
# define STAT_BUF_SIZE 4096
# ifdef USE_LD_WRAP
#   define STAT_READ __real_read
# else
#   define STAT_READ read
# endif
  char stat_buf[STAT_BUF_SIZE];
  int f;
  char c;
  word result = 0;
  size_t i, buf_offset = 0;

  f = open("/proc/self/stat", O_RDONLY);
  if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
      ABORT("Couldn't read /proc/self/stat");
  }
  c = stat_buf[buf_offset++];
  /* Skip the required number of fields.  This number is hopefully */
  /* constant across all Linux implementations.                    */
  for (i = 0; i < STAT_SKIP; ++i) {
      while (isspace(c)) c = stat_buf[buf_offset++];
      while (!isspace(c)) c = stat_buf[buf_offset++];
  }
  while (isspace(c)) c = stat_buf[buf_offset++];
  while (isdigit(c)) {
      result *= 10;
      result += c - '0';
      c = stat_buf[buf_offset++];
  }
  if (result < 0x10000000) ABORT("Absurd stack bottom value");
  return (ptr_t)result;
}

#endif /* LINUX_STACKBOTTOM */
693 ptr_t
GC_get_stack_base()
698 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
700 # if defined(STACKBASE)
701 extern ptr_t STACKBASE
;
708 # ifdef STACK_GROWS_DOWN
709 result
= (ptr_t
)((((word
)(&dummy
))
710 + STACKBOTTOM_ALIGNMENT_M1
)
711 & ~STACKBOTTOM_ALIGNMENT_M1
);
713 result
= (ptr_t
)(((word
)(&dummy
))
714 & ~STACKBOTTOM_ALIGNMENT_M1
);
716 # endif /* HEURISTIC1 */
717 # ifdef LINUX_STACKBOTTOM
718 result
= GC_linux_stack_base();
721 # ifdef STACK_GROWS_DOWN
722 result
= GC_find_limit((ptr_t
)(&dummy
), TRUE
);
723 # ifdef HEURISTIC2_LIMIT
724 if (result
> HEURISTIC2_LIMIT
725 && (ptr_t
)(&dummy
) < HEURISTIC2_LIMIT
) {
726 result
= HEURISTIC2_LIMIT
;
730 result
= GC_find_limit((ptr_t
)(&dummy
), FALSE
);
731 # ifdef HEURISTIC2_LIMIT
732 if (result
< HEURISTIC2_LIMIT
733 && (ptr_t
)(&dummy
) > HEURISTIC2_LIMIT
) {
734 result
= HEURISTIC2_LIMIT
;
739 # endif /* HEURISTIC2 */
740 # ifdef STACK_GROWS_DOWN
741 if (result
== 0) result
= (ptr_t
)(signed_word
)(-sizeof(ptr_t
));
744 # endif /* STACKBOTTOM */
745 # endif /* STACKBASE */
# endif /* ! AMIGA */
# endif /* ! MSWIN32 */

/*
 * Register static data segment(s) as roots.
 * If more data segments are added later then they need to be registered
 * at that point (as we do with SunOS dynamic loading),
 * or GC_mark_roots needs to check for them (as we do with PCR).
 * Called with allocator lock held.
 */
763 void GC_register_data_segments()
767 HMODULE module_handle
;
    struct exe_hdr hdrdos;    /* MSDOS header.                 */
    struct e32_exe hdr386;    /* Real header for my executable */
    struct o32_obj seg;       /* Current segment               */
777 if (DosGetInfoBlocks(&ptib
, &ppib
) != NO_ERROR
) {
778 GC_err_printf0("DosGetInfoBlocks failed\n");
779 ABORT("DosGetInfoBlocks failed\n");
781 module_handle
= ppib
-> pib_hmte
;
782 if (DosQueryModuleName(module_handle
, PBUFSIZ
, path
) != NO_ERROR
) {
783 GC_err_printf0("DosQueryModuleName failed\n");
784 ABORT("DosGetInfoBlocks failed\n");
786 myexefile
= fopen(path
, "rb");
787 if (myexefile
== 0) {
788 GC_err_puts("Couldn't open executable ");
789 GC_err_puts(path
); GC_err_puts("\n");
790 ABORT("Failed to open executable\n");
792 if (fread((char *)(&hdrdos
), 1, sizeof hdrdos
, myexefile
) < sizeof hdrdos
) {
793 GC_err_puts("Couldn't read MSDOS header from ");
794 GC_err_puts(path
); GC_err_puts("\n");
795 ABORT("Couldn't read MSDOS header");
797 if (E_MAGIC(hdrdos
) != EMAGIC
) {
798 GC_err_puts("Executable has wrong DOS magic number: ");
799 GC_err_puts(path
); GC_err_puts("\n");
800 ABORT("Bad DOS magic number");
802 if (fseek(myexefile
, E_LFANEW(hdrdos
), SEEK_SET
) != 0) {
803 GC_err_puts("Seek to new header failed in ");
804 GC_err_puts(path
); GC_err_puts("\n");
805 ABORT("Bad DOS magic number");
807 if (fread((char *)(&hdr386
), 1, sizeof hdr386
, myexefile
) < sizeof hdr386
) {
808 GC_err_puts("Couldn't read MSDOS header from ");
809 GC_err_puts(path
); GC_err_puts("\n");
810 ABORT("Couldn't read OS/2 header");
812 if (E32_MAGIC1(hdr386
) != E32MAGIC1
|| E32_MAGIC2(hdr386
) != E32MAGIC2
) {
813 GC_err_puts("Executable has wrong OS/2 magic number:");
814 GC_err_puts(path
); GC_err_puts("\n");
815 ABORT("Bad OS/2 magic number");
817 if ( E32_BORDER(hdr386
) != E32LEBO
|| E32_WORDER(hdr386
) != E32LEWO
) {
818 GC_err_puts("Executable %s has wrong byte order: ");
819 GC_err_puts(path
); GC_err_puts("\n");
820 ABORT("Bad byte order");
822 if ( E32_CPU(hdr386
) == E32CPU286
) {
823 GC_err_puts("GC can't handle 80286 executables: ");
824 GC_err_puts(path
); GC_err_puts("\n");
827 if (fseek(myexefile
, E_LFANEW(hdrdos
) + E32_OBJTAB(hdr386
),
829 GC_err_puts("Seek to object table failed: ");
830 GC_err_puts(path
); GC_err_puts("\n");
831 ABORT("Seek to object table failed");
833 for (nsegs
= E32_OBJCNT(hdr386
); nsegs
> 0; nsegs
--) {
835 if (fread((char *)(&seg
), 1, sizeof seg
, myexefile
) < sizeof seg
) {
836 GC_err_puts("Couldn't read obj table entry from ");
837 GC_err_puts(path
); GC_err_puts("\n");
838 ABORT("Couldn't read obj table entry");
840 flags
= O32_FLAGS(seg
);
841 if (!(flags
& OBJWRITE
)) continue;
842 if (!(flags
& OBJREAD
)) continue;
843 if (flags
& OBJINVALID
) {
844 GC_err_printf0("Object with invalid pages?\n");
847 GC_add_roots_inner(O32_BASE(seg
), O32_BASE(seg
)+O32_SIZE(seg
), FALSE
);
/* Unfortunately, we have to handle win32s very differently from NT,   */
/* since VirtualQuery has very different semantics.  In particular,    */
/* under win32s a VirtualQuery call on an unmapped page returns an     */
/* invalid result.  Under NT, GC_register_data_segments is a no-op and */
/* all real work is done by GC_register_dynamic_libraries.  Under      */
/* win32s, we cannot find the data segments associated with dll's.     */
/* We register the main data segment here.                             */
GC_bool GC_win32s = FALSE;    /* We're running under win32s. */
863 GC_bool
GC_is_win32s()
865 DWORD v
= GetVersion();
867 /* Check that this is not NT, and Windows major version <= 3 */
868 return ((v
& 0x80000000) && (v
& 0xff) <= 3);
873 GC_win32s
= GC_is_win32s();
876 /* Return the smallest address a such that VirtualQuery */
877 /* returns correct results for all addresses between a and start. */
878 /* Assumes VirtualQuery returns correct information for start. */
879 ptr_t
GC_least_described_address(ptr_t start
)
881 MEMORY_BASIC_INFORMATION buf
;
888 GetSystemInfo(&sysinfo
);
889 limit
= sysinfo
.lpMinimumApplicationAddress
;
890 p
= (ptr_t
)((word
)start
& ~(GC_page_size
- 1));
892 q
= (LPVOID
)(p
- GC_page_size
);
893 if ((ptr_t
)q
> (ptr_t
)p
/* underflow */ || q
< limit
) break;
894 result
= VirtualQuery(q
, &buf
, sizeof(buf
));
895 if (result
!= sizeof(buf
) || buf
.AllocationBase
== 0) break;
896 p
= (ptr_t
)(buf
.AllocationBase
);
901 /* Is p the start of either the malloc heap, or of one of our */
903 GC_bool
GC_is_heap_base (ptr_t p
)
908 # ifndef REDIRECT_MALLOC
909 static ptr_t malloc_heap_pointer
= 0;
911 if (0 == malloc_heap_pointer
) {
912 MEMORY_BASIC_INFORMATION buf
;
913 register DWORD result
= VirtualQuery(malloc(1), &buf
, sizeof(buf
));
915 if (result
!= sizeof(buf
)) {
916 ABORT("Weird VirtualQuery result");
918 malloc_heap_pointer
= (ptr_t
)(buf
.AllocationBase
);
920 if (p
== malloc_heap_pointer
) return(TRUE
);
922 for (i
= 0; i
< GC_n_heap_bases
; i
++) {
923 if (GC_heap_bases
[i
] == p
) return(TRUE
);
928 void GC_register_root_section(ptr_t static_root
)
930 MEMORY_BASIC_INFORMATION buf
;
936 char * limit
, * new_limit
;
938 if (!GC_win32s
) return;
939 p
= base
= limit
= GC_least_described_address(static_root
);
940 GetSystemInfo(&sysinfo
);
941 while (p
< sysinfo
.lpMaximumApplicationAddress
) {
942 result
= VirtualQuery(p
, &buf
, sizeof(buf
));
943 if (result
!= sizeof(buf
) || buf
.AllocationBase
== 0
944 || GC_is_heap_base(buf
.AllocationBase
)) break;
945 new_limit
= (char *)p
+ buf
.RegionSize
;
946 protect
= buf
.Protect
;
947 if (buf
.State
== MEM_COMMIT
948 && is_writable(protect
)) {
949 if ((char *)p
== limit
) {
952 if (base
!= limit
) GC_add_roots_inner(base
, limit
, FALSE
);
957 if (p
> (LPVOID
)new_limit
/* overflow */) break;
958 p
= (LPVOID
)new_limit
;
960 if (base
!= limit
) GC_add_roots_inner(base
, limit
, FALSE
);
963 void GC_register_data_segments()
967 GC_register_root_section((ptr_t
)(&dummy
));
972 void GC_register_data_segments()
974 struct Process
*proc
;
975 struct CommandLineInterface
*cli
;
984 GC_bool found_segment
= FALSE
;
985 extern char __data_size
[];
  dataSegSize = __data_size + 8;
  /* Can't find the location of __data_size, because */
  /* it's possible that it is inside the segment.    */
993 proc
= (struct Process
*)SysBase
->ThisTask
;
995 /* Reference: Amiga Guru Book Pages: 538ff,565,573
997 if (proc
->pr_Task
.tc_Node
.ln_Type
==NT_PROCESS
) {
998 if (proc
->pr_CLI
== NULL
) {
999 myseglist
= proc
->pr_SegList
;
1001 /* ProcLoaded 'Loaded as a command: '*/
1002 cli
= BADDR(proc
->pr_CLI
);
1003 myseglist
= cli
->cli_Module
;
1006 ABORT("Not a Process.");
1009 if (myseglist
== NULL
) {
1010 ABORT("Arrrgh.. can't find segments, aborting");
1013 /* xoper hunks Shell Process */
1016 for (data
= (ULONG
*)BADDR(myseglist
); data
!= NULL
;
1017 data
= (ULONG
*)BADDR(data
[0])) {
1018 if (((ULONG
) GC_register_data_segments
< (ULONG
) &data
[1]) ||
1019 ((ULONG
) GC_register_data_segments
> (ULONG
) &data
[1] + data
[-1])) {
1021 if (dataSegSize
== data
[-1]) {
1022 found_segment
= TRUE
;
1025 GC_add_roots_inner((char *)&data
[1],
1026 ((char *)&data
[1]) + data
[-1], FALSE
);
1031 if (!found_segment
) {
1032 ABORT("Can`t find correct Segments.\nSolution: Use an newer version of ixemul.library");
1037 #if 0 /* old version */
1038 void GC_register_data_segments()
1040 extern struct WBStartup
*_WBenchMsg
;
1041 struct Process
*proc
;
1042 struct CommandLineInterface
*cli
;
1046 if ( _WBenchMsg
!= 0 ) {
1047 if ((myseglist
= _WBenchMsg
->sm_Segment
) == 0) {
1048 GC_err_puts("No seglist from workbench\n");
1052 if ((proc
= (struct Process
*)FindTask(0)) == 0) {
1053 GC_err_puts("Cannot find process structure\n");
1056 if ((cli
= BADDR(proc
->pr_CLI
)) == 0) {
1057 GC_err_puts("No CLI\n");
1060 if ((myseglist
= cli
->cli_Module
) == 0) {
1061 GC_err_puts("No seglist from CLI\n");
1066 for (data
= (ULONG
*)BADDR(myseglist
); data
!= 0;
1067 data
= (ULONG
*)BADDR(data
[0])) {
1068 # ifdef AMIGA_SKIP_SEG
1069 if (((ULONG
) GC_register_data_segments
< (ULONG
) &data
[1]) ||
1070 ((ULONG
) GC_register_data_segments
> (ULONG
) &data
[1] + data
[-1])) {
1073 # endif /* AMIGA_SKIP_SEG */
1074 GC_add_roots_inner((char *)&data
[1],
1075 ((char *)&data
[1]) + data
[-1], FALSE
);
1079 #endif /* old version */
# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
      || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
char * GC_SysVGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                    & ~(sizeof(word) - 1);
        /* etext rounded to word boundary */
    word next_page = ((text_end + (word)max_page_size - 1)
                      & ~((word)max_page_size - 1));
    word page_offset = (text_end & ((word)max_page_size - 1));
    VOLATILE char * result = (char *)(next_page + page_offset);
    /* Note that this isn't equivalent to just adding           */
    /* max_page_size to &etext if &etext is at a page boundary. */

    GC_setup_temporary_fault_handler();
    if (setjmp(GC_jmp_buf) == 0) {
        /* Try writing to the address.  */
        *result = *result;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* We got here via a longjmp.  The address is not readable.      */
        /* This is known to happen under Solaris 2.4 + gcc, which place  */
        /* string constants in the text segment, but after etext.        */
        /* Use plan B.  Note that we now know there is a gap between     */
        /* text and data segments, so plan A bought us something.        */
        result = (char *)GC_find_limit((ptr_t)(DATAEND) - MIN_PAGE_SIZE, FALSE);
    }
    return((char *)result);
}
# endif
1119 void GC_register_data_segments()
1121 # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1123 # if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
1124 /* As of Solaris 2.3, the Solaris threads implementation */
1125 /* allocates the data structure for the initial thread with */
1126 /* sbrk at process startup. It needs to be scanned, so that */
1127 /* we don't lose some malloc allocated data structures */
1128 /* hanging from it. We're on thin ice here ... */
1129 extern caddr_t
sbrk();
1131 GC_add_roots_inner(DATASTART
, (char *)sbrk(0), FALSE
);
1133 GC_add_roots_inner(DATASTART
, (char *)(DATAEND
), FALSE
);
1136 # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1137 GC_add_roots_inner(DATASTART
, (char *) get_end(), FALSE
);
1141 # if defined(THINK_C)
1142 extern void* GC_MacGetDataStart(void);
1143 /* globals begin above stack and end at a5. */
1144 GC_add_roots_inner((ptr_t
)GC_MacGetDataStart(),
1145 (ptr_t
)LMGetCurrentA5(), FALSE
);
1147 # if defined(__MWERKS__)
1149 extern void* GC_MacGetDataStart(void);
1150 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1151 # if __option(far_data)
1152 extern void* GC_MacGetDataEnd(void);
1154 /* globals begin above stack and end at a5. */
1155 GC_add_roots_inner((ptr_t
)GC_MacGetDataStart(),
1156 (ptr_t
)LMGetCurrentA5(), FALSE
);
1157 /* MATTHEW: Handle Far Globals */
1158 # if __option(far_data)
            /* Far globals follow the QD globals: */
1160 GC_add_roots_inner((ptr_t
)LMGetCurrentA5(),
1161 (ptr_t
)GC_MacGetDataEnd(), FALSE
);
1164 extern char __data_start__
[], __data_end__
[];
1165 GC_add_roots_inner((ptr_t
)&__data_start__
,
1166 (ptr_t
)&__data_end__
, FALSE
);
1167 # endif /* __POWERPC__ */
1168 # endif /* __MWERKS__ */
1169 # endif /* !THINK_C */
1173 /* Dynamic libraries are added at every collection, since they may */
1177 # endif /* ! AMIGA */
1178 # endif /* ! MSWIN32 */
1182 * Auxiliary routines for obtaining memory from OS.
1185 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1186 && !defined(MSWIN32) && !defined(MACOS) && !defined(DOS4GW)
1189 extern caddr_t
sbrk();
1192 # define SBRK_ARG_T ptrdiff_t
1194 # define SBRK_ARG_T int
1198 /* The compiler seems to generate speculative reads one past the end of */
1199 /* an allocated object. Hence we need to make sure that the page */
1200 /* following the last heap page is also mapped. */
1201 ptr_t
GC_unix_get_mem(bytes
)
1204 caddr_t cur_brk
= (caddr_t
)sbrk(0);
1206 SBRK_ARG_T lsbs
= (word
)cur_brk
& (GC_page_size
-1);
1207 static caddr_t my_brk_val
= 0;
1209 if ((SBRK_ARG_T
)bytes
< 0) return(0); /* too big */
1211 if((caddr_t
)(sbrk(GC_page_size
- lsbs
)) == (caddr_t
)(-1)) return(0);
1213 if (cur_brk
== my_brk_val
) {
1214 /* Use the extra block we allocated last time. */
1215 result
= (ptr_t
)sbrk((SBRK_ARG_T
)bytes
);
1216 if (result
== (caddr_t
)(-1)) return(0);
1217 result
-= GC_page_size
;
1219 result
= (ptr_t
)sbrk(GC_page_size
+ (SBRK_ARG_T
)bytes
);
1220 if (result
== (caddr_t
)(-1)) return(0);
1222 my_brk_val
= result
+ bytes
+ GC_page_size
; /* Always page aligned */
1223 return((ptr_t
)result
);
1226 #else /* Not RS6000 */
#if defined(USE_MMAP)
/* Tested only under IRIX5 and Solaris 2 */

#ifdef USE_MMAP_FIXED
#   define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
        /* Seems to yield better performance on Solaris 2, but can      */
        /* be unreliable if something is already mapped at the address. */
#else
#   define GC_MMAP_FLAGS MAP_PRIVATE
#endif

ptr_t GC_unix_get_mem(bytes)
word bytes;
{
    static GC_bool initialized = FALSE;
    static int fd;
    void *result;
    static ptr_t last_addr = HEAP_START;

    if (!initialized) {
        fd = open("/dev/zero", O_RDONLY);
        initialized = TRUE;
    }
    if (bytes & (GC_page_size-1)) ABORT("Bad GET_MEM arg");
    result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
                  GC_MMAP_FLAGS, fd, 0 /* offset */);
    if (result == MAP_FAILED) return(0);
    last_addr = (ptr_t)result + bytes + GC_page_size - 1;
    last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
    return((ptr_t)result);
}

#else /* Not RS6000, not USE_MMAP */
1261 ptr_t
GC_unix_get_mem(bytes
)
1266 /* Bare sbrk isn't thread safe. Play by malloc rules. */
1267 /* The equivalent may be needed on other systems as well. */
1271 ptr_t cur_brk
= (ptr_t
)sbrk(0);
1272 SBRK_ARG_T lsbs
= (word
)cur_brk
& (GC_page_size
-1);
1274 if ((SBRK_ARG_T
)bytes
< 0) return(0); /* too big */
1276 if((ptr_t
)sbrk(GC_page_size
- lsbs
) == (ptr_t
)(-1)) return(0);
1278 result
= (ptr_t
)sbrk((SBRK_ARG_T
)bytes
);
1279 if (result
== (ptr_t
)(-1)) result
= 0;
1287 #endif /* Not USE_MMAP */
1288 #endif /* Not RS6000 */
1294 void * os2_alloc(size_t bytes
)
1298 if (DosAllocMem(&result
, bytes
, PAG_EXECUTE
| PAG_READ
|
1299 PAG_WRITE
| PAG_COMMIT
)
1303 if (result
== 0) return(os2_alloc(bytes
));
1311 word GC_n_heap_bases
= 0;
1313 ptr_t
GC_win32_get_mem(bytes
)
1319 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1320 /* There are also unconfirmed rumors of other */
1321 /* problems, so we dodge the issue. */
1322 result
= (ptr_t
) GlobalAlloc(0, bytes
+ HBLKSIZE
);
1323 result
= (ptr_t
)(((word
)result
+ HBLKSIZE
) & ~(HBLKSIZE
-1));
1325 result
= (ptr_t
) VirtualAlloc(NULL
, bytes
,
1326 MEM_COMMIT
| MEM_RESERVE
,
1327 PAGE_EXECUTE_READWRITE
);
1329 if (HBLKDISPL(result
) != 0) ABORT("Bad VirtualAlloc result");
1330 /* If I read the documentation correctly, this can */
1331 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1332 if (GC_n_heap_bases
>= MAX_HEAP_SECTS
) ABORT("Too many heap sections");
1333 GC_heap_bases
[GC_n_heap_bases
++] = result
;
1337 void GC_win32_free_heap ()
1340 while (GC_n_heap_bases
> 0) {
1341 GlobalFree (GC_heap_bases
[--GC_n_heap_bases
]);
1342 GC_heap_bases
[GC_n_heap_bases
] = 0;
/* For now, this only works on some Unix-like systems.  If you  */
/* have something else, don't define USE_MUNMAP.                */
/* We assume ANSI C to support this feature.                    */

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Compute a page aligned starting address for the unmap        */
/* operation on a block of size bytes starting at start.        */
/* Return 0 if the block is too small to make this feasible.    */
ptr_t GC_unmap_start(ptr_t start, word bytes)
{
    ptr_t result = start;
    /* Round start to next page boundary. */
    result += GC_page_size - 1;
    result = (ptr_t)((word)result & ~(GC_page_size - 1));
    if (result + GC_page_size > start + bytes) return 0;
    return result;
}

/* Compute end address for an unmap operation on the indicated  */
/* block.                                                        */
ptr_t GC_unmap_end(ptr_t start, word bytes)
{
    ptr_t end_addr = start + bytes;
    end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
    return end_addr;
}

/* We assume that GC_remap is called on exactly the same range  */
/* as a previous call to GC_unmap.  It is safe to consistently  */
/* round the endpoints in both places.                          */
void GC_unmap(ptr_t start, word bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;

    if (0 == start_addr) return;
    if (munmap(start_addr, len) != 0) ABORT("munmap failed");
    GC_unmapped_bytes += len;
}

void GC_remap(ptr_t start, word bytes)
{
    static int zero_descr = -1;
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;
    ptr_t result;

    if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
    if (0 == start_addr) return;
    result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
                  MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
    if (result != start_addr) {
        ABORT("mmap remapping failed");
    }
    GC_unmapped_bytes -= len;
}

/* Two adjacent blocks have already been unmapped and are about to      */
/* be merged.  Unmap the whole block.  This typically requires          */
/* that we unmap a small section in the middle that was not previously  */
/* unmapped due to alignment constraints.                               */
void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
{
    ptr_t start1_addr = GC_unmap_start(start1, bytes1);
    ptr_t end1_addr = GC_unmap_end(start1, bytes1);
    ptr_t start2_addr = GC_unmap_start(start2, bytes2);
    ptr_t end2_addr = GC_unmap_end(start2, bytes2);
    ptr_t start_addr = end1_addr;
    ptr_t end_addr = start2_addr;
    word len;

    GC_ASSERT(start1 + bytes1 == start2);
    if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
    if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
    if (0 == start_addr) return;
    len = end_addr - start_addr;
    if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
    GC_unmapped_bytes += len;
}

#endif /* USE_MUNMAP */
1439 /* Routine for pushing any additional roots. In THREADS */
1440 /* environment, this is also responsible for marking from */
1441 /* thread stacks. In the SRC_M3 case, it also handles */
1442 /* global variables. */
1444 void (*GC_push_other_roots
)() = 0;
1448 PCR_ERes
GC_push_thread_stack(PCR_Th_T
*t
, PCR_Any dummy
)
1450 struct PCR_ThCtl_TInfoRep info
;
1453 info
.ti_stkLow
= info
.ti_stkHi
= 0;
1454 result
= PCR_ThCtl_GetInfo(t
, &info
);
1455 GC_push_all_stack((ptr_t
)(info
.ti_stkLow
), (ptr_t
)(info
.ti_stkHi
));
/* Push the contents of an old object.  We treat this as stack  */
/* data only because that makes it robust against mark stack    */
1462 PCR_ERes
GC_push_old_obj(void *p
, size_t size
, PCR_Any data
)
1464 GC_push_all_stack((ptr_t
)p
, (ptr_t
)p
+ size
);
1465 return(PCR_ERes_okay
);
1469 void GC_default_push_other_roots()
1471 /* Traverse data allocated by previous memory managers. */
1473 extern struct PCR_MM_ProcsRep
* GC_old_allocator
;
1475 if ((*(GC_old_allocator
->mmp_enumerate
))(PCR_Bool_false
,
1478 ABORT("Old object enumeration failed");
1481 /* Traverse all thread stacks. */
1483 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack
,0))
1484 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1485 ABORT("Thread stack marking failed\n");
1493 # ifdef ALL_INTERIOR_POINTERS
1498 extern void ThreadF__ProcessStacks();
1500 void GC_push_thread_stack(start
, stop
)
1503 GC_push_all_stack((ptr_t
)start
, (ptr_t
)stop
+ sizeof(word
));
1506 /* Push routine with M3 specific calling convention. */
1507 GC_m3_push_root(dummy1
, p
, dummy2
, dummy3
)
1509 ptr_t dummy1
, dummy2
;
1514 if ((ptr_t
)(q
) >= GC_least_plausible_heap_addr
1515 && (ptr_t
)(q
) < GC_greatest_plausible_heap_addr
) {
1516 GC_push_one_checked(q
,FALSE
);
1520 /* M3 set equivalent to RTHeap.TracedRefTypes */
1521 typedef struct { int elts
[1]; } RefTypeSet
;
1522 RefTypeSet GC_TracedRefTypes
= {{0x1}};
1524 /* From finalize.c */
1525 extern void GC_push_finalizer_structures();
1527 /* From stubborn.c: */
1528 # ifdef STUBBORN_ALLOC
1529 extern GC_PTR
* GC_changing_list_start
;
1533 void GC_default_push_other_roots()
1535 /* Use the M3 provided routine for finding static roots. */
1536 /* This is a bit dubious, since it presumes no C roots. */
1537 /* We handle the collector roots explicitly. */
1539 # ifdef STUBBORN_ALLOC
1540 GC_push_one(GC_changing_list_start
);
1542 GC_push_finalizer_structures();
1543 RTMain__GlobalMapProc(GC_m3_push_root
, 0, GC_TracedRefTypes
);
1545 if (GC_words_allocd
> 0) {
1546 ThreadF__ProcessStacks(GC_push_thread_stack
);
1548 /* Otherwise this isn't absolutely necessary, and we have */
1549 /* startup ordering problems. */
1552 # endif /* SRC_M3 */
1554 # if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
1555 || defined(IRIX_THREADS) || defined(LINUX_THREADS) \
1556 || defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
1558 extern void GC_push_all_stacks();
1560 void GC_default_push_other_roots()
1562 GC_push_all_stacks();
1565 # endif /* SOLARIS_THREADS || ... */
1567 void (*GC_push_other_roots
)() = GC_default_push_other_roots
;
/*
 * Routines for accessing dirty bits on virtual pages.
 * We plan to eventually implement four strategies for doing so:
 * DEFAULT_VDB:  A simple dummy implementation that treats every page
 *               as possibly dirty.  This makes incremental collection
 *               useless, but the implementation is still correct.
 * PCR_VDB:      Use PPCR's virtual dirty bit facility.
 * PROC_VDB:     Use the /proc facility for reading dirty bits.  Only
 *               works under some SVR4 variants.  Even then, it may be
 *               too slow to be entirely satisfactory.  Requires reading
 *               dirty bits for entire address space.  Implementations tend
 *               to assume that the client is a (slow) debugger.
 * MPROTECT_VDB: Protect pages and then catch the faults to keep track of
 *               dirtied pages.  The implementation (and implementability)
 *               is highly system dependent.  This usually fails when system
 *               calls write to a protected page.  We prevent the read system
 *               call from doing so.  It is the client's responsibility to
 *               make sure that other system calls are similarly protected
 *               or write only to the stack.
 */

GC_bool GC_dirty_maintained = FALSE;
1596 /* All of the following assume the allocation lock is held, and */
1597 /* signals are disabled. */
1599 /* The client asserts that unallocated pages in the heap are never */
1602 /* Initialize virtual dirty bit implementation. */
1603 void GC_dirty_init()
1605 GC_dirty_maintained
= TRUE
;
1608 /* Retrieve system dirty bits for heap to a local buffer. */
/* Restore the system's notion of which pages are dirty.        */
1610 void GC_read_dirty()
1613 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1614 /* If the actual page size is different, this returns TRUE if any */
1615 /* of the pages overlapping h are dirty. This routine may err on the */
1616 /* side of labelling pages as dirty (and this implementation does). */
1618 GC_bool
GC_page_was_dirty(h
)
1625 * The following two routines are typically less crucial. They matter
1626 * most with large dynamic libraries, or if we can't accurately identify
1627 * stacks, e.g. under Solaris 2.X. Otherwise the following default
1628 * versions are adequate.
1631 /* Could any valid GC heap pointer ever have been written to this page? */
1633 GC_bool
GC_page_was_ever_dirty(h
)
1639 /* Reset the n pages starting at h to "was never dirty" status. */
1640 void GC_is_fresh(h
, n
)
1646 /* A call hints that h is about to be written. */
1647 /* May speed up some dirty bit implementations. */
1649 void GC_write_hint(h
)
1654 # endif /* DEFAULT_VDB */
1657 # ifdef MPROTECT_VDB
1660 * See DEFAULT_VDB for interface descriptions.
1664 * This implementation maintains dirty bits itself by catching write
1665 * faults and keeping track of them. We assume nobody else catches
1666 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
1667 * except as a result of a read system call. This means clients must
1668 * either ensure that system calls do not touch the heap, or must
1669 * provide their own wrappers analogous to the one for read.
1670 * We assume the page size is a multiple of HBLKSIZE.
1671 * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
1672 * tried to use portable code where easily possible. It is known
1673 * not to work under a number of other systems.
1678 # include <sys/mman.h>
1679 # include <signal.h>
1680 # include <sys/syscall.h>
1682 # define PROTECT(addr, len) \
1683 if (mprotect((caddr_t)(addr), (size_t)(len), \
1684 PROT_READ | OPT_PROT_EXEC) < 0) { \
1685 ABORT("mprotect failed"); \
1687 # define UNPROTECT(addr, len) \
1688 if (mprotect((caddr_t)(addr), (size_t)(len), \
1689 PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1690 ABORT("un-mprotect failed"); \
1695 # include <signal.h>
1697 static DWORD protect_junk
;
1698 # define PROTECT(addr, len) \
1699 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1701 DWORD last_error = GetLastError(); \
1702 GC_printf1("Last error code: %lx\n", last_error); \
1703 ABORT("VirtualProtect failed"); \
1705 # define UNPROTECT(addr, len) \
1706 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1708 ABORT("un-VirtualProtect failed"); \
1713 #if defined(SUNOS4) || defined(FREEBSD)
1714 typedef void (* SIG_PF
)();
1716 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX)
1718 typedef void (* SIG_PF
)(int);
1720 typedef void (* SIG_PF
)();
1723 #if defined(MSWIN32)
1724 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF
;
1726 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1729 #if defined(IRIX5) || defined(OSF1)
1730 typedef void (* REAL_SIG_PF
)(int, int, struct sigcontext
*);
1732 #if defined(SUNOS5SIGS)
1734 # define SIGINFO __siginfo
1736 # define SIGINFO siginfo
1739 typedef void (* REAL_SIG_PF
)(int, struct SIGINFO
*, void *);
1741 typedef void (* REAL_SIG_PF
)();
1745 # include <linux/version.h>
1746 # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(IA64)
1747 typedef struct sigcontext s_c
;
1749 typedef struct sigcontext_struct s_c
;
1751 # if defined(ALPHA) || defined(M68K)
1752 typedef void (* REAL_SIG_PF
)(int, int, s_c
*);
1755 typedef void (* REAL_SIG_PF
)(int, siginfo_t
*, s_c
*);
1757 typedef void (* REAL_SIG_PF
)(int, s_c
);
1761 /* Retrieve fault address from sigcontext structure by decoding */
1763 char * get_fault_addr(s_c
*sc
) {
1767 instr
= *((unsigned *)(sc
->sc_pc
));
1768 faultaddr
= sc
->sc_regs
[(instr
>> 16) & 0x1f];
1769 faultaddr
+= (word
) (((int)instr
<< 16) >> 16);
1770 return (char *)faultaddr
;
1772 # endif /* !ALPHA */
1775 SIG_PF GC_old_bus_handler
;
1776 SIG_PF GC_old_segv_handler
; /* Also old MSWIN32 ACCESS_VIOLATION filter */
1779 # if defined (SUNOS4) || defined(FREEBSD)
1780 void GC_write_fault_handler(sig
, code
, scp
, addr
)
1782 struct sigcontext
*scp
;
1785 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
1786 # define CODE_OK (FC_CODE(code) == FC_PROT \
1787 || (FC_CODE(code) == FC_OBJERR \
1788 && FC_ERRNO(code) == FC_PROT))
1791 # define SIG_OK (sig == SIGBUS)
1792 # define CODE_OK (code == BUS_PAGE_FAULT)
1795 # if defined(IRIX5) || defined(OSF1)
1797 void GC_write_fault_handler(int sig
, int code
, struct sigcontext
*scp
)
1798 # define SIG_OK (sig == SIGSEGV)
1800 # define CODE_OK (code == 2 /* experimentally determined */)
1803 # define CODE_OK (code == EACCES)
1807 # if defined(ALPHA) || defined(M68K)
1808 void GC_write_fault_handler(int sig
, int code
, s_c
* sc
)
1811 void GC_write_fault_handler(int sig
, siginfo_t
* si
, s_c
* scp
)
1813 void GC_write_fault_handler(int sig
, s_c sc
)
1816 # define SIG_OK (sig == SIGSEGV)
1817 # define CODE_OK TRUE
1818 /* Empirically c.trapno == 14, on IA32, but is that useful? */
1819 /* Should probably consider alignment issues on other */
1820 /* architectures. */
1822 # if defined(SUNOS5SIGS)
1824 void GC_write_fault_handler(int sig
, struct SIGINFO
*scp
, void * context
)
1826 void GC_write_fault_handler(sig
, scp
, context
)
1828 struct SIGINFO
*scp
;
1832 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
1833 # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
1834 || (scp -> si_code == BUS_ADRERR) \
1835 || (scp -> si_code == BUS_UNKNOWN) \
1836 || (scp -> si_code == SEGV_UNKNOWN) \
1837 || (scp -> si_code == BUS_OBJERR)
1839 # define SIG_OK (sig == SIGSEGV)
1840 # define CODE_OK (scp -> si_code == SEGV_ACCERR)
1843 # if defined(MSWIN32)
1844 LONG WINAPI
GC_write_fault_handler(struct _EXCEPTION_POINTERS
*exc_info
)
1845 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
1846 EXCEPTION_ACCESS_VIOLATION)
1847 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
1851 register unsigned i
;
1853 char * addr
= (char *) (size_t) (scp
-> sc_badvaddr
);
1855 # if defined(OSF1) && defined(ALPHA)
1856 char * addr
= (char *) (scp
-> sc_traparg_a0
);
1859 char * addr
= (char *) (scp
-> si_addr
);
1863 char * addr
= (char *) (sc
.cr2
);
1868 struct sigcontext
*scp
= (struct sigcontext
*)(&sc
);
1870 int format
= (scp
->sc_formatvec
>> 12) & 0xf;
1871 unsigned long *framedata
= (unsigned long *)(scp
+ 1);
1874 if (format
== 0xa || format
== 0xb) {
1877 } else if (format
== 7) {
1880 } else if (format
== 4) {
1883 if (framedata
[1] & 0x08000000) {
1884 /* correct addr on misaligned access */
1885 ea
= (ea
+4095)&(~4095);
1891 char * addr
= get_fault_addr(sc
);
1894 char * addr
= si
-> si_addr
;
1895 /* I believe this is claimed to work on all platforms for */
1896 /* Linux 2.3.47 and later. Hopefully we don't have to */
1897 /* worry about earlier kernels on IA64. */
1899 # if defined(POWERPC)
1900 char * addr
= (char *) (sc
.regs
->dar
);
1902 --> architecture
not supported
1909 # if defined(MSWIN32)
1910 char * addr
= (char *) (exc_info
-> ExceptionRecord
1911 -> ExceptionInformation
[1]);
1912 # define sig SIGSEGV
1915 if (SIG_OK
&& CODE_OK
) {
1916 register struct hblk
* h
=
1917 (struct hblk
*)((word
)addr
& ~(GC_page_size
-1));
1918 GC_bool in_allocd_block
;
1921 /* Address is only within the correct physical page. */
1922 in_allocd_block
= FALSE
;
1923 for (i
= 0; i
< divHBLKSZ(GC_page_size
); i
++) {
1924 if (HDR(h
+i
) != 0) {
1925 in_allocd_block
= TRUE
;
1929 in_allocd_block
= (HDR(addr
) != 0);
1931 if (!in_allocd_block
) {
1932 /* Heap blocks now begin and end on page boundaries */
1935 if (sig
== SIGSEGV
) {
1936 old_handler
= GC_old_segv_handler
;
1938 old_handler
= GC_old_bus_handler
;
1940 if (old_handler
== SIG_DFL
) {
1942 GC_err_printf1("Segfault at 0x%lx\n", addr
);
1943 ABORT("Unexpected bus error or segmentation fault");
1945 return(EXCEPTION_CONTINUE_SEARCH
);
1948 # if defined (SUNOS4) || defined(FREEBSD)
1949 (*old_handler
) (sig
, code
, scp
, addr
);
1952 # if defined (SUNOS5SIGS)
1953 (*(REAL_SIG_PF
)old_handler
) (sig
, scp
, context
);
1956 # if defined (LINUX)
1957 # if defined(ALPHA) || defined(M68K)
1958 (*(REAL_SIG_PF
)old_handler
) (sig
, code
, sc
);
1961 (*(REAL_SIG_PF
)old_handler
) (sig
, si
, scp
);
1963 (*(REAL_SIG_PF
)old_handler
) (sig
, sc
);
1968 # if defined (IRIX5) || defined(OSF1)
1969 (*(REAL_SIG_PF
)old_handler
) (sig
, code
, scp
);
1973 return((*old_handler
)(exc_info
));
1977 for (i
= 0; i
< divHBLKSZ(GC_page_size
); i
++) {
1978 register int index
= PHT_HASH(h
+i
);
1980 set_pht_entry_from_index(GC_dirty_pages
, index
);
1982 UNPROTECT(h
, GC_page_size
);
1983 # if defined(OSF1) || defined(LINUX)
1984 /* These reset the signal handler each time by default. */
1985 signal(SIGSEGV
, (SIG_PF
) GC_write_fault_handler
);
1987 /* The write may not take place before dirty bits are read. */
1988 /* But then we'll fault again ... */
1990 return(EXCEPTION_CONTINUE_EXECUTION
);
1996 return EXCEPTION_CONTINUE_SEARCH
;
1998 GC_err_printf1("Segfault at 0x%lx\n", addr
);
1999 ABORT("Unexpected bus error or segmentation fault");
2004 * We hold the allocation lock. We expect block h to be written
2007 void GC_write_hint(h
)
2010 register struct hblk
* h_trunc
;
2011 register unsigned i
;
2012 register GC_bool found_clean
;
2014 if (!GC_dirty_maintained
) return;
2015 h_trunc
= (struct hblk
*)((word
)h
& ~(GC_page_size
-1));
2016 found_clean
= FALSE
;
2017 for (i
= 0; i
< divHBLKSZ(GC_page_size
); i
++) {
2018 register int index
= PHT_HASH(h_trunc
+i
);
2020 if (!get_pht_entry_from_index(GC_dirty_pages
, index
)) {
2022 set_pht_entry_from_index(GC_dirty_pages
, index
);
2026 UNPROTECT(h_trunc
, GC_page_size
);
2030 void GC_dirty_init()
2032 #if defined(SUNOS5SIGS) || defined(IRIX5) /* || defined(OSF1) */
2033 struct sigaction act
, oldact
;
2035 act
.sa_flags
= SA_RESTART
;
2036 act
.sa_handler
= GC_write_fault_handler
;
2038 act
.sa_flags
= SA_RESTART
| SA_SIGINFO
;
2039 act
.sa_sigaction
= GC_write_fault_handler
;
2041 (void)sigemptyset(&act
.sa_mask
);
2044 GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
2046 GC_dirty_maintained
= TRUE
;
2047 if (GC_page_size
% HBLKSIZE
!= 0) {
2048 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2049 ABORT("Page size not multiple of HBLKSIZE");
2051 # if defined(SUNOS4) || defined(FREEBSD)
2052 GC_old_bus_handler
= signal(SIGBUS
, GC_write_fault_handler
);
2053 if (GC_old_bus_handler
== SIG_IGN
) {
2054 GC_err_printf0("Previously ignored bus error!?");
2055 GC_old_bus_handler
= SIG_DFL
;
2057 if (GC_old_bus_handler
!= SIG_DFL
) {
2059 GC_err_printf0("Replaced other SIGBUS handler\n");
2063 # if defined(OSF1) || defined(SUNOS4) || defined(LINUX)
2064 GC_old_segv_handler
= signal(SIGSEGV
, (SIG_PF
)GC_write_fault_handler
);
2065 if (GC_old_segv_handler
== SIG_IGN
) {
2066 GC_err_printf0("Previously ignored segmentation violation!?");
2067 GC_old_segv_handler
= SIG_DFL
;
2069 if (GC_old_segv_handler
!= SIG_DFL
) {
2071 GC_err_printf0("Replaced other SIGSEGV handler\n");
2075 # if defined(SUNOS5SIGS) || defined(IRIX5)
2076 # if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
2077 sigaction(SIGSEGV
, 0, &oldact
);
2078 sigaction(SIGSEGV
, &act
, 0);
2080 sigaction(SIGSEGV
, &act
, &oldact
);
2082 # if defined(_sigargs)
2083 /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2085 GC_old_segv_handler
= oldact
.sa_handler
;
2086 # else /* Irix 6.x or SUNOS5SIGS */
2087 if (oldact
.sa_flags
& SA_SIGINFO
) {
2088 GC_old_segv_handler
= (SIG_PF
)(oldact
.sa_sigaction
);
2090 GC_old_segv_handler
= oldact
.sa_handler
;
2093 if (GC_old_segv_handler
== SIG_IGN
) {
2094 GC_err_printf0("Previously ignored segmentation violation!?");
2095 GC_old_segv_handler
= SIG_DFL
;
2097 if (GC_old_segv_handler
!= SIG_DFL
) {
2099 GC_err_printf0("Replaced other SIGSEGV handler\n");
2103 sigaction(SIGBUS
, &act
, &oldact
);
2104 GC_old_bus_handler
= oldact
.sa_handler
;
2105 if (GC_old_segv_handler
!= SIG_DFL
) {
2107 GC_err_printf0("Replaced other SIGBUS handler\n");
2112 # if defined(MSWIN32)
2113 GC_old_segv_handler
= SetUnhandledExceptionFilter(GC_write_fault_handler
);
2114 if (GC_old_segv_handler
!= NULL
) {
2116 GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2119 GC_old_segv_handler
= SIG_DFL
;
2126 void GC_protect_heap()
2132 for (i
= 0; i
< GC_n_heap_sects
; i
++) {
2133 start
= GC_heap_sects
[i
].hs_start
;
2134 len
= GC_heap_sects
[i
].hs_bytes
;
2135 PROTECT(start
, len
);
/* We assume that either the world is stopped or it's OK to lose dirty */
/* bits while this is happening (as in GC_enable_incremental).         */
2141 void GC_read_dirty()
2143 BCOPY((word
*)GC_dirty_pages
, GC_grungy_pages
,
2144 (sizeof GC_dirty_pages
));
2145 BZERO((word
*)GC_dirty_pages
, (sizeof GC_dirty_pages
));
2149 GC_bool
GC_page_was_dirty(h
)
2152 register word index
= PHT_HASH(h
);
2154 return(HDR(h
) == 0 || get_pht_entry_from_index(GC_grungy_pages
, index
));
2158 * Acquiring the allocation lock here is dangerous, since this
2159 * can be called from within GC_call_with_alloc_lock, and the cord
2160 * package does so. On systems that allow nested lock acquisition, this
2162 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2165 void GC_begin_syscall()
2167 if (!I_HOLD_LOCK()) LOCK();
2170 void GC_end_syscall()
2172 if (!I_HOLD_LOCK()) UNLOCK();
2175 void GC_unprotect_range(addr
, len
)
2179 struct hblk
* start_block
;
2180 struct hblk
* end_block
;
2181 register struct hblk
*h
;
2184 if (!GC_incremental
) return;
2185 obj_start
= GC_base(addr
);
2186 if (obj_start
== 0) return;
2187 if (GC_base(addr
+ len
- 1) != obj_start
) {
2188 ABORT("GC_unprotect_range(range bigger than object)");
2190 start_block
= (struct hblk
*)((word
)addr
& ~(GC_page_size
- 1));
2191 end_block
= (struct hblk
*)((word
)(addr
+ len
- 1) & ~(GC_page_size
- 1));
2192 end_block
+= GC_page_size
/HBLKSIZE
- 1;
2193 for (h
= start_block
; h
<= end_block
; h
++) {
2194 register word index
= PHT_HASH(h
);
2196 set_pht_entry_from_index(GC_dirty_pages
, index
);
2198 UNPROTECT(start_block
,
2199 ((ptr_t
)end_block
- (ptr_t
)start_block
) + HBLKSIZE
);
2202 #if !defined(MSWIN32) && !defined(LINUX_THREADS)
2203 /* Replacement for UNIX system call. */
2204 /* Other calls that write to the heap */
2205 /* should be handled similarly. */
2206 # if defined(__STDC__) && !defined(SUNOS4)
2207 # include <unistd.h>
2208 # include <sys/uio.h>
2209 ssize_t
read(int fd
, void *buf
, size_t nbyte
)
2212 int read(fd
, buf
, nbyte
)
2214 int GC_read(fd
, buf
, nbyte
)
2224 GC_unprotect_range(buf
, (word
)nbyte
);
2225 # if defined(IRIX5) || defined(LINUX_THREADS)
2226 /* Indirect system call may not always be easily available. */
2227 /* We could call _read, but that would interfere with the */
2228 /* libpthread interception of read. */
2229 /* On Linux, we have to be careful with the linuxthreads */
2230 /* read interception. */
2235 iov
.iov_len
= nbyte
;
2236 result
= readv(fd
, &iov
, 1);
2239 /* The two zero args at the end of this list are because one
2240 IA-64 syscall() implementation actually requires six args
2241 to be passed, even though they aren't always used. */
2242 result
= syscall(SYS_read
, fd
, buf
, nbyte
, 0, 0);
2247 #endif /* !MSWIN32 && !LINUX */
/* We use the GNU ld call wrapping facility.                        */
/* This requires that the linker be invoked with "--wrap read".     */
/* This can be done by passing -Wl,"--wrap read" to gcc.            */
/* I'm not sure that this actually wraps whatever version of read   */
/* is called by stdio.  That code also mentions __read.             */
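/* For example (illustrative command line only; the file names are  */
/* placeholders):                                                    */
/*     gcc -o myprog myprog.c gc.a -Wl,"--wrap read"                 */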
#   include <unistd.h>

    ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
    {
        int result;

        GC_begin_syscall();
        GC_unprotect_range(buf, (word)nbyte);
        result = __real_read(fd, buf, nbyte);
        GC_end_syscall();
        return(result);
    }

/* We should probably also do this for __read, or whatever stdio   */
/* actually calls.                                                  */
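/* Illustrative sketch (not active code): a client whose own system    */
/* call writes into the GC heap could wrap it the same way, using      */
/* GC_begin_syscall, GC_unprotect_range and GC_end_syscall from this   */
/* file.  The wrapper name my_recv_into_heap and the choice of recv()  */
/* are hypothetical.                                                   */
#if 0
#include <sys/socket.h>

ssize_t my_recv_into_heap(int fd, void *buf, size_t nbyte)
{
    ssize_t result;

    GC_begin_syscall();
    GC_unprotect_range(buf, (word)nbyte);  /* unprotect and mark dirty */
    result = recv(fd, buf, nbyte, 0);      /* kernel may now write buf */
    GC_end_syscall();
    return(result);
}
#endif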
2272 GC_bool
GC_page_was_ever_dirty(h
)
2278 /* Reset the n pages starting at h to "was never dirty" status. */
2280 void GC_is_fresh(h
, n
)
2286 # endif /* MPROTECT_VDB */
 * See DEFAULT_VDB for interface descriptions.
 *
 * This implementation assumes a Solaris 2.X like /proc pseudo-file-system
 * from which we can read page modified bits.  This facility is far from
 * optimal (e.g. we would like to get the info for only some of the
 * address space), but it avoids intercepting system calls.
2302 #include <sys/types.h>
2303 #include <sys/signal.h>
2304 #include <sys/fault.h>
2305 #include <sys/syscall.h>
2306 #include <sys/procfs.h>
2307 #include <sys/stat.h>
2310 #define INITIAL_BUF_SZ 4096
2311 word GC_proc_buf_size
= INITIAL_BUF_SZ
;
#ifdef SOLARIS_THREADS
/* We don't have exact sp values for threads.  So we count on          */
/* occasionally declaring stack pages to be fresh.  Thus we            */
/* need a real implementation of GC_is_fresh.  We can't clear          */
/* entries in GC_written_pages, since that would declare all           */
/* pages with the given hash address to be fresh.                      */
#   define MAX_FRESH_PAGES 8*1024       /* Must be power of 2 */
    struct hblk ** GC_fresh_pages;      /* A direct mapped cache.      */
                                        /* Collisions are dropped.     */
#   define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
#   define ADD_FRESH_PAGE(h) \
        GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
#   define PAGE_IS_FRESH(h) \
        (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
#endif
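/* A minimal illustration (not part of the collector) of how this      */
/* direct-mapped cache behaves: a lookup hits only while the same page */
/* still occupies its slot, and an insert silently overwrites whatever */
/* previously hashed to that slot.                                     */
#if 0
static void example_fresh_cache_use(struct hblk *p, struct hblk *q)
{
    ADD_FRESH_PAGE(p);                  /* slot for p now holds p      */
    if (PAGE_IS_FRESH(p)) {
        /* p is considered fresh here.                                 */
    }
    ADD_FRESH_PAGE(q);                  /* if q maps to the same slot, */
                                        /* p is evicted (dropped)      */
}
#endif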
/* Add all pages in pht2 to pht1 */
void GC_or_pages(pht1, pht2)
page_hash_table pht1, pht2;
{
    register int i;

    for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
}
void GC_dirty_init()
{
    int fd;
    char buf[30];

    GC_dirty_maintained = TRUE;
    if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
        register int i;

        /* Objects were already allocated: assume they may all have    */
        /* been written to.                                            */
        for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
        GC_printf1("Allocated words:%lu:all pages may have been written\n",
                   (GC_words_allocd + GC_words_allocd_before_gc));
    }
    sprintf(buf, "/proc/%d", getpid());
    fd = open(buf, O_RDONLY);
    if (fd < 0) {
        ABORT("/proc open failed");
    }
    GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
    if (GC_proc_fd < 0) {
        ABORT("/proc ioctl failed");
    }
    GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
#   ifdef SOLARIS_THREADS
        GC_fresh_pages = (struct hblk **)
            GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
        if (GC_fresh_pages == 0) {
            GC_err_printf0("No space for fresh pages\n");
            EXIT();
        }
        BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
#   endif
}
/* Ignore write hints.  They don't help us here.        */
void GC_write_hint(h)
struct hblk *h;
{
}
#ifdef SOLARIS_THREADS
#   define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
#else
#   define READ(fd,buf,nbytes) read(fd, buf, nbytes)
#endif
void GC_read_dirty()
{
    unsigned long ps, np;
    int nmaps;
    ptr_t vaddr;
    struct prasmap * map;
    char * bufp;
    ptr_t current_addr, limit;
    int i;

    BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));

    bufp = GC_proc_buf;
    if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
        GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
                   GC_proc_buf_size);
        {
            /* Retry with larger buffer. */
            word new_size = 2 * GC_proc_buf_size;
            char * new_buf = GC_scratch_alloc(new_size);

            if (new_buf != 0) {
                GC_proc_buf = bufp = new_buf;
                GC_proc_buf_size = new_size;
            }
            if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
                WARN("Insufficient space for /proc read\n", 0);
                /* Punt: treat everything as dirty and written.        */
                memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
                memset(GC_written_pages, 0xff, sizeof(page_hash_table));
#               ifdef SOLARIS_THREADS
                    BZERO(GC_fresh_pages,
                          MAX_FRESH_PAGES * sizeof (struct hblk *));
#               endif
                return;
            }
        }
    }
    /* Copy dirty bits into GC_grungy_pages.  The buffer holds a       */
    /* prpageheader, followed, for each mapping, by a prasmap and then */
    /* one status byte per page, padded out to a long boundary.        */
    nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
    /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
                nmaps, PG_REFERENCED, PG_MODIFIED); */
    bufp = bufp + sizeof(struct prpageheader);
    for (i = 0; i < nmaps; i++) {
        map = (struct prasmap *)bufp;
        vaddr = (ptr_t)(map -> pr_vaddr);
        ps = map -> pr_pagesize;
        np = map -> pr_npage;
        /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
        limit = vaddr + ps * np;
        bufp += sizeof (struct prasmap);
        for (current_addr = vaddr;
             current_addr < limit; current_addr += ps){
            if ((*bufp++) & PG_MODIFIED) {
                register struct hblk * h = (struct hblk *) current_addr;

                while ((ptr_t)h < current_addr + ps) {
                    register word index = PHT_HASH(h);

                    set_pht_entry_from_index(GC_grungy_pages, index);
#                   ifdef SOLARIS_THREADS
                      {
                        register int slot = FRESH_PAGE_SLOT(h);

                        if (GC_fresh_pages[slot] == h) {
                            GC_fresh_pages[slot] = 0;
                        }
                      }
#                   endif
                    h++;
                }
            }
        }
        /* Round bufp up to the next long boundary before reading the  */
        /* next prasmap.                                               */
        bufp += sizeof(long) - 1;
        bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
    }
    /* Update GC_written_pages. */
    GC_or_pages(GC_written_pages, GC_grungy_pages);
#   ifdef SOLARIS_THREADS
        /* Make sure that old stacks are considered completely clean   */
        /* unless written again.                                       */
        GC_old_stacks_are_fresh();
#   endif
}
GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
    register word index = PHT_HASH(h);
    register GC_bool result;

    result = get_pht_entry_from_index(GC_grungy_pages, index);
#   ifdef SOLARIS_THREADS
        if (result && PAGE_IS_FRESH(h)) result = FALSE;
        /* This happens only if the page was declared fresh since      */
        /* the read_dirty call, e.g. because it's in an unused         */
        /* thread stack.  It's OK to treat it as clean, in             */
        /* that case.  And it's consistent with                        */
        /* GC_page_was_ever_dirty.                                     */
#   endif
    return(result);
}
GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    register word index = PHT_HASH(h);
    register GC_bool result;

    result = get_pht_entry_from_index(GC_written_pages, index);
#   ifdef SOLARIS_THREADS
        if (result && PAGE_IS_FRESH(h)) result = FALSE;
#   endif
    return(result);
}
/* Caller holds allocation lock.        */
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
    register word index;

#   ifdef SOLARIS_THREADS
        register word i;

        if (GC_fresh_pages != 0) {
            for (i = 0; i < n; i++) {
                ADD_FRESH_PAGE(h + i);
            }
        }
#   endif
}
# endif /* PROC_VDB */
# ifdef PCR_VDB

# include "vd/PCR_VD.h"

# define NPAGES (32*1024)       /* 128 MB */

PCR_VD_DB GC_grungy_bits[NPAGES];

ptr_t GC_vd_base;       /* Address corresponding to GC_grungy_bits[0]  */
                        /* HBLKSIZE aligned.                           */
void GC_dirty_init()
{
    GC_dirty_maintained = TRUE;
    /* For the time being, we assume the heap generally grows up */
    GC_vd_base = GC_heap_sects[0].hs_start;
    if (GC_vd_base == 0) {
        ABORT("Bad initial heap segment");
    }
    if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
        != PCR_ERes_okay) {
        ABORT("dirty bit initialization failed");
    }
}
void GC_read_dirty()
{
    /* lazily enable dirty bits on newly added heap sects */
    {
        static int onhs = 0;
        int nhs = GC_n_heap_sects;
        for( ; onhs < nhs; onhs++ ) {
            PCR_VD_WriteProtectEnable(
                GC_heap_sects[onhs].hs_start,
                GC_heap_sects[onhs].hs_bytes );
        }
    }

    if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
        != PCR_ERes_okay) {
        ABORT("dirty bit read failed");
    }
}
GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
    if ((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
        /* Outside the tracked region: assume dirty.    */
        return(TRUE);
    }
    return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
}
void GC_write_hint(h)
struct hblk *h;
{
    PCR_VD_WriteProtectDisable(h, HBLKSIZE);
    PCR_VD_WriteProtectEnable(h, HBLKSIZE);
}
# endif /* PCR_VDB */
/*
 * Call stack save code for debugging.
 * Should probably be in mach_dep.c, but that requires reorganization.
 */
        struct frame    *fr_savfp;
# if defined(SUNOS4)
#   include <machine/frame.h>
# else
#   if defined (DRSNX)
#     include <sys/sparc/frame.h>
#   else
#     if defined(OPENBSD)
#       include <frame.h>
#     else
#       include <sys/frame.h>
#     endif
#   endif
# endif
# if NARGS > 6
  --> We only know how to get the first 6 arguments
# endif
#ifdef SAVE_CALL_CHAIN
/* Fill in the pc and argument information for up to NFRAMES of my     */
/* callers.  Ignore my frame and my caller's frame.                    */

#ifdef OPENBSD
#  define FR_SAVFP fr_fp
#  define FR_SAVPC fr_pc
#else
#  define FR_SAVFP fr_savfp
#  define FR_SAVPC fr_savpc
#endif
#if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
  /* 64-bit SPARC stores biased frame pointers; compensate with the    */
  /* standard V9 stack bias when following them.                       */
#  define BIAS 2047
#else
#  define BIAS 0
#endif
void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
  struct frame *frame;
  struct frame *fp;
  int nframes = 0;
  word GC_save_regs_in_stack();

  frame = (struct frame *) GC_save_regs_in_stack ();

  for (fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
       fp != 0 && nframes < NFRAMES;
       fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
      register int i;

      info[nframes].ci_pc = fp->FR_SAVPC;
      for (i = 0; i < NARGS; i++) {
          /* Arguments are stored complemented so they are not         */
          /* mistaken for pointers by the collector.                   */
          info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
      }
  }
  if (nframes < NFRAMES) info[nframes].ci_pc = 0;
}
#endif /* SAVE_CALL_CHAIN */