/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# if defined(LINUX) && !defined(POWERPC)
#   include <linux/version.h>
#   if (LINUX_VERSION_CODE <= 0x10400)
      /* Ugly hack to get struct sigcontext_struct definition.  Required */
      /* for some early 1.3.X releases.  Will hopefully go away soon.    */
      /* In some later Linux releases, asm/sigcontext.h may have to      */
      /* be included instead.                                            */
#     include <asm/signal.h>
#   else
      /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
      /* struct sigcontext.  libc6 (glibc2) uses "struct sigcontext" in     */
      /* prototypes, so we have to include the top-level sigcontext.h to    */
      /* make sure the former gets defined to be the latter if appropriate. */
#     include <features.h>
#     if 2 <= __GLIBC__
#       if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
          /* glibc 2.1 no longer has sigcontext.h.  But signal.h */
          /* has the right declaration for glibc 2.1.            */
#         include <sigcontext.h>
#       endif /* 0 == __GLIBC_MINOR__ */
#     else /* not 2 <= __GLIBC__ */
        /* libc5 doesn't have <sigcontext.h>: go directly with the kernel   */
        /* one.  Check LINUX_VERSION_CODE to see which we should reference. */
#       include <asm/sigcontext.h>
#     endif /* 2 <= __GLIBC__ */
#   endif
# endif
# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS)
#   include <sys/types.h>
#   if !defined(MSWIN32) && !defined(SUNOS4)
/* Blatantly OS dependent routines, except for those that are related */
/* to dynamic loading.                                                 */
# if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
#   define NEED_FIND_LIMIT
# endif

# if defined(IRIX_THREADS) || defined(HPUX_THREADS)
#   define NEED_FIND_LIMIT
# endif

# if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
#   define NEED_FIND_LIMIT
# endif

# if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
#   define NEED_FIND_LIMIT
# endif

# if defined(LINUX) && \
    (defined(POWERPC) || defined(SPARC) || defined(ALPHA) || defined(IA64) \
#   define NEED_FIND_LIMIT
# endif

#ifdef NEED_FIND_LIMIT
88 # include <machine/trap.h>
92 # include <proto/exec.h>
93 # include <proto/dos.h>
94 # include <dos/dosextens.h>
95 # include <workbench/startup.h>
99 # define WIN32_LEAN_AND_MEAN
101 # include <windows.h>
105 # include <Processes.h>
109 # include <sys/uio.h>
110 # include <malloc.h> /* for locking */
113 # include <sys/types.h>
114 # include <sys/mman.h>
115 # include <sys/stat.h>
120 # include <sys/siginfo.h>
123 # define setjmp(env) sigsetjmp(env, 1)
124 # define longjmp(env, val) siglongjmp(env, val)
125 # define jmp_buf sigjmp_buf
/* Apparently necessary for djgpp 2.01.  May cause problems with */
/* other versions.                                                */
typedef long unsigned int caddr_t;
# include "il/PCR_IL.h"
# include "th/PCR_ThCtl.h"
# include "mm/PCR_MM.h"

#if !defined(NO_EXECUTE_PERMISSION)
#   define OPT_PROT_EXEC PROT_EXEC
#else
#   define OPT_PROT_EXEC 0
#endif
#if defined(SEARCH_FOR_DATA_START)
  /* The I386 case can be handled without a search.  The Alpha case  */
  /* used to be handled differently as well, but the rules changed   */
  /* for recent Linux versions.  This seems to be the easiest way to */
  /* cover all versions.                                             */

# pragma weak __data_start
  extern int __data_start;
# pragma weak data_start
  extern int data_start;

  void GC_init_linux_data_start()
  {
    extern ptr_t GC_find_limit();

    /* Try the easy approaches first: */
    if (&__data_start != 0) {
        GC_data_start = (ptr_t)(&__data_start);
        return;
    }
    if (&data_start != 0) {
        GC_data_start = (ptr_t)(&data_start);
        return;
    }
    GC_data_start = GC_find_limit((ptr_t)(&_end), FALSE);
  }
#endif
# ifndef ECOS_GC_MEMORY_SIZE
#   define ECOS_GC_MEMORY_SIZE (448 * 1024)
# endif /* ECOS_GC_MEMORY_SIZE */

// setjmp() function, as described in ANSI para 7.6.1.1
#define setjmp( __env__ )  hal_setjmp( __env__ )

// FIXME: This is a simple way of allocating memory which is
// compatible with ECOS early releases.  Later releases use a more
// sophisticated means of allocating memory than this simple static
// allocator, but this method is at least bound to work.
static char memory[ECOS_GC_MEMORY_SIZE];
static char *brk = memory;

static void *tiny_sbrk(ptrdiff_t increment)
{
  void *p = brk;

  brk += increment;

  if (brk > memory + sizeof memory)
    {
      brk = p;
      return NULL;
    }

  return p;
}
#define sbrk tiny_sbrk
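/* Illustrative sketch (not compiled): how a static bump allocator like */
/* tiny_sbrk above can stand in for sbrk().  The arena size and the     */
/* failure convention here are assumptions for illustration only; the   */
/* real tiny_sbrk above is the authoritative version.                   */
#if 0
#include <stddef.h>

#define ARENA_SIZE (64 * 1024)
static char arena[ARENA_SIZE];
static char *arena_brk = arena;

static void *example_sbrk(ptrdiff_t increment)
{
    char *old_brk = arena_brk;

    if (increment > ARENA_SIZE - (arena_brk - arena)) {
        return NULL;            /* arena exhausted */
    }
    arena_brk += increment;
    return old_brk;             /* old break is the start of the new block */
}
#endif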
# if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */

struct exe_hdr {
    unsigned short      magic_number;
    unsigned short      padding[29];
    long                new_exe_offset;
};

#define E_MAGIC(x)      (x).magic_number
#define EMAGIC          0x5A4D
#define E_LFANEW(x)     (x).new_exe_offset

struct e32_exe {
    unsigned char       magic_number[2];
    unsigned char       byte_order;
    unsigned char       word_order;
    unsigned long       exe_format_level;
    unsigned short      cpu;
    unsigned long       padding1[13];
    unsigned long       object_table_offset;
    unsigned long       object_count;
    unsigned long       padding2[31];
};

#define E32_MAGIC1(x)   (x).magic_number[0]
#define E32MAGIC1       'L'
#define E32_MAGIC2(x)   (x).magic_number[1]
#define E32MAGIC2       'X'
#define E32_BORDER(x)   (x).byte_order
#define E32_WORDER(x)   (x).word_order
#define E32_CPU(x)      (x).cpu
#define E32_OBJTAB(x)   (x).object_table_offset
#define E32_OBJCNT(x)   (x).object_count

struct o32_obj {
    unsigned long       size;
    unsigned long       base;
    unsigned long       flags;
    unsigned long       pagemap;
    unsigned long       mapsize;
    unsigned long       reserved;
};

#define O32_FLAGS(x)    (x).flags
#define OBJREAD         0x0001L
#define OBJWRITE        0x0002L
#define OBJINVALID      0x0080L
#define O32_SIZE(x)     (x).size
#define O32_BASE(x)     (x).base

# else  /* IBM's compiler */

/* A kludge to get around what appears to be a header file bug */
# define WORD unsigned short
# define DWORD unsigned long

# endif  /* __IBMC__ */

# define INCL_DOSEXCEPTIONS
# define INCL_DOSPROCESS
# define INCL_DOSERRORS
# define INCL_DOSMODULEMGR
# define INCL_DOSMEMMGR
/* Disable and enable signals during nontrivial allocations */

void GC_disable_signals(void)
{
    ULONG nest;

    DosEnterMustComplete(&nest);
    if (nest != 1) ABORT("nested GC_disable_signals");
}

void GC_enable_signals(void)
{
    ULONG nest;

    DosExitMustComplete(&nest);
    if (nest != 0) ABORT("GC_enable_signals");
}
# if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
     && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
     && !defined(NO_SIGSET)

# if defined(sigmask) && !defined(UTS4)
    /* Use the traditional BSD interface */
#   define SIGSET_T int
#   define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
#   define SIG_FILL(set)  (set) = 0x7fffffff
        /* Setting the leading bit appears to provoke a bug in some  */
        /* longjmp implementations.  Most systems appear not to have */
        /* a signal 32.                                               */
#   define SIGSETMASK(old, new) (old) = sigsetmask(new)
# else
    /* Use POSIX/SYSV interface */
#   define SIGSET_T sigset_t
#   define SIG_DEL(set, signal) sigdelset(&(set), (signal))
#   define SIG_FILL(set) sigfillset(&set)
#   define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
# endif
static GC_bool mask_initialized = FALSE;

static SIGSET_T new_mask;

static SIGSET_T old_mask;

static SIGSET_T dummy;

#if defined(PRINTSTATS) && !defined(THREADS)
# define CHECK_SIGNALS
  int GC_sig_disabled = 0;
#endif
void GC_disable_signals()
{
    if (!mask_initialized) {
        SIG_FILL(new_mask);

        SIG_DEL(new_mask, SIGSEGV);
        SIG_DEL(new_mask, SIGILL);
        SIG_DEL(new_mask, SIGQUIT);
#       ifdef SIGBUS
            SIG_DEL(new_mask, SIGBUS);
#       endif
#       ifdef SIGIOT
            SIG_DEL(new_mask, SIGIOT);
#       endif
#       ifdef SIGEMT
            SIG_DEL(new_mask, SIGEMT);
#       endif
#       ifdef SIGTRAP
            SIG_DEL(new_mask, SIGTRAP);
#       endif
        mask_initialized = TRUE;
    }
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 0) ABORT("Nested disables");
        GC_sig_disabled++;
#   endif
    SIGSETMASK(old_mask,new_mask);
}

void GC_enable_signals()
{
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 1) ABORT("Unmatched enable");
        GC_sig_disabled--;
#   endif
    SIGSETMASK(dummy,old_mask);
}
/* Ivan Demakov: simplest way (to me) */
#if defined (DOS4GW) || defined (NO_SIGSET)
  void GC_disable_signals() { }
  void GC_enable_signals() { }
#endif
/* Find the page size */

# ifdef MSWIN32
  void GC_setpagesize()
  {
    SYSTEM_INFO sysinfo;

    GetSystemInfo(&sysinfo);
    GC_page_size = sysinfo.dwPageSize;
  }

# else
#   if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
       || defined(USE_MUNMAP)
      void GC_setpagesize()
      {
        GC_page_size = GETPAGESIZE();
      }
#   else
      /* It's acceptable to fake it. */
      void GC_setpagesize()
      {
        GC_page_size = HBLKSIZE;
      }
#   endif
# endif
/*
 * Find the base of the stack.
 * Used only in single-threaded environment.
 * With threads, GC_mark_roots needs to know how to do this.
 * Called with allocator lock held.
 */
# ifdef MSWIN32
# define is_writable(prot) ((prot) == PAGE_READWRITE \
                            || (prot) == PAGE_WRITECOPY \
                            || (prot) == PAGE_EXECUTE_READWRITE \
                            || (prot) == PAGE_EXECUTE_WRITECOPY)
/* Return the number of bytes that are writable starting at p. */
/* The pointer p is assumed to be page aligned.                */
/* If base is not 0, *base becomes the beginning of the        */
/* allocation region containing p.                             */
word GC_get_writable_length(ptr_t p, ptr_t *base)
{
    MEMORY_BASIC_INFORMATION buf;
    word result;
    word protect;

    result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
    if (base != 0) *base = (ptr_t)(buf.AllocationBase);
    protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
    if (!is_writable(protect)) {
        return(0);
    }
    if (buf.State != MEM_COMMIT) return(0);
    return(buf.RegionSize);
}

ptr_t GC_get_stack_base()
{
    int dummy;
    ptr_t sp = (ptr_t)(&dummy);
    ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
    word size = GC_get_writable_length(trunc_sp, 0);

    return(trunc_sp + size);
}
474 ptr_t
GC_get_stack_base()
479 if (DosGetInfoBlocks(&ptib
, &ppib
) != NO_ERROR
) {
480 GC_err_printf0("DosGetInfoBlocks failed\n");
481 ABORT("DosGetInfoBlocks failed\n");
483 return((ptr_t
)(ptib
-> tib_pstacklimit
));
490 ptr_t
GC_get_stack_base()
492 struct Process
*proc
= (struct Process
*)SysBase
->ThisTask
;
494 /* Reference: Amiga Guru Book Pages: 42,567,574 */
495 if (proc
->pr_Task
.tc_Node
.ln_Type
==NT_PROCESS
496 && proc
->pr_CLI
!= NULL
) {
497 /* first ULONG is StackSize */
498 /*longPtr = proc->pr_ReturnAddr;
501 return (char *)proc
->pr_ReturnAddr
+ sizeof(ULONG
);
503 return (char *)proc
->pr_Task
.tc_SPUpper
;
507 #if 0 /* old version */
508 ptr_t
GC_get_stack_base()
510 extern struct WBStartup
*_WBenchMsg
;
514 struct Process
*proc
;
515 struct CommandLineInterface
*cli
;
518 if ((task
= FindTask(0)) == 0) {
519 GC_err_puts("Cannot find own task structure\n");
520 ABORT("task missing");
522 proc
= (struct Process
*)task
;
523 cli
= BADDR(proc
->pr_CLI
);
525 if (_WBenchMsg
!= 0 || cli
== 0) {
526 size
= (char *)task
->tc_SPUpper
- (char *)task
->tc_SPLower
;
528 size
= cli
->cli_DefaultStack
* 4;
530 return (ptr_t
)(__base
+ GC_max(size
, __stack
));
534 # else /* !AMIGA, !OS2, ... */
# ifdef NEED_FIND_LIMIT
  /* Some tools to implement HEURISTIC2 */
#   define MIN_PAGE_SIZE 256    /* Smallest conceivable page size, bytes */
    /* static */ jmp_buf GC_jmp_buf;

    void GC_fault_handler(sig)
    int sig;
    {
        longjmp(GC_jmp_buf, 1);
    }

#   ifdef __STDC__
      typedef void (*handler)(int);
#   else
      typedef void (*handler)();
#   endif
554 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
555 static struct sigaction old_segv_act
;
556 # if defined(_sigargs) || defined(HPUX) /* !Irix6.x */
557 static struct sigaction old_bus_act
;
560 static handler old_segv_handler
, old_bus_handler
;
563 void GC_setup_temporary_fault_handler()
566 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
567 struct sigaction act
;
569 act
.sa_handler
= GC_fault_handler
;
570 act
.sa_flags
= SA_RESTART
| SA_NODEFER
;
571 /* The presence of SA_NODEFER represents yet another gross */
572 /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
573 /* interact correctly with -lthread. We hide the confusion */
574 /* by making sure that signal handling doesn't affect the */
577 (void) sigemptyset(&act
.sa_mask
);
579 /* Older versions have a bug related to retrieving and */
580 /* and setting a handler at the same time. */
581 (void) sigaction(SIGSEGV
, 0, &old_segv_act
);
582 (void) sigaction(SIGSEGV
, &act
, 0);
584 (void) sigaction(SIGSEGV
, &act
, &old_segv_act
);
585 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
587 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
588 /* Pthreads doesn't exist under Irix 5.x, so we */
589 /* don't have to worry in the threads case. */
590 (void) sigaction(SIGBUS
, &act
, &old_bus_act
);
592 # endif /* IRIX_THREADS */
594 old_segv_handler
= signal(SIGSEGV
, GC_fault_handler
);
596 old_bus_handler
= signal(SIGBUS
, GC_fault_handler
);
602 void GC_reset_fault_handler()
605 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
606 (void) sigaction(SIGSEGV
, &old_segv_act
, 0);
607 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
609 (void) sigaction(SIGBUS
, &old_bus_act
, 0);
612 (void) signal(SIGSEGV
, old_segv_handler
);
614 (void) signal(SIGBUS
, old_bus_handler
);
/* Return the first non-addressable location > p (up) or      */
/* the smallest location q s.t. [q,p] is addressable (!up).   */
ptr_t GC_find_limit(p, up)
ptr_t p;
GC_bool up;
{
    static VOLATILE ptr_t result;
                /* Needs to be static, since otherwise it may not be */
                /* preserved across the longjmp.  Can safely be      */
                /* static since it's only called once, with the      */
                /* allocation lock held.                             */

    GC_setup_temporary_fault_handler();
    if (setjmp(GC_jmp_buf) == 0) {
        result = (ptr_t)(((word)(p))
                          & ~(MIN_PAGE_SIZE-1));
        for (;;) {
            if (up) {
                result += MIN_PAGE_SIZE;
            } else {
                result -= MIN_PAGE_SIZE;
            }
            GC_noop1((word)(*result));
        }
    }
    GC_reset_fault_handler();
    if (!up) {
        result += MIN_PAGE_SIZE;
    }
    return(result);
}
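/* Illustrative sketch (not compiled): the probe-until-fault idea used by  */
/* GC_find_limit above.  A fault handler longjmps back, so the first page  */
/* whose read faults terminates the scan.  Names prefixed with "example_"  */
/* are hypothetical and only for illustration.                             */
#if 0
#include <setjmp.h>
#include <signal.h>

static jmp_buf example_jb;

static void example_fault(int sig) { longjmp(example_jb, 1); }

/* Return the first page boundary above p whose first byte cannot be read. */
static char *example_find_upper_limit(char *p, size_t page)
{
    static volatile char *probe;        /* survives the longjmp */
    volatile char sink;

    signal(SIGSEGV, example_fault);
    probe = (volatile char *)((size_t)p & ~(page - 1));
    if (setjmp(example_jb) == 0) {
        for (;;) {
            probe += page;
            sink = *probe;              /* faults once we leave mapped memory */
        }
    }
    signal(SIGSEGV, SIG_DFL);
    (void)sink;
    return (char *)probe;
}
#endif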
#ifdef LINUX_STACKBOTTOM

#include <sys/types.h>
#include <sys/stat.h>

# define STAT_SKIP 27   /* Number of fields preceding startstack */
                        /* field in /proc/self/stat              */

  ptr_t GC_linux_stack_base(void)
  {
    /* We read the stack base value from /proc/self/stat.  We do this */
    /* using direct I/O system calls in order to avoid calling malloc */
    /* in case REDIRECT_MALLOC is defined.                            */
#   define STAT_BUF_SIZE 4096
#   ifdef USE_LD_WRAP
#     define STAT_READ __real_read
#   else
#     define STAT_READ read
#   endif
    char stat_buf[STAT_BUF_SIZE];
    int f;
    char c;
    word result = 0;
    size_t i, buf_offset = 0;

    f = open("/proc/self/stat", O_RDONLY);
    if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
        ABORT("Couldn't read /proc/self/stat");
    }
    c = stat_buf[buf_offset++];
    /* Skip the required number of fields.  This number is hopefully */
    /* constant across all Linux implementations.                    */
    for (i = 0; i < STAT_SKIP; ++i) {
        while (isspace(c)) c = stat_buf[buf_offset++];
        while (!isspace(c)) c = stat_buf[buf_offset++];
    }
    while (isspace(c)) c = stat_buf[buf_offset++];
    while (isdigit(c)) {
        result *= 10;
        result += c - '0';
        c = stat_buf[buf_offset++];
    }
    if (result < 0x10000000) ABORT("Absurd stack bottom value");
    return (ptr_t)result;
  }

#endif /* LINUX_STACKBOTTOM */
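/* Illustrative sketch (not compiled): what GC_linux_stack_base is parsing. */
/* The 28th whitespace-delimited field of /proc/self/stat is the            */
/* "startstack" value.  Unlike the code above, this sketch uses stdio for   */
/* brevity; the collector itself avoids stdio and malloc so it stays safe   */
/* under REDIRECT_MALLOC.                                                   */
#if 0
#include <stdio.h>

static unsigned long example_startstack(void)
{
    FILE *f = fopen("/proc/self/stat", "r");
    unsigned long val = 0;
    int i;

    if (f == NULL) return 0;
    /* Skip the 27 fields preceding startstack, then read it. */
    for (i = 0; i < 27; ++i) fscanf(f, "%*s");
    fscanf(f, "%lu", &val);
    fclose(f);
    return val;
}
#endif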
710 ptr_t
GC_get_stack_base()
715 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
717 # if defined(STACKBASE)
718 extern ptr_t STACKBASE
;
725 # ifdef STACK_GROWS_DOWN
726 result
= (ptr_t
)((((word
)(&dummy
))
727 + STACKBOTTOM_ALIGNMENT_M1
)
728 & ~STACKBOTTOM_ALIGNMENT_M1
);
730 result
= (ptr_t
)(((word
)(&dummy
))
731 & ~STACKBOTTOM_ALIGNMENT_M1
);
733 # endif /* HEURISTIC1 */
734 # ifdef LINUX_STACKBOTTOM
735 result
= GC_linux_stack_base();
738 # ifdef STACK_GROWS_DOWN
739 result
= GC_find_limit((ptr_t
)(&dummy
), TRUE
);
740 # ifdef HEURISTIC2_LIMIT
741 if (result
> HEURISTIC2_LIMIT
742 && (ptr_t
)(&dummy
) < HEURISTIC2_LIMIT
) {
743 result
= HEURISTIC2_LIMIT
;
747 result
= GC_find_limit((ptr_t
)(&dummy
), FALSE
);
748 # ifdef HEURISTIC2_LIMIT
749 if (result
< HEURISTIC2_LIMIT
750 && (ptr_t
)(&dummy
) > HEURISTIC2_LIMIT
) {
751 result
= HEURISTIC2_LIMIT
;
756 # endif /* HEURISTIC2 */
757 # ifdef STACK_GROWS_DOWN
758 if (result
== 0) result
= (ptr_t
)(signed_word
)(-sizeof(ptr_t
));
761 # endif /* STACKBOTTOM */
762 # endif /* STACKBASE */
766 # endif /* ! AMIGA */
768 # endif /* ! MSWIN32 */
771 * Register static data segment(s) as roots.
772 * If more data segments are added later then they need to be registered
773 * add that point (as we do with SunOS dynamic loading),
774 * or GC_mark_roots needs to check for them (as we do with PCR).
775 * Called with allocator lock held.
780 void GC_register_data_segments()
784 HMODULE module_handle
;
    struct exe_hdr hdrdos;      /* MSDOS header.                   */
    struct e32_exe hdr386;      /* Real header for my executable   */
    struct o32_obj seg;         /* Current segment                 */
794 if (DosGetInfoBlocks(&ptib
, &ppib
) != NO_ERROR
) {
795 GC_err_printf0("DosGetInfoBlocks failed\n");
796 ABORT("DosGetInfoBlocks failed\n");
798 module_handle
= ppib
-> pib_hmte
;
799 if (DosQueryModuleName(module_handle
, PBUFSIZ
, path
) != NO_ERROR
) {
800 GC_err_printf0("DosQueryModuleName failed\n");
801 ABORT("DosGetInfoBlocks failed\n");
803 myexefile
= fopen(path
, "rb");
804 if (myexefile
== 0) {
805 GC_err_puts("Couldn't open executable ");
806 GC_err_puts(path
); GC_err_puts("\n");
807 ABORT("Failed to open executable\n");
809 if (fread((char *)(&hdrdos
), 1, sizeof hdrdos
, myexefile
) < sizeof hdrdos
) {
810 GC_err_puts("Couldn't read MSDOS header from ");
811 GC_err_puts(path
); GC_err_puts("\n");
812 ABORT("Couldn't read MSDOS header");
814 if (E_MAGIC(hdrdos
) != EMAGIC
) {
815 GC_err_puts("Executable has wrong DOS magic number: ");
816 GC_err_puts(path
); GC_err_puts("\n");
817 ABORT("Bad DOS magic number");
819 if (fseek(myexefile
, E_LFANEW(hdrdos
), SEEK_SET
) != 0) {
820 GC_err_puts("Seek to new header failed in ");
821 GC_err_puts(path
); GC_err_puts("\n");
822 ABORT("Bad DOS magic number");
824 if (fread((char *)(&hdr386
), 1, sizeof hdr386
, myexefile
) < sizeof hdr386
) {
825 GC_err_puts("Couldn't read MSDOS header from ");
826 GC_err_puts(path
); GC_err_puts("\n");
827 ABORT("Couldn't read OS/2 header");
829 if (E32_MAGIC1(hdr386
) != E32MAGIC1
|| E32_MAGIC2(hdr386
) != E32MAGIC2
) {
830 GC_err_puts("Executable has wrong OS/2 magic number:");
831 GC_err_puts(path
); GC_err_puts("\n");
832 ABORT("Bad OS/2 magic number");
834 if ( E32_BORDER(hdr386
) != E32LEBO
|| E32_WORDER(hdr386
) != E32LEWO
) {
835 GC_err_puts("Executable %s has wrong byte order: ");
836 GC_err_puts(path
); GC_err_puts("\n");
837 ABORT("Bad byte order");
839 if ( E32_CPU(hdr386
) == E32CPU286
) {
840 GC_err_puts("GC can't handle 80286 executables: ");
841 GC_err_puts(path
); GC_err_puts("\n");
844 if (fseek(myexefile
, E_LFANEW(hdrdos
) + E32_OBJTAB(hdr386
),
846 GC_err_puts("Seek to object table failed: ");
847 GC_err_puts(path
); GC_err_puts("\n");
848 ABORT("Seek to object table failed");
850 for (nsegs
= E32_OBJCNT(hdr386
); nsegs
> 0; nsegs
--) {
852 if (fread((char *)(&seg
), 1, sizeof seg
, myexefile
) < sizeof seg
) {
853 GC_err_puts("Couldn't read obj table entry from ");
854 GC_err_puts(path
); GC_err_puts("\n");
855 ABORT("Couldn't read obj table entry");
857 flags
= O32_FLAGS(seg
);
858 if (!(flags
& OBJWRITE
)) continue;
859 if (!(flags
& OBJREAD
)) continue;
860 if (flags
& OBJINVALID
) {
861 GC_err_printf0("Object with invalid pages?\n");
864 GC_add_roots_inner(O32_BASE(seg
), O32_BASE(seg
)+O32_SIZE(seg
), FALSE
);
/* Unfortunately, we have to handle win32s very differently from NT,   */
/* since VirtualQuery has very different semantics.  In particular,    */
/* under win32s a VirtualQuery call on an unmapped page returns an     */
/* invalid result.  Under NT, GC_register_data_segments is a no-op     */
/* and all real work is done by GC_register_dynamic_libraries.  Under  */
/* win32s, we cannot find the data segments associated with dll's.     */
/* We register the main data segment here.                             */
GC_bool GC_win32s = FALSE;      /* We're running under win32s. */
880 GC_bool
GC_is_win32s()
882 DWORD v
= GetVersion();
884 /* Check that this is not NT, and Windows major version <= 3 */
885 return ((v
& 0x80000000) && (v
& 0xff) <= 3);
890 GC_win32s
= GC_is_win32s();
893 /* Return the smallest address a such that VirtualQuery */
894 /* returns correct results for all addresses between a and start. */
895 /* Assumes VirtualQuery returns correct information for start. */
896 ptr_t
GC_least_described_address(ptr_t start
)
898 MEMORY_BASIC_INFORMATION buf
;
905 GetSystemInfo(&sysinfo
);
906 limit
= sysinfo
.lpMinimumApplicationAddress
;
907 p
= (ptr_t
)((word
)start
& ~(GC_page_size
- 1));
909 q
= (LPVOID
)(p
- GC_page_size
);
910 if ((ptr_t
)q
> (ptr_t
)p
/* underflow */ || q
< limit
) break;
911 result
= VirtualQuery(q
, &buf
, sizeof(buf
));
912 if (result
!= sizeof(buf
) || buf
.AllocationBase
== 0) break;
913 p
= (ptr_t
)(buf
.AllocationBase
);
/* Is p the start of either the malloc heap, or of one of our */
/* heap sections?                                              */
920 GC_bool
GC_is_heap_base (ptr_t p
)
925 # ifndef REDIRECT_MALLOC
926 static ptr_t malloc_heap_pointer
= 0;
928 if (0 == malloc_heap_pointer
) {
929 MEMORY_BASIC_INFORMATION buf
;
930 register DWORD result
= VirtualQuery(malloc(1), &buf
, sizeof(buf
));
932 if (result
!= sizeof(buf
)) {
933 ABORT("Weird VirtualQuery result");
935 malloc_heap_pointer
= (ptr_t
)(buf
.AllocationBase
);
937 if (p
== malloc_heap_pointer
) return(TRUE
);
939 for (i
= 0; i
< GC_n_heap_bases
; i
++) {
940 if (GC_heap_bases
[i
] == p
) return(TRUE
);
945 void GC_register_root_section(ptr_t static_root
)
947 MEMORY_BASIC_INFORMATION buf
;
953 char * limit
, * new_limit
;
955 if (!GC_win32s
) return;
956 p
= base
= limit
= GC_least_described_address(static_root
);
957 GetSystemInfo(&sysinfo
);
958 while (p
< sysinfo
.lpMaximumApplicationAddress
) {
959 result
= VirtualQuery(p
, &buf
, sizeof(buf
));
960 if (result
!= sizeof(buf
) || buf
.AllocationBase
== 0
961 || GC_is_heap_base(buf
.AllocationBase
)) break;
962 new_limit
= (char *)p
+ buf
.RegionSize
;
963 protect
= buf
.Protect
;
964 if (buf
.State
== MEM_COMMIT
965 && is_writable(protect
)) {
966 if ((char *)p
== limit
) {
969 if (base
!= limit
) GC_add_roots_inner(base
, limit
, FALSE
);
974 if (p
> (LPVOID
)new_limit
/* overflow */) break;
975 p
= (LPVOID
)new_limit
;
977 if (base
!= limit
) GC_add_roots_inner(base
, limit
, FALSE
);
980 void GC_register_data_segments()
984 GC_register_root_section((ptr_t
)(&dummy
));
989 void GC_register_data_segments()
991 struct Process
*proc
;
992 struct CommandLineInterface
*cli
;
1001 GC_bool found_segment
= FALSE
;
1002 extern char __data_size
[];
1004 dataSegSize
=__data_size
+8;
  /* Can't find the location of __data_size, because */
  /* it's possible that it is inside the segment.    */
1010 proc
= (struct Process
*)SysBase
->ThisTask
;
1012 /* Reference: Amiga Guru Book Pages: 538ff,565,573
1014 if (proc
->pr_Task
.tc_Node
.ln_Type
==NT_PROCESS
) {
1015 if (proc
->pr_CLI
== NULL
) {
1016 myseglist
= proc
->pr_SegList
;
1018 /* ProcLoaded 'Loaded as a command: '*/
1019 cli
= BADDR(proc
->pr_CLI
);
1020 myseglist
= cli
->cli_Module
;
1023 ABORT("Not a Process.");
1026 if (myseglist
== NULL
) {
1027 ABORT("Arrrgh.. can't find segments, aborting");
1030 /* xoper hunks Shell Process */
1033 for (data
= (ULONG
*)BADDR(myseglist
); data
!= NULL
;
1034 data
= (ULONG
*)BADDR(data
[0])) {
1035 if (((ULONG
) GC_register_data_segments
< (ULONG
) &data
[1]) ||
1036 ((ULONG
) GC_register_data_segments
> (ULONG
) &data
[1] + data
[-1])) {
1038 if (dataSegSize
== data
[-1]) {
1039 found_segment
= TRUE
;
1042 GC_add_roots_inner((char *)&data
[1],
1043 ((char *)&data
[1]) + data
[-1], FALSE
);
1048 if (!found_segment
) {
    ABORT("Can't find correct segments.\nSolution: Use a newer version of ixemul.library");
1054 #if 0 /* old version */
1055 void GC_register_data_segments()
1057 extern struct WBStartup
*_WBenchMsg
;
1058 struct Process
*proc
;
1059 struct CommandLineInterface
*cli
;
1063 if ( _WBenchMsg
!= 0 ) {
1064 if ((myseglist
= _WBenchMsg
->sm_Segment
) == 0) {
1065 GC_err_puts("No seglist from workbench\n");
1069 if ((proc
= (struct Process
*)FindTask(0)) == 0) {
1070 GC_err_puts("Cannot find process structure\n");
1073 if ((cli
= BADDR(proc
->pr_CLI
)) == 0) {
1074 GC_err_puts("No CLI\n");
1077 if ((myseglist
= cli
->cli_Module
) == 0) {
1078 GC_err_puts("No seglist from CLI\n");
1083 for (data
= (ULONG
*)BADDR(myseglist
); data
!= 0;
1084 data
= (ULONG
*)BADDR(data
[0])) {
1085 # ifdef AMIGA_SKIP_SEG
1086 if (((ULONG
) GC_register_data_segments
< (ULONG
) &data
[1]) ||
1087 ((ULONG
) GC_register_data_segments
> (ULONG
) &data
[1] + data
[-1])) {
1090 # endif /* AMIGA_SKIP_SEG */
1091 GC_add_roots_inner((char *)&data
[1],
1092 ((char *)&data
[1]) + data
[-1], FALSE
);
1096 #endif /* old version */
1101 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1102 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1103 char * GC_SysVGetDataStart(max_page_size
, etext_addr
)
1107 word text_end
= ((word
)(etext_addr
) + sizeof(word
) - 1)
1108 & ~(sizeof(word
) - 1);
1109 /* etext rounded to word boundary */
1110 word next_page
= ((text_end
+ (word
)max_page_size
- 1)
1111 & ~((word
)max_page_size
- 1));
1112 word page_offset
= (text_end
& ((word
)max_page_size
- 1));
1113 VOLATILE
char * result
= (char *)(next_page
+ page_offset
);
    /* Note that this isn't equivalent to just adding                 */
    /* max_page_size to &etext if &etext is at a page boundary.       */
1117 GC_setup_temporary_fault_handler();
1118 if (setjmp(GC_jmp_buf
) == 0) {
1119 /* Try writing to the address. */
1121 GC_reset_fault_handler();
1123 GC_reset_fault_handler();
1124 /* We got here via a longjmp. The address is not readable. */
1125 /* This is known to happen under Solaris 2.4 + gcc, which place */
1126 /* string constants in the text segment, but after etext. */
1127 /* Use plan B. Note that we now know there is a gap between */
1128 /* text and data segments, so plan A bought us something. */
1129 result
= (char *)GC_find_limit((ptr_t
)(DATAEND
) - MIN_PAGE_SIZE
, FALSE
);
1131 return((char *)result
);
1136 void GC_register_data_segments()
1138 # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1140 # if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
1141 /* As of Solaris 2.3, the Solaris threads implementation */
1142 /* allocates the data structure for the initial thread with */
1143 /* sbrk at process startup. It needs to be scanned, so that */
1144 /* we don't lose some malloc allocated data structures */
1145 /* hanging from it. We're on thin ice here ... */
1146 extern caddr_t
sbrk();
1148 GC_add_roots_inner(DATASTART
, (char *)sbrk(0), FALSE
);
1150 GC_add_roots_inner(DATASTART
, (char *)(DATAEND
), FALSE
);
1153 # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1154 GC_add_roots_inner(DATASTART
, (char *) get_end(), FALSE
);
1158 # if defined(THINK_C)
1159 extern void* GC_MacGetDataStart(void);
1160 /* globals begin above stack and end at a5. */
1161 GC_add_roots_inner((ptr_t
)GC_MacGetDataStart(),
1162 (ptr_t
)LMGetCurrentA5(), FALSE
);
1164 # if defined(__MWERKS__)
1166 extern void* GC_MacGetDataStart(void);
1167 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1168 # if __option(far_data)
1169 extern void* GC_MacGetDataEnd(void);
1171 /* globals begin above stack and end at a5. */
1172 GC_add_roots_inner((ptr_t
)GC_MacGetDataStart(),
1173 (ptr_t
)LMGetCurrentA5(), FALSE
);
1174 /* MATTHEW: Handle Far Globals */
1175 # if __option(far_data)
      /* Far globals follow the QD globals: */
1177 GC_add_roots_inner((ptr_t
)LMGetCurrentA5(),
1178 (ptr_t
)GC_MacGetDataEnd(), FALSE
);
1181 extern char __data_start__
[], __data_end__
[];
1182 GC_add_roots_inner((ptr_t
)&__data_start__
,
1183 (ptr_t
)&__data_end__
, FALSE
);
1184 # endif /* __POWERPC__ */
1185 # endif /* __MWERKS__ */
1186 # endif /* !THINK_C */
1190 /* Dynamic libraries are added at every collection, since they may */
1194 # endif /* ! AMIGA */
1195 # endif /* ! MSWIN32 */
1199 * Auxiliary routines for obtaining memory from OS.
1202 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1203 && !defined(MSWIN32) && !defined(MACOS) && !defined(DOS4GW)
1206 extern caddr_t
sbrk();
1209 # define SBRK_ARG_T ptrdiff_t
1211 # define SBRK_ARG_T int
1215 /* The compiler seems to generate speculative reads one past the end of */
1216 /* an allocated object. Hence we need to make sure that the page */
1217 /* following the last heap page is also mapped. */
1218 ptr_t
GC_unix_get_mem(bytes
)
1221 caddr_t cur_brk
= (caddr_t
)sbrk(0);
1223 SBRK_ARG_T lsbs
= (word
)cur_brk
& (GC_page_size
-1);
1224 static caddr_t my_brk_val
= 0;
1226 if ((SBRK_ARG_T
)bytes
< 0) return(0); /* too big */
1228 if((caddr_t
)(sbrk(GC_page_size
- lsbs
)) == (caddr_t
)(-1)) return(0);
1230 if (cur_brk
== my_brk_val
) {
1231 /* Use the extra block we allocated last time. */
1232 result
= (ptr_t
)sbrk((SBRK_ARG_T
)bytes
);
1233 if (result
== (caddr_t
)(-1)) return(0);
1234 result
-= GC_page_size
;
1236 result
= (ptr_t
)sbrk(GC_page_size
+ (SBRK_ARG_T
)bytes
);
1237 if (result
== (caddr_t
)(-1)) return(0);
1239 my_brk_val
= result
+ bytes
+ GC_page_size
; /* Always page aligned */
1240 return((ptr_t
)result
);
1243 #else /* Not RS6000 */
1245 #if defined(USE_MMAP)
1246 /* Tested only under IRIX5 and Solaris 2 */
1248 #ifdef USE_MMAP_FIXED
1249 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1250 /* Seems to yield better performance on Solaris 2, but can */
1251 /* be unreliable if something is already mapped at the address. */
1253 # define GC_MMAP_FLAGS MAP_PRIVATE
1256 ptr_t
GC_unix_get_mem(bytes
)
1259 static GC_bool initialized
= FALSE
;
1262 static ptr_t last_addr
= HEAP_START
;
1265 fd
= open("/dev/zero", O_RDONLY
);
1268 if (bytes
& (GC_page_size
-1)) ABORT("Bad GET_MEM arg");
1269 result
= mmap(last_addr
, bytes
, PROT_READ
| PROT_WRITE
| OPT_PROT_EXEC
,
1270 GC_MMAP_FLAGS
, fd
, 0/* offset */);
1271 if (result
== MAP_FAILED
) return(0);
1272 last_addr
= (ptr_t
)result
+ bytes
+ GC_page_size
- 1;
1273 last_addr
= (ptr_t
)((word
)last_addr
& ~(GC_page_size
- 1));
1274 return((ptr_t
)result
);
1277 #else /* Not RS6000, not USE_MMAP */
1278 ptr_t
GC_unix_get_mem(bytes
)
1283 /* Bare sbrk isn't thread safe. Play by malloc rules. */
1284 /* The equivalent may be needed on other systems as well. */
1288 ptr_t cur_brk
= (ptr_t
)sbrk(0);
1289 SBRK_ARG_T lsbs
= (word
)cur_brk
& (GC_page_size
-1);
1291 if ((SBRK_ARG_T
)bytes
< 0) return(0); /* too big */
1293 if((ptr_t
)sbrk(GC_page_size
- lsbs
) == (ptr_t
)(-1)) return(0);
1295 result
= (ptr_t
)sbrk((SBRK_ARG_T
)bytes
);
1296 if (result
== (ptr_t
)(-1)) result
= 0;
1304 #endif /* Not USE_MMAP */
1305 #endif /* Not RS6000 */
1311 void * os2_alloc(size_t bytes
)
1315 if (DosAllocMem(&result
, bytes
, PAG_EXECUTE
| PAG_READ
|
1316 PAG_WRITE
| PAG_COMMIT
)
1320 if (result
== 0) return(os2_alloc(bytes
));
1328 word GC_n_heap_bases
= 0;
1330 ptr_t
GC_win32_get_mem(bytes
)
1336 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1337 /* There are also unconfirmed rumors of other */
1338 /* problems, so we dodge the issue. */
1339 result
= (ptr_t
) GlobalAlloc(0, bytes
+ HBLKSIZE
);
1340 result
= (ptr_t
)(((word
)result
+ HBLKSIZE
) & ~(HBLKSIZE
-1));
1342 result
= (ptr_t
) VirtualAlloc(NULL
, bytes
,
1343 MEM_COMMIT
| MEM_RESERVE
,
1344 PAGE_EXECUTE_READWRITE
);
1346 if (HBLKDISPL(result
) != 0) ABORT("Bad VirtualAlloc result");
1347 /* If I read the documentation correctly, this can */
1348 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1349 if (GC_n_heap_bases
>= MAX_HEAP_SECTS
) ABORT("Too many heap sections");
1350 GC_heap_bases
[GC_n_heap_bases
++] = result
;
1354 void GC_win32_free_heap ()
1357 while (GC_n_heap_bases
> 0) {
1358 GlobalFree (GC_heap_bases
[--GC_n_heap_bases
]);
1359 GC_heap_bases
[GC_n_heap_bases
] = 0;
1369 /* For now, this only works on some Unix-like systems. If you */
1370 /* have something else, don't define USE_MUNMAP. */
1371 /* We assume ANSI C to support this feature. */
1373 #include <sys/mman.h>
1374 #include <sys/stat.h>
1375 #include <sys/types.h>
1378 /* Compute a page aligned starting address for the unmap */
1379 /* operation on a block of size bytes starting at start. */
1380 /* Return 0 if the block is too small to make this feasible. */
1381 ptr_t
GC_unmap_start(ptr_t start
, word bytes
)
1383 ptr_t result
= start
;
1384 /* Round start to next page boundary. */
1385 result
+= GC_page_size
- 1;
1386 result
= (ptr_t
)((word
)result
& ~(GC_page_size
- 1));
1387 if (result
+ GC_page_size
> start
+ bytes
) return 0;
1391 /* Compute end address for an unmap operation on the indicated */
1393 ptr_t
GC_unmap_end(ptr_t start
, word bytes
)
1395 ptr_t end_addr
= start
+ bytes
;
1396 end_addr
= (ptr_t
)((word
)end_addr
& ~(GC_page_size
- 1));
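/* Illustrative sketch (not compiled): the page rounding performed by       */
/* GC_unmap_start/GC_unmap_end above.  With a hypothetical 4096-byte page,  */
/* a block at 0x10010 of length 0x3000 yields an unmappable interior range  */
/* [0x11000, 0x13000); blocks smaller than a page yield none.               */
#if 0
#include <stdio.h>

int main(void)
{
    unsigned long page = 4096;
    unsigned long start = 0x10010, bytes = 0x3000;
    unsigned long lo = (start + page - 1) & ~(page - 1);  /* round start up */
    unsigned long hi = (start + bytes) & ~(page - 1);     /* round end down */

    if (lo + page > start + bytes) {
        printf("block too small to unmap anything\n");
    } else {
        printf("unmap [%#lx, %#lx)\n", lo, hi);
    }
    return 0;
}
#endif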
1400 /* We assume that GC_remap is called on exactly the same range */
1401 /* as a previous call to GC_unmap. It is safe to consistently */
1402 /* round the endpoints in both places. */
1403 void GC_unmap(ptr_t start
, word bytes
)
1405 ptr_t start_addr
= GC_unmap_start(start
, bytes
);
1406 ptr_t end_addr
= GC_unmap_end(start
, bytes
);
1407 word len
= end_addr
- start_addr
;
1408 if (0 == start_addr
) return;
1409 if (munmap(start_addr
, len
) != 0) ABORT("munmap failed");
1410 GC_unmapped_bytes
+= len
;
1414 void GC_remap(ptr_t start
, word bytes
)
1416 static int zero_descr
= -1;
1417 ptr_t start_addr
= GC_unmap_start(start
, bytes
);
1418 ptr_t end_addr
= GC_unmap_end(start
, bytes
);
1419 word len
= end_addr
- start_addr
;
1422 if (-1 == zero_descr
) zero_descr
= open("/dev/zero", O_RDWR
);
1423 if (0 == start_addr
) return;
1424 result
= mmap(start_addr
, len
, PROT_READ
| PROT_WRITE
| OPT_PROT_EXEC
,
1425 MAP_FIXED
| MAP_PRIVATE
, zero_descr
, 0);
1426 if (result
!= start_addr
) {
1427 ABORT("mmap remapping failed");
1429 GC_unmapped_bytes
-= len
;
1432 /* Two adjacent blocks have already been unmapped and are about to */
1433 /* be merged. Unmap the whole block. This typically requires */
1434 /* that we unmap a small section in the middle that was not previously */
1435 /* unmapped due to alignment constraints. */
1436 void GC_unmap_gap(ptr_t start1
, word bytes1
, ptr_t start2
, word bytes2
)
1438 ptr_t start1_addr
= GC_unmap_start(start1
, bytes1
);
1439 ptr_t end1_addr
= GC_unmap_end(start1
, bytes1
);
1440 ptr_t start2_addr
= GC_unmap_start(start2
, bytes2
);
1441 ptr_t end2_addr
= GC_unmap_end(start2
, bytes2
);
1442 ptr_t start_addr
= end1_addr
;
1443 ptr_t end_addr
= start2_addr
;
1445 GC_ASSERT(start1
+ bytes1
== start2
);
1446 if (0 == start1_addr
) start_addr
= GC_unmap_start(start1
, bytes1
+ bytes2
);
1447 if (0 == start2_addr
) end_addr
= GC_unmap_end(start1
, bytes1
+ bytes2
);
1448 if (0 == start_addr
) return;
1449 len
= end_addr
- start_addr
;
1450 if (len
!= 0 && munmap(start_addr
, len
) != 0) ABORT("munmap failed");
1451 GC_unmapped_bytes
+= len
;
1454 #endif /* USE_MUNMAP */
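/* Illustrative sketch (not compiled): the intended pairing of the unmap/   */
/* remap primitives above.  An idle block is returned to the OS with        */
/* GC_unmap and must be remapped over exactly the same range before the     */
/* allocator touches it again.  The block address and size are hypothetical.*/
#if 0
static void example_recycle(ptr_t block, word bytes)
{
    GC_unmap(block, bytes);     /* pages discarded; contents lost         */
    /* ... block sits unused on a free list for a while ...               */
    GC_remap(block, bytes);     /* fresh zero-filled pages come back      */
    /* The caller may now reuse the memory.                               */
}
#endif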
1456 /* Routine for pushing any additional roots. In THREADS */
1457 /* environment, this is also responsible for marking from */
1458 /* thread stacks. In the SRC_M3 case, it also handles */
1459 /* global variables. */
1461 void (*GC_push_other_roots
)() = 0;
1465 PCR_ERes
GC_push_thread_stack(PCR_Th_T
*t
, PCR_Any dummy
)
1467 struct PCR_ThCtl_TInfoRep info
;
1470 info
.ti_stkLow
= info
.ti_stkHi
= 0;
1471 result
= PCR_ThCtl_GetInfo(t
, &info
);
1472 GC_push_all_stack((ptr_t
)(info
.ti_stkLow
), (ptr_t
)(info
.ti_stkHi
));
/* Push the contents of an old object.  We treat this as stack */
/* data only because that makes it robust against mark stack   */
/* overflow.                                                    */
1479 PCR_ERes
GC_push_old_obj(void *p
, size_t size
, PCR_Any data
)
1481 GC_push_all_stack((ptr_t
)p
, (ptr_t
)p
+ size
);
1482 return(PCR_ERes_okay
);
1486 void GC_default_push_other_roots()
1488 /* Traverse data allocated by previous memory managers. */
1490 extern struct PCR_MM_ProcsRep
* GC_old_allocator
;
1492 if ((*(GC_old_allocator
->mmp_enumerate
))(PCR_Bool_false
,
1495 ABORT("Old object enumeration failed");
1498 /* Traverse all thread stacks. */
1500 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack
,0))
1501 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1502 ABORT("Thread stack marking failed\n");
1510 # ifdef ALL_INTERIOR_POINTERS
1515 extern void ThreadF__ProcessStacks();
1517 void GC_push_thread_stack(start
, stop
)
1520 GC_push_all_stack((ptr_t
)start
, (ptr_t
)stop
+ sizeof(word
));
1523 /* Push routine with M3 specific calling convention. */
1524 GC_m3_push_root(dummy1
, p
, dummy2
, dummy3
)
1526 ptr_t dummy1
, dummy2
;
1531 if ((ptr_t
)(q
) >= GC_least_plausible_heap_addr
1532 && (ptr_t
)(q
) < GC_greatest_plausible_heap_addr
) {
1533 GC_push_one_checked(q
,FALSE
);
1537 /* M3 set equivalent to RTHeap.TracedRefTypes */
1538 typedef struct { int elts
[1]; } RefTypeSet
;
1539 RefTypeSet GC_TracedRefTypes
= {{0x1}};
1541 /* From finalize.c */
1542 extern void GC_push_finalizer_structures();
1544 /* From stubborn.c: */
1545 # ifdef STUBBORN_ALLOC
1546 extern GC_PTR
* GC_changing_list_start
;
1550 void GC_default_push_other_roots()
1552 /* Use the M3 provided routine for finding static roots. */
1553 /* This is a bit dubious, since it presumes no C roots. */
1554 /* We handle the collector roots explicitly. */
1556 # ifdef STUBBORN_ALLOC
1557 GC_push_one(GC_changing_list_start
);
1559 GC_push_finalizer_structures();
1560 RTMain__GlobalMapProc(GC_m3_push_root
, 0, GC_TracedRefTypes
);
1562 if (GC_words_allocd
> 0) {
1563 ThreadF__ProcessStacks(GC_push_thread_stack
);
1565 /* Otherwise this isn't absolutely necessary, and we have */
1566 /* startup ordering problems. */
1569 # endif /* SRC_M3 */
1571 # if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
1572 || defined(IRIX_THREADS) || defined(LINUX_THREADS) \
1573 || defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
1575 extern void GC_push_all_stacks();
1577 void GC_default_push_other_roots()
1579 GC_push_all_stacks();
1582 # endif /* SOLARIS_THREADS || ... */
1584 void (*GC_push_other_roots
)() = GC_default_push_other_roots
;
/*
 * Routines for accessing dirty bits on virtual pages.
 * We plan to eventually implement four strategies for doing so:
 * DEFAULT_VDB: A simple dummy implementation that treats every page
 *              as possibly dirty.  This makes incremental collection
 *              useless, but the implementation is still correct.
 * PCR_VDB:     Use PPCR's virtual dirty bit facility.
 * PROC_VDB:    Use the /proc facility for reading dirty bits.  Only
 *              works under some SVR4 variants.  Even then, it may be
 *              too slow to be entirely satisfactory.  Requires reading
 *              dirty bits for entire address space.  Implementations tend
 *              to assume that the client is a (slow) debugger.
 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
 *              dirtied pages.  The implementation (and implementability)
 *              is highly system dependent.  This usually fails when system
 *              calls write to a protected page.  We prevent the read system
 *              call from doing so.  It is the client's responsibility to
 *              make sure that other system calls are similarly protected
 *              or write only to the stack.
 */
1609 GC_bool GC_dirty_maintained
= FALSE
;
1613 /* All of the following assume the allocation lock is held, and */
1614 /* signals are disabled. */
/* The client asserts that unallocated pages in the heap are never */
/* written.                                                         */
1619 /* Initialize virtual dirty bit implementation. */
1620 void GC_dirty_init()
1622 GC_dirty_maintained
= TRUE
;
1625 /* Retrieve system dirty bits for heap to a local buffer. */
/* Restore the system's notion of which pages are dirty.            */
1627 void GC_read_dirty()
1630 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1631 /* If the actual page size is different, this returns TRUE if any */
1632 /* of the pages overlapping h are dirty. This routine may err on the */
1633 /* side of labelling pages as dirty (and this implementation does). */
1635 GC_bool
GC_page_was_dirty(h
)
1642 * The following two routines are typically less crucial. They matter
1643 * most with large dynamic libraries, or if we can't accurately identify
1644 * stacks, e.g. under Solaris 2.X. Otherwise the following default
1645 * versions are adequate.
1648 /* Could any valid GC heap pointer ever have been written to this page? */
1650 GC_bool
GC_page_was_ever_dirty(h
)
1656 /* Reset the n pages starting at h to "was never dirty" status. */
1657 void GC_is_fresh(h
, n
)
1663 /* A call hints that h is about to be written. */
1664 /* May speed up some dirty bit implementations. */
1666 void GC_write_hint(h
)
1671 # endif /* DEFAULT_VDB */
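/* Illustrative sketch (not compiled): how a collection cycle consumes the  */
/* virtual dirty bit interface described above, whichever implementation is */
/* selected.  The block-scanning helper is hypothetical; only the           */
/* GC_read_dirty/GC_page_was_dirty calls reflect the real interface.        */
#if 0
static void example_rescan_dirty(struct hblk *heap_start, word nblocks)
{
    word i;

    GC_read_dirty();        /* snapshot and clear the system's dirty bits */
    for (i = 0; i < nblocks; i++) {
        struct hblk *h = heap_start + i;

        if (GC_page_was_dirty(h)) {
            /* Re-mark from objects on this block (hypothetical helper). */
            example_scan_block(h);
        }
    }
}
#endif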
1674 # ifdef MPROTECT_VDB
1677 * See DEFAULT_VDB for interface descriptions.
1681 * This implementation maintains dirty bits itself by catching write
1682 * faults and keeping track of them. We assume nobody else catches
1683 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
1684 * except as a result of a read system call. This means clients must
1685 * either ensure that system calls do not touch the heap, or must
1686 * provide their own wrappers analogous to the one for read.
1687 * We assume the page size is a multiple of HBLKSIZE.
1688 * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
1689 * tried to use portable code where easily possible. It is known
1690 * not to work under a number of other systems.
1695 # include <sys/mman.h>
1696 # include <signal.h>
1697 # include <sys/syscall.h>
1699 # define PROTECT(addr, len) \
1700 if (mprotect((caddr_t)(addr), (size_t)(len), \
1701 PROT_READ | OPT_PROT_EXEC) < 0) { \
1702 ABORT("mprotect failed"); \
1704 # define UNPROTECT(addr, len) \
1705 if (mprotect((caddr_t)(addr), (size_t)(len), \
1706 PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1707 ABORT("un-mprotect failed"); \
1712 # include <signal.h>
1714 static DWORD protect_junk
;
1715 # define PROTECT(addr, len) \
1716 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1718 DWORD last_error = GetLastError(); \
1719 GC_printf1("Last error code: %lx\n", last_error); \
1720 ABORT("VirtualProtect failed"); \
1722 # define UNPROTECT(addr, len) \
1723 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1725 ABORT("un-VirtualProtect failed"); \
1730 #if defined(SUNOS4) || defined(FREEBSD)
1731 typedef void (* SIG_PF
)();
1733 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX)
1735 typedef void (* SIG_PF
)(int);
1737 typedef void (* SIG_PF
)();
1740 #if defined(MSWIN32)
1741 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF
;
1743 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1746 #if defined(IRIX5) || defined(OSF1)
1747 typedef void (* REAL_SIG_PF
)(int, int, struct sigcontext
*);
1749 #if defined(SUNOS5SIGS)
1751 # define SIGINFO __siginfo
1753 # define SIGINFO siginfo
1756 typedef void (* REAL_SIG_PF
)(int, struct SIGINFO
*, void *);
1758 typedef void (* REAL_SIG_PF
)();
1762 # include <linux/version.h>
1763 # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(IA64)
1764 typedef struct sigcontext s_c
;
1766 typedef struct sigcontext_struct s_c
;
1768 # if defined(ALPHA) || defined(M68K)
1769 typedef void (* REAL_SIG_PF
)(int, int, s_c
*);
1772 typedef void (* REAL_SIG_PF
)(int, siginfo_t
*, s_c
*);
1774 typedef void (* REAL_SIG_PF
)(int, s_c
);
/* Retrieve fault address from sigcontext structure by decoding */
/* the instruction.                                              */
1780 char * get_fault_addr(s_c
*sc
) {
1784 instr
= *((unsigned *)(sc
->sc_pc
));
1785 faultaddr
= sc
->sc_regs
[(instr
>> 16) & 0x1f];
1786 faultaddr
+= (word
) (((int)instr
<< 16) >> 16);
1787 return (char *)faultaddr
;
1789 # endif /* !ALPHA */
1792 SIG_PF GC_old_bus_handler
;
1793 SIG_PF GC_old_segv_handler
; /* Also old MSWIN32 ACCESS_VIOLATION filter */
1796 # if defined (SUNOS4) || defined(FREEBSD)
1797 void GC_write_fault_handler(sig
, code
, scp
, addr
)
1799 struct sigcontext
*scp
;
1802 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
1803 # define CODE_OK (FC_CODE(code) == FC_PROT \
1804 || (FC_CODE(code) == FC_OBJERR \
1805 && FC_ERRNO(code) == FC_PROT))
1808 # define SIG_OK (sig == SIGBUS)
1809 # define CODE_OK (code == BUS_PAGE_FAULT)
1812 # if defined(IRIX5) || defined(OSF1)
1814 void GC_write_fault_handler(int sig
, int code
, struct sigcontext
*scp
)
1815 # define SIG_OK (sig == SIGSEGV)
1817 # define CODE_OK (code == 2 /* experimentally determined */)
1820 # define CODE_OK (code == EACCES)
1824 # if defined(ALPHA) || defined(M68K)
1825 void GC_write_fault_handler(int sig
, int code
, s_c
* sc
)
1828 void GC_write_fault_handler(int sig
, siginfo_t
* si
, s_c
* scp
)
1830 void GC_write_fault_handler(int sig
, s_c sc
)
1833 # define SIG_OK (sig == SIGSEGV)
1834 # define CODE_OK TRUE
1835 /* Empirically c.trapno == 14, on IA32, but is that useful? */
1836 /* Should probably consider alignment issues on other */
1837 /* architectures. */
1839 # if defined(SUNOS5SIGS)
1841 void GC_write_fault_handler(int sig
, struct SIGINFO
*scp
, void * context
)
1843 void GC_write_fault_handler(sig
, scp
, context
)
1845 struct SIGINFO
*scp
;
1849 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
1850 # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
1851 || (scp -> si_code == BUS_ADRERR) \
1852 || (scp -> si_code == BUS_UNKNOWN) \
1853 || (scp -> si_code == SEGV_UNKNOWN) \
1854 || (scp -> si_code == BUS_OBJERR)
1856 # define SIG_OK (sig == SIGSEGV)
1857 # define CODE_OK (scp -> si_code == SEGV_ACCERR)
1860 # if defined(MSWIN32)
1861 LONG WINAPI
GC_write_fault_handler(struct _EXCEPTION_POINTERS
*exc_info
)
1862 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
1863 EXCEPTION_ACCESS_VIOLATION)
1864 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
1868 register unsigned i
;
1870 char * addr
= (char *) (size_t) (scp
-> sc_badvaddr
);
1872 # if defined(OSF1) && defined(ALPHA)
1873 char * addr
= (char *) (scp
-> sc_traparg_a0
);
1876 char * addr
= (char *) (scp
-> si_addr
);
1880 char * addr
= (char *) (sc
.cr2
);
1885 struct sigcontext
*scp
= (struct sigcontext
*)(&sc
);
1887 int format
= (scp
->sc_formatvec
>> 12) & 0xf;
1888 unsigned long *framedata
= (unsigned long *)(scp
+ 1);
1891 if (format
== 0xa || format
== 0xb) {
1894 } else if (format
== 7) {
1897 } else if (format
== 4) {
1900 if (framedata
[1] & 0x08000000) {
1901 /* correct addr on misaligned access */
1902 ea
= (ea
+4095)&(~4095);
1908 char * addr
= get_fault_addr(sc
);
1911 char * addr
= si
-> si_addr
;
1912 /* I believe this is claimed to work on all platforms for */
1913 /* Linux 2.3.47 and later. Hopefully we don't have to */
1914 /* worry about earlier kernels on IA64. */
1916 # if defined(POWERPC)
1917 char * addr
= (char *) (sc
.regs
->dar
);
1919 --> architecture
not supported
1926 # if defined(MSWIN32)
1927 char * addr
= (char *) (exc_info
-> ExceptionRecord
1928 -> ExceptionInformation
[1]);
1929 # define sig SIGSEGV
1932 if (SIG_OK
&& CODE_OK
) {
1933 register struct hblk
* h
=
1934 (struct hblk
*)((word
)addr
& ~(GC_page_size
-1));
1935 GC_bool in_allocd_block
;
1938 /* Address is only within the correct physical page. */
1939 in_allocd_block
= FALSE
;
1940 for (i
= 0; i
< divHBLKSZ(GC_page_size
); i
++) {
1941 if (HDR(h
+i
) != 0) {
1942 in_allocd_block
= TRUE
;
1946 in_allocd_block
= (HDR(addr
) != 0);
1948 if (!in_allocd_block
) {
1949 /* Heap blocks now begin and end on page boundaries */
1952 if (sig
== SIGSEGV
) {
1953 old_handler
= GC_old_segv_handler
;
1955 old_handler
= GC_old_bus_handler
;
1957 if (old_handler
== SIG_DFL
) {
1959 GC_err_printf1("Segfault at 0x%lx\n", addr
);
1960 ABORT("Unexpected bus error or segmentation fault");
1962 return(EXCEPTION_CONTINUE_SEARCH
);
1965 # if defined (SUNOS4) || defined(FREEBSD)
1966 (*old_handler
) (sig
, code
, scp
, addr
);
1969 # if defined (SUNOS5SIGS)
1970 (*(REAL_SIG_PF
)old_handler
) (sig
, scp
, context
);
1973 # if defined (LINUX)
1974 # if defined(ALPHA) || defined(M68K)
1975 (*(REAL_SIG_PF
)old_handler
) (sig
, code
, sc
);
1978 (*(REAL_SIG_PF
)old_handler
) (sig
, si
, scp
);
1980 (*(REAL_SIG_PF
)old_handler
) (sig
, sc
);
1985 # if defined (IRIX5) || defined(OSF1)
1986 (*(REAL_SIG_PF
)old_handler
) (sig
, code
, scp
);
1990 return((*old_handler
)(exc_info
));
1994 for (i
= 0; i
< divHBLKSZ(GC_page_size
); i
++) {
1995 register int index
= PHT_HASH(h
+i
);
1997 set_pht_entry_from_index(GC_dirty_pages
, index
);
1999 UNPROTECT(h
, GC_page_size
);
2000 # if defined(OSF1) || defined(LINUX)
2001 /* These reset the signal handler each time by default. */
2002 signal(SIGSEGV
, (SIG_PF
) GC_write_fault_handler
);
2004 /* The write may not take place before dirty bits are read. */
2005 /* But then we'll fault again ... */
2007 return(EXCEPTION_CONTINUE_EXECUTION
);
2013 return EXCEPTION_CONTINUE_SEARCH
;
2015 GC_err_printf1("Segfault at 0x%lx\n", addr
);
2016 ABORT("Unexpected bus error or segmentation fault");
2021 * We hold the allocation lock. We expect block h to be written
2024 void GC_write_hint(h
)
2027 register struct hblk
* h_trunc
;
2028 register unsigned i
;
2029 register GC_bool found_clean
;
2031 if (!GC_dirty_maintained
) return;
2032 h_trunc
= (struct hblk
*)((word
)h
& ~(GC_page_size
-1));
2033 found_clean
= FALSE
;
2034 for (i
= 0; i
< divHBLKSZ(GC_page_size
); i
++) {
2035 register int index
= PHT_HASH(h_trunc
+i
);
2037 if (!get_pht_entry_from_index(GC_dirty_pages
, index
)) {
2039 set_pht_entry_from_index(GC_dirty_pages
, index
);
2043 UNPROTECT(h_trunc
, GC_page_size
);
2047 void GC_dirty_init()
2049 #if defined(SUNOS5SIGS) || defined(IRIX5) /* || defined(OSF1) */
2050 struct sigaction act
, oldact
;
2052 act
.sa_flags
= SA_RESTART
;
2053 act
.sa_handler
= GC_write_fault_handler
;
2055 act
.sa_flags
= SA_RESTART
| SA_SIGINFO
;
2056 act
.sa_sigaction
= GC_write_fault_handler
;
2058 (void)sigemptyset(&act
.sa_mask
);
      GC_printf0("Initializing mprotect virtual dirty bit implementation\n");
2063 GC_dirty_maintained
= TRUE
;
2064 if (GC_page_size
% HBLKSIZE
!= 0) {
2065 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2066 ABORT("Page size not multiple of HBLKSIZE");
2068 # if defined(SUNOS4) || defined(FREEBSD)
2069 GC_old_bus_handler
= signal(SIGBUS
, GC_write_fault_handler
);
2070 if (GC_old_bus_handler
== SIG_IGN
) {
2071 GC_err_printf0("Previously ignored bus error!?");
2072 GC_old_bus_handler
= SIG_DFL
;
2074 if (GC_old_bus_handler
!= SIG_DFL
) {
2076 GC_err_printf0("Replaced other SIGBUS handler\n");
2080 # if defined(OSF1) || defined(SUNOS4) || defined(LINUX)
2081 GC_old_segv_handler
= signal(SIGSEGV
, (SIG_PF
)GC_write_fault_handler
);
2082 if (GC_old_segv_handler
== SIG_IGN
) {
2083 GC_err_printf0("Previously ignored segmentation violation!?");
2084 GC_old_segv_handler
= SIG_DFL
;
2086 if (GC_old_segv_handler
!= SIG_DFL
) {
2088 GC_err_printf0("Replaced other SIGSEGV handler\n");
2092 # if defined(SUNOS5SIGS) || defined(IRIX5)
2093 # if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
2094 sigaction(SIGSEGV
, 0, &oldact
);
2095 sigaction(SIGSEGV
, &act
, 0);
2097 sigaction(SIGSEGV
, &act
, &oldact
);
2099 # if defined(_sigargs)
2100 /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2102 GC_old_segv_handler
= oldact
.sa_handler
;
2103 # else /* Irix 6.x or SUNOS5SIGS */
2104 if (oldact
.sa_flags
& SA_SIGINFO
) {
2105 GC_old_segv_handler
= (SIG_PF
)(oldact
.sa_sigaction
);
2107 GC_old_segv_handler
= oldact
.sa_handler
;
2110 if (GC_old_segv_handler
== SIG_IGN
) {
2111 GC_err_printf0("Previously ignored segmentation violation!?");
2112 GC_old_segv_handler
= SIG_DFL
;
2114 if (GC_old_segv_handler
!= SIG_DFL
) {
2116 GC_err_printf0("Replaced other SIGSEGV handler\n");
2120 sigaction(SIGBUS
, &act
, &oldact
);
2121 GC_old_bus_handler
= oldact
.sa_handler
;
2122 if (GC_old_segv_handler
!= SIG_DFL
) {
2124 GC_err_printf0("Replaced other SIGBUS handler\n");
2129 # if defined(MSWIN32)
2130 GC_old_segv_handler
= SetUnhandledExceptionFilter(GC_write_fault_handler
);
2131 if (GC_old_segv_handler
!= NULL
) {
2133 GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2136 GC_old_segv_handler
= SIG_DFL
;
2143 void GC_protect_heap()
2149 for (i
= 0; i
< GC_n_heap_sects
; i
++) {
2150 start
= GC_heap_sects
[i
].hs_start
;
2151 len
= GC_heap_sects
[i
].hs_bytes
;
2152 PROTECT(start
, len
);
/* We assume that either the world is stopped or it's OK to lose dirty */
/* bits while this is happening (as in GC_enable_incremental).         */
2158 void GC_read_dirty()
2160 BCOPY((word
*)GC_dirty_pages
, GC_grungy_pages
,
2161 (sizeof GC_dirty_pages
));
2162 BZERO((word
*)GC_dirty_pages
, (sizeof GC_dirty_pages
));
2166 GC_bool
GC_page_was_dirty(h
)
2169 register word index
= PHT_HASH(h
);
2171 return(HDR(h
) == 0 || get_pht_entry_from_index(GC_grungy_pages
, index
));
2175 * Acquiring the allocation lock here is dangerous, since this
2176 * can be called from within GC_call_with_alloc_lock, and the cord
2177 * package does so. On systems that allow nested lock acquisition, this
2179 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2182 void GC_begin_syscall()
2184 if (!I_HOLD_LOCK()) LOCK();
2187 void GC_end_syscall()
2189 if (!I_HOLD_LOCK()) UNLOCK();
2192 void GC_unprotect_range(addr
, len
)
2196 struct hblk
* start_block
;
2197 struct hblk
* end_block
;
2198 register struct hblk
*h
;
2201 if (!GC_incremental
) return;
2202 obj_start
= GC_base(addr
);
2203 if (obj_start
== 0) return;
2204 if (GC_base(addr
+ len
- 1) != obj_start
) {
2205 ABORT("GC_unprotect_range(range bigger than object)");
2207 start_block
= (struct hblk
*)((word
)addr
& ~(GC_page_size
- 1));
2208 end_block
= (struct hblk
*)((word
)(addr
+ len
- 1) & ~(GC_page_size
- 1));
2209 end_block
+= GC_page_size
/HBLKSIZE
- 1;
2210 for (h
= start_block
; h
<= end_block
; h
++) {
2211 register word index
= PHT_HASH(h
);
2213 set_pht_entry_from_index(GC_dirty_pages
, index
);
2215 UNPROTECT(start_block
,
2216 ((ptr_t
)end_block
- (ptr_t
)start_block
) + HBLKSIZE
);
2219 #if !defined(MSWIN32) && !defined(LINUX_THREADS)
2220 /* Replacement for UNIX system call. */
2221 /* Other calls that write to the heap */
2222 /* should be handled similarly. */
2223 # if defined(__STDC__) && !defined(SUNOS4)
2224 # include <unistd.h>
2225 # include <sys/uio.h>
2226 ssize_t
read(int fd
, void *buf
, size_t nbyte
)
2229 int read(fd
, buf
, nbyte
)
2231 int GC_read(fd
, buf
, nbyte
)
2241 GC_unprotect_range(buf
, (word
)nbyte
);
2242 # if defined(IRIX5) || defined(LINUX_THREADS)
2243 /* Indirect system call may not always be easily available. */
2244 /* We could call _read, but that would interfere with the */
2245 /* libpthread interception of read. */
2246 /* On Linux, we have to be careful with the linuxthreads */
2247 /* read interception. */
2252 iov
.iov_len
= nbyte
;
2253 result
= readv(fd
, &iov
, 1);
2256 /* The two zero args at the end of this list are because one
2257 IA-64 syscall() implementation actually requires six args
2258 to be passed, even though they aren't always used. */
2259 result
= syscall(SYS_read
, fd
, buf
, nbyte
, 0, 0);
2264 #endif /* !MSWIN32 && !LINUX */
2267 /* We use the GNU ld call wrapping facility. */
2268 /* This requires that the linker be invoked with "--wrap read". */
2269 /* This can be done by passing -Wl,"--wrap read" to gcc. */
2270 /* I'm not sure that this actually wraps whatever version of read */
2271 /* is called by stdio. That code also mentions __read. */
2272 # include <unistd.h>
2273 ssize_t
__wrap_read(int fd
, void *buf
, size_t nbyte
)
2278 GC_unprotect_range(buf
, (word
)nbyte
);
2279 result
= __real_read(fd
, buf
, nbyte
);
2284 /* We should probably also do this for __read, or whatever stdio */
2285 /* actually calls. */
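/* Illustrative sketch (not compiled): the same ld "--wrap" technique       */
/* applied to another heap-writing call.  Linking with something like       */
/* "-Wl,--wrap -Wl,readv" (an assumed invocation) would route readv calls   */
/* here, unprotecting the target buffers first, just as __wrap_read does    */
/* above.                                                                    */
#if 0
#include <sys/uio.h>

ssize_t __wrap_readv(int fd, const struct iovec *iov, int iovcnt)
{
    int i;
    ssize_t result;

    GC_begin_syscall();
    for (i = 0; i < iovcnt; i++) {
        GC_unprotect_range(iov[i].iov_base, (word)iov[i].iov_len);
    }
    result = __real_readv(fd, iov, iovcnt);
    GC_end_syscall();
    return result;
}
#endif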
2289 GC_bool
GC_page_was_ever_dirty(h
)
2295 /* Reset the n pages starting at h to "was never dirty" status. */
2297 void GC_is_fresh(h
, n
)
2303 # endif /* MPROTECT_VDB */
2308 * See DEFAULT_VDB for interface descriptions.
 * This implementation assumes a Solaris 2.X like /proc pseudo-file-system
2313 * from which we can read page modified bits. This facility is far from
2314 * optimal (e.g. we would like to get the info for only some of the
2315 * address space), but it avoids intercepting system calls.
2319 #include <sys/types.h>
2320 #include <sys/signal.h>
2321 #include <sys/fault.h>
2322 #include <sys/syscall.h>
2323 #include <sys/procfs.h>
2324 #include <sys/stat.h>
2327 #define INITIAL_BUF_SZ 4096
2328 word GC_proc_buf_size
= INITIAL_BUF_SZ
;
#ifdef SOLARIS_THREADS
/* We don't have exact sp values for threads.  So we count on   */
/* occasionally declaring stack pages to be fresh.  Thus we     */
/* need a real implementation of GC_is_fresh.  We can't clear   */
/* entries in GC_written_pages, since that would declare all    */
/* pages with the given hash address to be fresh.               */
#   define MAX_FRESH_PAGES 8*1024   /* Must be power of 2 */
    struct hblk ** GC_fresh_pages;  /* A direct mapped cache.  */
                                    /* Collisions are dropped. */

#   define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
#   define ADD_FRESH_PAGE(h) \
        GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
#   define PAGE_IS_FRESH(h) \
        (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
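
/* Usage sketch (illustrative only, not compiled in): how the cache is */
/* intended to behave.  GC_fresh_pages must already have been          */
/* allocated and cleared (see GC_dirty_init below).  The function      */
/* name here is hypothetical.                                          */
#if 0
    void GC_example_declare_page_fresh(h)
    struct hblk *h;
    {
        ADD_FRESH_PAGE(h);   /* May silently evict a previous entry    */
                             /* that hashes to the same slot.          */
        if (PAGE_IS_FRESH(h)) {
            /* h is now reported clean by GC_page_was_dirty until      */
            /* /proc reports a modification, which clears the slot     */
            /* in GC_read_dirty.                                       */
        }
    }
#endif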
#endif

/* Add all pages in pht2 to pht1 */
void GC_or_pages(pht1, pht2)
page_hash_table pht1, pht2;
{
    register int i;

    for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
}

int GC_proc_fd;
void GC_dirty_init()
{
    int fd;
    char buf[30];

    GC_dirty_maintained = TRUE;
    if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
        register int i;

        for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
#       ifdef PRINTSTATS
            GC_printf1("Allocated words:%lu:all pages may have been written\n",
                       (unsigned long)
                            (GC_words_allocd + GC_words_allocd_before_gc));
#       endif
    }
    sprintf(buf, "/proc/%d", getpid());
    fd = open(buf, O_RDONLY);
    if (fd < 0) {
        ABORT("/proc open failed");
    }
    GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
    close(fd);
    if (GC_proc_fd < 0) {
        ABORT("/proc ioctl failed");
    }
    GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
#   ifdef SOLARIS_THREADS
        GC_fresh_pages = (struct hblk **)
            GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
        if (GC_fresh_pages == 0) {
            GC_err_printf0("No space for fresh pages\n");
            EXIT();
        }
        BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
#   endif
}
/* Ignore write hints.  They don't help us here. */
/*ARGSUSED*/
void GC_write_hint(h)
struct hblk *h;
{
}

#ifdef SOLARIS_THREADS
#   define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
#else
#   define READ(fd,buf,nbytes) read(fd, buf, nbytes)
#endif
void GC_read_dirty()
{
    unsigned long ps, np;
    int nmaps;
    ptr_t vaddr;
    struct prasmap * map;
    char * bufp;
    ptr_t current_addr, limit;
    int i;

    BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));

    bufp = GC_proc_buf;
    if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
#       ifdef PRINTSTATS
            GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
                       GC_proc_buf_size);
#       endif
        {
            /* Retry with larger buffer. */
            word new_size = 2 * GC_proc_buf_size;
            char * new_buf = GC_scratch_alloc(new_size);

            if (new_buf != 0) {
                GC_proc_buf = bufp = new_buf;
                GC_proc_buf_size = new_size;
            }
            if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
                WARN("Insufficient space for /proc read\n", 0);
                /* Punt: */
                memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
                memset(GC_written_pages, 0xff, sizeof(page_hash_table));
#               ifdef SOLARIS_THREADS
                    BZERO(GC_fresh_pages,
                          MAX_FRESH_PAGES * sizeof (struct hblk *));
#               endif
                return;
            }
        }
    }
    /* Copy dirty bits into GC_grungy_pages */
    nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
    /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
               nmaps, PG_REFERENCED, PG_MODIFIED); */
    bufp = bufp + sizeof(struct prpageheader);
    for (i = 0; i < nmaps; i++) {
        map = (struct prasmap *)bufp;
        vaddr = (ptr_t)(map -> pr_vaddr);
        ps = map -> pr_pagesize;
        np = map -> pr_npage;
        /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
        limit = vaddr + ps * np;
        bufp += sizeof (struct prasmap);
        for (current_addr = vaddr;
             current_addr < limit; current_addr += ps) {
            if ((*bufp++) & PG_MODIFIED) {
                register struct hblk * h = (struct hblk *) current_addr;

                while ((ptr_t)h < current_addr + ps) {
                    register word index = PHT_HASH(h);

                    set_pht_entry_from_index(GC_grungy_pages, index);
#                   ifdef SOLARIS_THREADS
                    {
                        register int slot = FRESH_PAGE_SLOT(h);

                        if (GC_fresh_pages[slot] == h) {
                            GC_fresh_pages[slot] = 0;
                        }
                    }
#                   endif
                    h++;
                }
            }
        }
        bufp += sizeof(long) - 1;
        bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
    }
    /* Update GC_written_pages. */
    GC_or_pages(GC_written_pages, GC_grungy_pages);
#   ifdef SOLARIS_THREADS
        /* Make sure that old stacks are considered completely clean */
        /* unless written again.                                      */
        GC_old_stacks_are_fresh();
#   endif
}

#undef READ
GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
    register word index = PHT_HASH(h);
    register GC_bool result;

    result = get_pht_entry_from_index(GC_grungy_pages, index);
#   ifdef SOLARIS_THREADS
        if (result && PAGE_IS_FRESH(h)) result = FALSE;
        /* This happens only if page was declared fresh since   */
        /* the read_dirty call, e.g. because it's in an unused   */
        /* thread stack.  It's OK to treat it as clean, in       */
        /* that case.  And it's consistent with                  */
        /* GC_page_was_ever_dirty.                               */
#   endif
    return(result);
}
GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    register word index = PHT_HASH(h);
    register GC_bool result;

    result = get_pht_entry_from_index(GC_written_pages, index);
#   ifdef SOLARIS_THREADS
        if (result && PAGE_IS_FRESH(h)) result = FALSE;
#   endif
    return(result);
}
/* Caller holds allocation lock. */
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
    register word index;

#   ifdef SOLARIS_THREADS
      register word i;

      if (GC_fresh_pages != 0) {
        for (i = 0; i < n; i++) {
          ADD_FRESH_PAGE(h + i);
        }
      }
#   endif
}

# endif /* PROC_VDB */
# ifdef PCR_VDB

# include "vd/PCR_VD.h"

# define NPAGES (32*1024)   /* 128 MB */

PCR_VD_DB GC_grungy_bits[NPAGES];

ptr_t GC_vd_base;   /* Address corresponding to GC_grungy_bits[0] */
                    /* HBLKSIZE aligned.                           */

void GC_dirty_init()
{
    GC_dirty_maintained = TRUE;
    /* For the time being, we assume the heap generally grows up */
    GC_vd_base = GC_heap_sects[0].hs_start;
    if (GC_vd_base == 0) {
        ABORT("Bad initial heap segment");
    }
    if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
        != PCR_ERes_okay) {
        ABORT("dirty bit initialization failed");
    }
}
void GC_read_dirty()
{
    /* lazily enable dirty bits on newly added heap sects */
    {
        static int onhs = 0;
        int nhs = GC_n_heap_sects;
        for ( ; onhs < nhs; onhs++) {
            PCR_VD_WriteProtectEnable(
                    GC_heap_sects[onhs].hs_start,
                    GC_heap_sects[onhs].hs_bytes);
        }
    }

    if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
        != PCR_ERes_okay) {
        ABORT("dirty bit read failed");
    }
}
GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
    if ((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
        return(TRUE);
    }
    return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
}
void GC_write_hint(h)
struct hblk *h;
{
    PCR_VD_WriteProtectDisable(h, HBLKSIZE);
    PCR_VD_WriteProtectEnable(h, HBLKSIZE);
}

# endif /* PCR_VDB */
 * Call stack save code for debugging.
 * Should probably be in mach_dep.c, but that requires reorganization.
 */
#if defined(SPARC)
# if defined(LINUX)
    struct frame {
        long    fr_local[8];
        long    fr_arg[6];
        struct frame *fr_savfp;
        long    fr_savpc;
#       ifndef __arch64__
          char  *fr_stret;
#       endif
        long    fr_argd[6];
        long    fr_argx[0];
    };
# else
#   if defined(SUNOS4)
#     include <machine/frame.h>
#   else
#     if defined (DRSNX)
#       include <sys/sparc/frame.h>
#     else
#       if defined(OPENBSD)
#         include <frame.h>
#       else
#         include <sys/frame.h>
#       endif
#     endif
#   endif
# endif
# if NARGS > 6
        --> We only know how to get the first 6 arguments
# endif
#endif /* SPARC */
#ifdef SAVE_CALL_CHAIN
/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers.  Ignore my frame and my callers frame.                 */

#if defined(OPENBSD)
#   define FR_SAVFP fr_fp
#   define FR_SAVPC fr_pc
#else
#   define FR_SAVFP fr_savfp
#   define FR_SAVPC fr_savpc
#endif

#if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
#   define BIAS 2047
#else
#   define BIAS 0
#endif

void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
    struct frame *frame;
    struct frame *fp;
    int nframes = 0;
    word GC_save_regs_in_stack();

    frame = (struct frame *) GC_save_regs_in_stack ();

    for (fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
         fp != 0 && nframes < NFRAMES;
         fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
        register int i;

        info[nframes].ci_pc = fp->FR_SAVPC;
        for (i = 0; i < NARGS; i++) {
            info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
        }
    }
    if (nframes < NFRAMES) info[nframes].ci_pc = 0;
}

#endif /* SAVE_CALL_CHAIN */
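
/* Illustrative sketch only (not part of the collector): one way a    */
/* debugging client might dump a chain captured by GC_save_callers(). */
/* The argument words are stored complemented above, so they are      */
/* complemented again before printing.  GC_dump_call_chain is a       */
/* hypothetical name.                                                  */
#if 0
    void GC_dump_call_chain(info)
    struct callinfo info[NFRAMES];
    {
        register int i, j;

        for (i = 0; i < NFRAMES && info[i].ci_pc != 0; i++) {
            GC_err_printf1("\tpc = 0x%lx, args:",
                           (unsigned long)(info[i].ci_pc));
            for (j = 0; j < NARGS; j++) {
                GC_err_printf1(" 0x%lx",
                               (unsigned long)(~(info[i].ci_arg[j])));
            }
            GC_err_printf0("\n");
        }
    }
#endif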