 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
# include "private/gc_priv.h"

# if defined(LINUX) && !defined(POWERPC)
#   include <linux/version.h>
#   if (LINUX_VERSION_CODE <= 0x10400)
      /* Ugly hack to get struct sigcontext_struct definition.  Required */
      /* for some early 1.3.X releases.  Will hopefully go away soon.    */
      /* in some later Linux releases, asm/sigcontext.h may have to      */
      /* be included instead.                                            */
#     include <asm/signal.h>
      /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
      /* struct sigcontext.  libc6 (glibc2) uses "struct sigcontext" in     */
      /* prototypes, so we have to include the top-level sigcontext.h to    */
      /* make sure the former gets defined to be the latter if appropriate. */
#     include <features.h>
#     if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
        /* glibc 2.1 no longer has sigcontext.h.  But signal.h */
        /* has the right declaration for glibc 2.1.            */
#       include <sigcontext.h>
#     endif /* 0 == __GLIBC_MINOR__ */
#   else /* not 2 <= __GLIBC__ */
      /* libc5 doesn't have <sigcontext.h>: go directly with the kernel   */
      /* one.  Check LINUX_VERSION_CODE to see which we should reference. */
#     include <asm/sigcontext.h>
#   endif /* 2 <= __GLIBC__ */
# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
#   include <sys/types.h>
#   if !defined(MSWIN32) && !defined(SUNOS4)
#     define SIGSEGV 0 /* value is irrelevant */

/* Blatantly OS dependent routines, except for those that are related */
/* to dynamic loading.                                                 */

# if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
#   define NEED_FIND_LIMIT
# if !defined(STACKBOTTOM) && defined(HEURISTIC2)
#   define NEED_FIND_LIMIT
# if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
#   define NEED_FIND_LIMIT
# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
      || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
#   define NEED_FIND_LIMIT

#ifdef NEED_FIND_LIMIT
#   include <machine/trap.h>
#   include "exec/execbase.h"
#   include <proto/exec.h>
#   include <exec/execbase.h>
#   include <dos/dos.h>
#   include <dos/dosextens.h>
#if defined(MSWIN32) || defined(MSWINCE)
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
# include <Processes.h>
# include <sys/uio.h>
# include <malloc.h>   /* for locking */
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/stat.h>

#if defined(SUNOS5SIGS) || defined (HURD) || defined(LINUX)
# include <sys/siginfo.h>
# define setjmp(env) sigsetjmp(env, 1)
# define longjmp(env, val) siglongjmp(env, val)
# define jmp_buf sigjmp_buf

/* Apparently necessary for djgpp 2.01.  May cause problems with */
/* other versions.                                                */
typedef long unsigned int caddr_t;
# include "il/PCR_IL.h"
# include "th/PCR_ThCtl.h"
# include "mm/PCR_MM.h"

#if !defined(NO_EXECUTE_PERMISSION)
# define OPT_PROT_EXEC PROT_EXEC
# define OPT_PROT_EXEC 0

#if defined(SEARCH_FOR_DATA_START)
  /* The I386 case can be handled without a search.  The Alpha case   */
  /* used to be handled differently as well, but the rules changed    */
  /* for recent Linux versions.  This seems to be the easiest way to  */
  /* cover all versions.                                              */
#   pragma weak __data_start
    extern int __data_start;
#   pragma weak data_start
    extern int data_start;

  void GC_init_linux_data_start()
    extern ptr_t GC_find_limit();

    /* Try the easy approaches first: */
    if (&__data_start != 0) {
        GC_data_start = (ptr_t)(&__data_start);
    if (&data_start != 0) {
        GC_data_start = (ptr_t)(&data_start);
    GC_data_start = GC_find_limit((ptr_t)(&_end), FALSE);
# ifndef ECOS_GC_MEMORY_SIZE
#   define ECOS_GC_MEMORY_SIZE (448 * 1024)
# endif /* ECOS_GC_MEMORY_SIZE */

// setjmp() function, as described in ANSI para 7.6.1.1
#define setjmp( __env__ )  hal_setjmp( __env__ )

// FIXME: This is a simple way of allocating memory which is
// compatible with ECOS early releases.  Later releases use a more
// sophisticated means of allocating memory than this simple static
// allocator, but this method is at least bound to work.
static char memory[ECOS_GC_MEMORY_SIZE];
static char *brk = memory;

static void *tiny_sbrk(ptrdiff_t increment)
  if (brk > memory + sizeof memory)
#define sbrk tiny_sbrk
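// For reference, a minimal sketch of how such a static-arena sbrk
// replacement typically behaves (illustrative only; the body of
// tiny_sbrk is elided above and the real one may differ in detail):
//
//   void *sketch_sbrk(ptrdiff_t increment)
//   {
//       void *result = brk;
//       brk += increment;
//       if (brk > memory + sizeof memory) {
//           brk -= increment;        // out of arena: undo and fail
//           return (void *)-1;       // mimic sbrk's failure value
//       }
//       return result;               // old break, as sbrk would return
//   }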
#if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)

  void GC_init_netbsd_elf()
    extern ptr_t GC_find_limit();
    extern char **environ;
        /* This may need to be environ, without the underscore, for */
    GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
243 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
246 unsigned short magic_number
;
247 unsigned short padding
[29];
251 #define E_MAGIC(x) (x).magic_number
252 #define EMAGIC 0x5A4D
253 #define E_LFANEW(x) (x).new_exe_offset
256 unsigned char magic_number
[2];
257 unsigned char byte_order
;
258 unsigned char word_order
;
259 unsigned long exe_format_level
;
262 unsigned long padding1
[13];
263 unsigned long object_table_offset
;
264 unsigned long object_count
;
265 unsigned long padding2
[31];
268 #define E32_MAGIC1(x) (x).magic_number[0]
269 #define E32MAGIC1 'L'
270 #define E32_MAGIC2(x) (x).magic_number[1]
271 #define E32MAGIC2 'X'
272 #define E32_BORDER(x) (x).byte_order
274 #define E32_WORDER(x) (x).word_order
276 #define E32_CPU(x) (x).cpu
278 #define E32_OBJTAB(x) (x).object_table_offset
279 #define E32_OBJCNT(x) (x).object_count
285 unsigned long pagemap
;
286 unsigned long mapsize
;
287 unsigned long reserved
;
290 #define O32_FLAGS(x) (x).flags
291 #define OBJREAD 0x0001L
292 #define OBJWRITE 0x0002L
293 #define OBJINVALID 0x0080L
294 #define O32_SIZE(x) (x).size
295 #define O32_BASE(x) (x).base
297 # else /* IBM's compiler */
299 /* A kludge to get around what appears to be a header file bug */
301 # define WORD unsigned short
304 # define DWORD unsigned long
311 # endif /* __IBMC__ */
313 # define INCL_DOSEXCEPTIONS
314 # define INCL_DOSPROCESS
315 # define INCL_DOSERRORS
316 # define INCL_DOSMODULEMGR
317 # define INCL_DOSMEMMGR
/* Disable and enable signals during nontrivial allocations */

void GC_disable_signals(void)
    DosEnterMustComplete(&nest);
    if (nest != 1) ABORT("nested GC_disable_signals");

void GC_enable_signals(void)
    DosExitMustComplete(&nest);
    if (nest != 0) ABORT("GC_enable_signals");
# if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
        && !defined(MSWINCE) \
        && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW)

# if defined(sigmask) && !defined(UTS4) && !defined(HURD)
        /* Use the traditional BSD interface */
#   define SIGSET_T int
#   define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
#   define SIG_FILL(set)  (set) = 0x7fffffff
          /* Setting the leading bit appears to provoke a bug in some  */
          /* longjmp implementations.  Most systems appear not to have */
#   define SIGSETMASK(old, new) (old) = sigsetmask(new)
        /* Use POSIX/SYSV interface */
#   define SIGSET_T sigset_t
#   define SIG_DEL(set, signal) sigdelset(&(set), (signal))
#   define SIG_FILL(set) sigfillset(&set)
#   define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))

static GC_bool mask_initialized = FALSE;
static SIGSET_T new_mask;
static SIGSET_T old_mask;
static SIGSET_T dummy;

#if defined(PRINTSTATS) && !defined(THREADS)
# define CHECK_SIGNALS
  int GC_sig_disabled = 0;
void GC_disable_signals()
    if (!mask_initialized) {
        SIG_DEL(new_mask, SIGSEGV);
        SIG_DEL(new_mask, SIGILL);
        SIG_DEL(new_mask, SIGQUIT);
        SIG_DEL(new_mask, SIGBUS);
        SIG_DEL(new_mask, SIGIOT);
        SIG_DEL(new_mask, SIGEMT);
        SIG_DEL(new_mask, SIGTRAP);
        mask_initialized = TRUE;
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 0) ABORT("Nested disables");
    SIGSETMASK(old_mask,new_mask);

void GC_enable_signals()
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 1) ABORT("Unmatched enable");
    SIGSETMASK(dummy,old_mask);

/* Ivan Demakov: simplest way (to me) */
void GC_disable_signals() { }
void GC_enable_signals() { }
/* Find the page size */

# if defined(MSWIN32) || defined(MSWINCE)
  void GC_setpagesize()
    GetSystemInfo(&GC_sysinfo);
    GC_page_size = GC_sysinfo.dwPageSize;

#   if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
       || defined(USE_MUNMAP)
        void GC_setpagesize()
            GC_page_size = GETPAGESIZE();
        /* It's acceptable to fake it. */
        void GC_setpagesize()
            GC_page_size = HBLKSIZE;
451 * Find the base of the stack.
452 * Used only in single-threaded environment.
453 * With threads, GC_mark_roots needs to know how to do this.
454 * Called with allocator lock held.
456 # if defined(MSWIN32) || defined(MSWINCE)
457 # define is_writable(prot) ((prot) == PAGE_READWRITE \
458 || (prot) == PAGE_WRITECOPY \
459 || (prot) == PAGE_EXECUTE_READWRITE \
460 || (prot) == PAGE_EXECUTE_WRITECOPY)
461 /* Return the number of bytes that are writable starting at p. */
462 /* The pointer p is assumed to be page aligned. */
463 /* If base is not 0, *base becomes the beginning of the */
464 /* allocation region containing p. */
465 word
GC_get_writable_length(ptr_t p
, ptr_t
*base
)
467 MEMORY_BASIC_INFORMATION buf
;
471 result
= VirtualQuery(p
, &buf
, sizeof(buf
));
472 if (result
!= sizeof(buf
)) ABORT("Weird VirtualQuery result");
473 if (base
!= 0) *base
= (ptr_t
)(buf
.AllocationBase
);
474 protect
= (buf
.Protect
& ~(PAGE_GUARD
| PAGE_NOCACHE
));
475 if (!is_writable(protect
)) {
478 if (buf
.State
!= MEM_COMMIT
) return(0);
479 return(buf
.RegionSize
);
482 ptr_t
GC_get_stack_base()
485 ptr_t sp
= (ptr_t
)(&dummy
);
486 ptr_t trunc_sp
= (ptr_t
)((word
)sp
& ~(GC_page_size
- 1));
487 word size
= GC_get_writable_length(trunc_sp
, 0);
489 return(trunc_sp
+ size
);
493 # endif /* MS Windows */
# include <kernel/OS.h>
  ptr_t GC_get_stack_base(){
    get_thread_info(find_thread(NULL),&th);

  ptr_t GC_get_stack_base()
    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        GC_err_printf0("DosGetInfoBlocks failed\n");
        ABORT("DosGetInfoBlocks failed\n");
    return((ptr_t)(ptib -> tib_pstacklimit));

  ptr_t GC_get_stack_base(){
    return (char *)SysBase->ThisTask->tc_SPUpper;

#   include "AmigaOS.c"
# endif /* __AMIGAOS__ */
532 # if defined(NEED_FIND_LIMIT) || (defined(UNIX_LIKE) && !defined(ECOS))
535 typedef void (*handler
)(int);
537 typedef void (*handler
)();
540 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) || defined(HURD)
541 static struct sigaction old_segv_act
;
542 # if defined(_sigargs) /* !Irix6.x */ || defined(HPUX) || defined(HURD)
543 static struct sigaction old_bus_act
;
546 static handler old_segv_handler
, old_bus_handler
;
550 void GC_set_and_save_fault_handler(handler h
)
552 void GC_set_and_save_fault_handler(h
)
556 # if defined(SUNOS5SIGS) || defined(IRIX5) \
557 || defined(OSF1) || defined(HURD)
558 struct sigaction act
;
562 act
.sa_flags
= SA_RESTART
| SA_NODEFER
;
564 act
.sa_flags
= SA_RESTART
;
566 /* The presence of SA_NODEFER represents yet another gross */
567 /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
568 /* interact correctly with -lthread. We hide the confusion */
569 /* by making sure that signal handling doesn't affect the */
572 (void) sigemptyset(&act
.sa_mask
);
573 # ifdef GC_IRIX_THREADS
        /* Older versions have a bug related to retrieving and */
        /* setting a handler at the same time.                  */
576 (void) sigaction(SIGSEGV
, 0, &old_segv_act
);
577 (void) sigaction(SIGSEGV
, &act
, 0);
579 (void) sigaction(SIGSEGV
, &act
, &old_segv_act
);
580 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
581 || defined(HPUX) || defined(HURD)
582 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
583 /* Pthreads doesn't exist under Irix 5.x, so we */
584 /* don't have to worry in the threads case. */
585 (void) sigaction(SIGBUS
, &act
, &old_bus_act
);
587 # endif /* GC_IRIX_THREADS */
589 old_segv_handler
= signal(SIGSEGV
, h
);
591 old_bus_handler
= signal(SIGBUS
, h
);
595 # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
# ifdef NEED_FIND_LIMIT
  /* Some tools to implement HEURISTIC2 */
#   define MIN_PAGE_SIZE 256    /* Smallest conceivable page size, bytes */
    /* static */ jmp_buf GC_jmp_buf;

    void GC_fault_handler(sig)
        longjmp(GC_jmp_buf, 1);

    void GC_setup_temporary_fault_handler()
        GC_set_and_save_fault_handler(GC_fault_handler);

    void GC_reset_fault_handler()
#       if defined(SUNOS5SIGS) || defined(IRIX5) \
           || defined(OSF1) || defined(HURD)
          (void) sigaction(SIGSEGV, &old_segv_act, 0);
#         if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
             || defined(HPUX) || defined(HURD)
            (void) sigaction(SIGBUS, &old_bus_act, 0);
          (void) signal(SIGSEGV, old_segv_handler);
          (void) signal(SIGBUS, old_bus_handler);

    /* Return the first nonaddressable location > p (up) or     */
    /* the smallest location q s.t. [q,p] is addressable (!up). */
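    /* For example, the HEURISTIC2 code in GC_get_stack_base further  */
    /* below calls GC_find_limit((ptr_t)(&dummy), TRUE) on a stack    */
    /* that grows down, so the value returned is the first unmapped   */
    /* page above the current frame, which then serves as the stack   */
    /* bottom.                                                         */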
    ptr_t GC_find_limit(p, up)
        static VOLATILE ptr_t result;
                /* Needs to be static, since otherwise it may not be  */
                /* preserved across the longjmp.  Can safely be       */
                /* static since it's only called once, with the       */
                /* allocation lock held.                               */

        GC_setup_temporary_fault_handler();
        if (setjmp(GC_jmp_buf) == 0) {
            result = (ptr_t)(((word)(p)) & ~(MIN_PAGE_SIZE-1));
                result += MIN_PAGE_SIZE;
                result -= MIN_PAGE_SIZE;
            GC_noop1((word)(*result));
        GC_reset_fault_handler();
            result += MIN_PAGE_SIZE;
#ifdef LINUX_STACKBOTTOM

#include <sys/types.h>
#include <sys/stat.h>

# define STAT_SKIP 27   /* Number of fields preceding startstack */
                        /* field in /proc/self/stat               */

# pragma weak __libc_stack_end
  extern ptr_t __libc_stack_end;

# pragma weak __libc_ia64_register_backing_store_base
  extern ptr_t __libc_ia64_register_backing_store_base;

  ptr_t GC_get_register_stack_base(void)
    if (0 != &__libc_ia64_register_backing_store_base
        && 0 != __libc_ia64_register_backing_store_base) {
      /* Glibc 2.2.4 has a bug such that for dynamically linked */
      /* executables __libc_ia64_register_backing_store_base is */
      /* defined but uninitialized during constructor calls.    */
      /* Hence we check for both nonzero address and value.     */
      return __libc_ia64_register_backing_store_base;
    word result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
    result += BACKING_STORE_ALIGNMENT - 1;
    result &= ~(BACKING_STORE_ALIGNMENT - 1);
    return (ptr_t)result;
698 ptr_t
GC_linux_stack_base(void)
700 /* We read the stack base value from /proc/self/stat. We do this */
701 /* using direct I/O system calls in order to avoid calling malloc */
702 /* in case REDIRECT_MALLOC is defined. */
703 # define STAT_BUF_SIZE 4096
704 # if defined(GC_USE_LD_WRAP)
705 # define STAT_READ __real_read
707 # define STAT_READ read
709 char stat_buf
[STAT_BUF_SIZE
];
713 size_t i
, buf_offset
= 0;
715 /* First try the easy way. This should work for glibc 2.2 */
716 if (0 != &__libc_stack_end
) {
717 return __libc_stack_end
;
719 f
= open("/proc/self/stat", O_RDONLY
);
720 if (f
< 0 || STAT_READ(f
, stat_buf
, STAT_BUF_SIZE
) < 2 * STAT_SKIP
) {
721 ABORT("Couldn't read /proc/self/stat");
723 c
= stat_buf
[buf_offset
++];
724 /* Skip the required number of fields. This number is hopefully */
725 /* constant across all Linux implementations. */
726 for (i
= 0; i
< STAT_SKIP
; ++i
) {
727 while (isspace(c
)) c
= stat_buf
[buf_offset
++];
728 while (!isspace(c
)) c
= stat_buf
[buf_offset
++];
730 while (isspace(c
)) c
= stat_buf
[buf_offset
++];
734 c
= stat_buf
[buf_offset
++];
737 if (result
< 0x10000000) ABORT("Absurd stack bottom value");
738 return (ptr_t
)result
;
741 #endif /* LINUX_STACKBOTTOM */
#ifdef FREEBSD_STACKBOTTOM

/* This uses an undocumented sysctl call, but at least one expert */
/* believes it will stay.                                          */

#include <sys/types.h>
#include <sys/sysctl.h>

  ptr_t GC_freebsd_stack_base(void)
    int nm[2] = { CTL_KERN, KERN_USRSTACK }, base, len, r;

    r = sysctl(nm, 2, &base, &len, NULL, 0);

    if (r) ABORT("Error getting stack base");

#endif /* FREEBSD_STACKBOTTOM */
766 #if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
767 && !defined(MSWINCE) && !defined(OS2) && !defined(ECOS)
769 ptr_t
GC_get_stack_base()
774 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
780 # ifdef STACK_GROWS_DOWN
781 result
= (ptr_t
)((((word
)(&dummy
))
782 + STACKBOTTOM_ALIGNMENT_M1
)
783 & ~STACKBOTTOM_ALIGNMENT_M1
);
785 result
= (ptr_t
)(((word
)(&dummy
))
786 & ~STACKBOTTOM_ALIGNMENT_M1
);
788 # endif /* HEURISTIC1 */
789 # ifdef LINUX_STACKBOTTOM
790 result
= GC_linux_stack_base();
792 # ifdef FREEBSD_STACKBOTTOM
793 result
= GC_freebsd_stack_base();
796 # ifdef STACK_GROWS_DOWN
797 result
= GC_find_limit((ptr_t
)(&dummy
), TRUE
);
798 # ifdef HEURISTIC2_LIMIT
799 if (result
> HEURISTIC2_LIMIT
800 && (ptr_t
)(&dummy
) < HEURISTIC2_LIMIT
) {
801 result
= HEURISTIC2_LIMIT
;
805 result
= GC_find_limit((ptr_t
)(&dummy
), FALSE
);
806 # ifdef HEURISTIC2_LIMIT
807 if (result
< HEURISTIC2_LIMIT
808 && (ptr_t
)(&dummy
) > HEURISTIC2_LIMIT
) {
809 result
= HEURISTIC2_LIMIT
;
814 # endif /* HEURISTIC2 */
815 # ifdef STACK_GROWS_DOWN
816 if (result
== 0) result
= (ptr_t
)(signed_word
)(-sizeof(ptr_t
));
819 # endif /* STACKBOTTOM */
822 # endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS */
 * Register static data segment(s) as roots.
 * If more data segments are added later then they need to be registered
 * at that point (as we do with SunOS dynamic loading),
 * or GC_mark_roots needs to check for them (as we do with PCR).
 * Called with allocator lock held.
void GC_register_data_segments()
    HMODULE module_handle;
    struct exe_hdr hdrdos;      /* MSDOS header. */
    struct e32_exe hdr386;      /* Real header for my executable */
    struct o32_obj seg;         /* Current segment */
848 if (DosGetInfoBlocks(&ptib
, &ppib
) != NO_ERROR
) {
849 GC_err_printf0("DosGetInfoBlocks failed\n");
850 ABORT("DosGetInfoBlocks failed\n");
852 module_handle
= ppib
-> pib_hmte
;
853 if (DosQueryModuleName(module_handle
, PBUFSIZ
, path
) != NO_ERROR
) {
854 GC_err_printf0("DosQueryModuleName failed\n");
855 ABORT("DosGetInfoBlocks failed\n");
857 myexefile
= fopen(path
, "rb");
858 if (myexefile
== 0) {
859 GC_err_puts("Couldn't open executable ");
860 GC_err_puts(path
); GC_err_puts("\n");
861 ABORT("Failed to open executable\n");
863 if (fread((char *)(&hdrdos
), 1, sizeof hdrdos
, myexefile
) < sizeof hdrdos
) {
864 GC_err_puts("Couldn't read MSDOS header from ");
865 GC_err_puts(path
); GC_err_puts("\n");
866 ABORT("Couldn't read MSDOS header");
868 if (E_MAGIC(hdrdos
) != EMAGIC
) {
869 GC_err_puts("Executable has wrong DOS magic number: ");
870 GC_err_puts(path
); GC_err_puts("\n");
871 ABORT("Bad DOS magic number");
873 if (fseek(myexefile
, E_LFANEW(hdrdos
), SEEK_SET
) != 0) {
874 GC_err_puts("Seek to new header failed in ");
875 GC_err_puts(path
); GC_err_puts("\n");
876 ABORT("Bad DOS magic number");
878 if (fread((char *)(&hdr386
), 1, sizeof hdr386
, myexefile
) < sizeof hdr386
) {
879 GC_err_puts("Couldn't read MSDOS header from ");
880 GC_err_puts(path
); GC_err_puts("\n");
881 ABORT("Couldn't read OS/2 header");
883 if (E32_MAGIC1(hdr386
) != E32MAGIC1
|| E32_MAGIC2(hdr386
) != E32MAGIC2
) {
884 GC_err_puts("Executable has wrong OS/2 magic number:");
885 GC_err_puts(path
); GC_err_puts("\n");
886 ABORT("Bad OS/2 magic number");
888 if ( E32_BORDER(hdr386
) != E32LEBO
|| E32_WORDER(hdr386
) != E32LEWO
) {
889 GC_err_puts("Executable %s has wrong byte order: ");
890 GC_err_puts(path
); GC_err_puts("\n");
891 ABORT("Bad byte order");
893 if ( E32_CPU(hdr386
) == E32CPU286
) {
894 GC_err_puts("GC can't handle 80286 executables: ");
895 GC_err_puts(path
); GC_err_puts("\n");
898 if (fseek(myexefile
, E_LFANEW(hdrdos
) + E32_OBJTAB(hdr386
),
900 GC_err_puts("Seek to object table failed: ");
901 GC_err_puts(path
); GC_err_puts("\n");
902 ABORT("Seek to object table failed");
904 for (nsegs
= E32_OBJCNT(hdr386
); nsegs
> 0; nsegs
--) {
906 if (fread((char *)(&seg
), 1, sizeof seg
, myexefile
) < sizeof seg
) {
907 GC_err_puts("Couldn't read obj table entry from ");
908 GC_err_puts(path
); GC_err_puts("\n");
909 ABORT("Couldn't read obj table entry");
911 flags
= O32_FLAGS(seg
);
912 if (!(flags
& OBJWRITE
)) continue;
913 if (!(flags
& OBJREAD
)) continue;
914 if (flags
& OBJINVALID
) {
915 GC_err_printf0("Object with invalid pages?\n");
918 GC_add_roots_inner(O32_BASE(seg
), O32_BASE(seg
)+O32_SIZE(seg
), FALSE
);
# if defined(MSWIN32) || defined(MSWINCE)

/* Unfortunately, we have to handle win32s very differently from NT,   */
/* since VirtualQuery has very different semantics.  In particular,    */
/* under win32s a VirtualQuery call on an unmapped page returns an     */
/* invalid result.  Under NT, GC_register_data_segments is a noop and  */
/* all real work is done by GC_register_dynamic_libraries.  Under      */
/* win32s, we cannot find the data segments associated with dll's.     */
/* We register the main data segment here.                             */
  GC_bool GC_win32s = FALSE;    /* We're running under win32s. */

  GC_bool GC_is_win32s()
      DWORD v = GetVersion();

      /* Check that this is not NT, and Windows major version <= 3 */
      return ((v & 0x80000000) && (v & 0xff) <= 3);

      GC_win32s = GC_is_win32s();
949 /* Return the smallest address a such that VirtualQuery */
950 /* returns correct results for all addresses between a and start. */
951 /* Assumes VirtualQuery returns correct information for start. */
952 ptr_t
GC_least_described_address(ptr_t start
)
954 MEMORY_BASIC_INFORMATION buf
;
960 limit
= GC_sysinfo
.lpMinimumApplicationAddress
;
961 p
= (ptr_t
)((word
)start
& ~(GC_page_size
- 1));
963 q
= (LPVOID
)(p
- GC_page_size
);
964 if ((ptr_t
)q
> (ptr_t
)p
/* underflow */ || q
< limit
) break;
965 result
= VirtualQuery(q
, &buf
, sizeof(buf
));
966 if (result
!= sizeof(buf
) || buf
.AllocationBase
== 0) break;
967 p
= (ptr_t
)(buf
.AllocationBase
);
973 /* Is p the start of either the malloc heap, or of one of our */
975 GC_bool
GC_is_heap_base (ptr_t p
)
980 # ifndef REDIRECT_MALLOC
981 static ptr_t malloc_heap_pointer
= 0;
983 if (0 == malloc_heap_pointer
) {
984 MEMORY_BASIC_INFORMATION buf
;
985 void *pTemp
= malloc( 1 );
986 register DWORD result
= VirtualQuery(pTemp
, &buf
, sizeof(buf
));
991 if (result
!= sizeof(buf
)) {
992 ABORT("Weird VirtualQuery result");
994 malloc_heap_pointer
= (ptr_t
)(buf
.AllocationBase
);
996 if (p
== malloc_heap_pointer
) return(TRUE
);
998 for (i
= 0; i
< GC_n_heap_bases
; i
++) {
999 if (GC_heap_bases
[i
] == p
) return(TRUE
);
1005 void GC_register_root_section(ptr_t static_root
)
1007 MEMORY_BASIC_INFORMATION buf
;
1012 char * limit
, * new_limit
;
1014 if (!GC_win32s
) return;
1015 p
= base
= limit
= GC_least_described_address(static_root
);
1016 while (p
< GC_sysinfo
.lpMaximumApplicationAddress
) {
1017 result
= VirtualQuery(p
, &buf
, sizeof(buf
));
1018 if (result
!= sizeof(buf
) || buf
.AllocationBase
== 0
1019 || GC_is_heap_base(buf
.AllocationBase
)) break;
1020 new_limit
= (char *)p
+ buf
.RegionSize
;
1021 protect
= buf
.Protect
;
1022 if (buf
.State
== MEM_COMMIT
1023 && is_writable(protect
)) {
1024 if ((char *)p
== limit
) {
1027 if (base
!= limit
) GC_add_roots_inner(base
, limit
, FALSE
);
1032 if (p
> (LPVOID
)new_limit
/* overflow */) break;
1033 p
= (LPVOID
)new_limit
;
1035 if (base
!= limit
) GC_add_roots_inner(base
, limit
, FALSE
);
1039 void GC_register_data_segments()
1043 GC_register_root_section((ptr_t
)(&dummy
));
1047 # else /* !OS2 && !Windows */
1049 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1050 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1051 char * GC_SysVGetDataStart(max_page_size
, etext_addr
)
1055 word text_end
= ((word
)(etext_addr
) + sizeof(word
) - 1)
1056 & ~(sizeof(word
) - 1);
1057 /* etext rounded to word boundary */
1058 word next_page
= ((text_end
+ (word
)max_page_size
- 1)
1059 & ~((word
)max_page_size
- 1));
1060 word page_offset
= (text_end
& ((word
)max_page_size
- 1));
1061 VOLATILE
char * result
= (char *)(next_page
+ page_offset
);
        /* Note that this isn't equivalent to just adding            */
        /* max_page_size to &etext if &etext is at a page boundary   */
1065 GC_setup_temporary_fault_handler();
1066 if (setjmp(GC_jmp_buf
) == 0) {
1067 /* Try writing to the address. */
1069 GC_reset_fault_handler();
1071 GC_reset_fault_handler();
1072 /* We got here via a longjmp. The address is not readable. */
1073 /* This is known to happen under Solaris 2.4 + gcc, which place */
1074 /* string constants in the text segment, but after etext. */
1075 /* Use plan B. Note that we now know there is a gap between */
1076 /* text and data segments, so plan A bought us something. */
1077 result
= (char *)GC_find_limit((ptr_t
)(DATAEND
) - MIN_PAGE_SIZE
, FALSE
);
1079 return((char *)result
);
1085 void GC_register_data_segments()
1087 struct Process
*proc
;
1088 struct CommandLineInterface
*cli
;
1092 if ((proc
= (struct Process
*)FindTask(0)) == 0) {
1093 GC_err_puts("Cannot find process structure\n");
1096 if ((cli
= BADDR(proc
->pr_CLI
)) == 0) {
1097 GC_err_puts("No CLI\n");
1100 if ((myseglist
= cli
->cli_Module
) == 0) {
1101 GC_err_puts("No seglist from CLI\n");
1105 for (data
= (ULONG
*)BADDR(myseglist
); data
!= 0;
1106 data
= (ULONG
*)BADDR(data
[0])) {
1107 if (((ULONG
) GC_register_data_segments
< (ULONG
) &data
[1]) ||
1108 ((ULONG
) GC_register_data_segments
> (ULONG
) &data
[1] + data
[-1])) {
1109 GC_add_roots_inner((char *)&data
[1],
1110 ((char *)&data
[1]) + data
[-1], FALSE
);
1116 # define GC_AMIGA_DS
1117 # include "AmigaOS.c"
1119 #else /* !OS2 && !Windows && !__AMIGAOS__ */
1121 void GC_register_data_segments()
1123 # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1125 # if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
1126 /* As of Solaris 2.3, the Solaris threads implementation */
1127 /* allocates the data structure for the initial thread with */
1128 /* sbrk at process startup. It needs to be scanned, so that */
1129 /* we don't lose some malloc allocated data structures */
1130 /* hanging from it. We're on thin ice here ... */
1131 extern caddr_t
sbrk();
1133 GC_add_roots_inner(DATASTART
, (char *)sbrk(0), FALSE
);
1135 GC_add_roots_inner(DATASTART
, (char *)(DATAEND
), FALSE
);
1138 # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1139 GC_add_roots_inner(DATASTART
, (char *) get_end(), FALSE
);
1143 # if defined(THINK_C)
1144 extern void* GC_MacGetDataStart(void);
1145 /* globals begin above stack and end at a5. */
1146 GC_add_roots_inner((ptr_t
)GC_MacGetDataStart(),
1147 (ptr_t
)LMGetCurrentA5(), FALSE
);
1149 # if defined(__MWERKS__)
1151 extern void* GC_MacGetDataStart(void);
1152 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1153 # if __option(far_data)
1154 extern void* GC_MacGetDataEnd(void);
1156 /* globals begin above stack and end at a5. */
1157 GC_add_roots_inner((ptr_t
)GC_MacGetDataStart(),
1158 (ptr_t
)LMGetCurrentA5(), FALSE
);
1159 /* MATTHEW: Handle Far Globals */
1160 # if __option(far_data)
        /* Far globals follow the QD globals: */
1162 GC_add_roots_inner((ptr_t
)LMGetCurrentA5(),
1163 (ptr_t
)GC_MacGetDataEnd(), FALSE
);
1166 extern char __data_start__
[], __data_end__
[];
1167 GC_add_roots_inner((ptr_t
)&__data_start__
,
1168 (ptr_t
)&__data_end__
, FALSE
);
1169 # endif /* __POWERPC__ */
1170 # endif /* __MWERKS__ */
1171 # endif /* !THINK_C */
1175 /* Dynamic libraries are added at every collection, since they may */
1179 # endif /* ! __AMIGAOS__ */
1180 # endif /* ! __AROS__ */
1181 # endif /* ! MSWIN32 && ! MSWINCE*/
 * Auxiliary routines for obtaining memory from OS.

# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
        && !defined(MSWIN32) && !defined(MSWINCE) \
        && !defined(MACOS) && !defined(DOS4GW)

extern caddr_t sbrk();
# define SBRK_ARG_T ptrdiff_t
# define SBRK_ARG_T int

/* The compiler seems to generate speculative reads one past the end of */
/* an allocated object.  Hence we need to make sure that the page       */
/* following the last heap page is also mapped.                         */
ptr_t GC_unix_get_mem(bytes)
    caddr_t cur_brk = (caddr_t)sbrk(0);
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
    static caddr_t my_brk_val = 0;

    if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
    if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
    if (cur_brk == my_brk_val) {
        /* Use the extra block we allocated last time. */
        result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
        result -= GC_page_size;
        result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
    my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
    return((ptr_t)result);
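    /* Worked example (illustrative numbers, not from the original     */
    /* sources): with GC_page_size == 0x1000 and sbrk(0) == 0x20003450,*/
    /* lsbs == 0x450, so the first sbrk(0xbb0) pads the break to the   */
    /* page boundary 0x20004000.  The allocation then grabs bytes plus */
    /* one extra page, and my_brk_val records the resulting break; if  */
    /* the break has not moved by the next call, that extra page is    */
    /* reused as the mapped page following the new block, which is     */
    /* what the speculative-read comment above requires.               */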
1231 #else /* Not RS6000 */
1233 #if defined(USE_MMAP)
1234 /* Tested only under Linux, IRIX5 and Solaris 2 */
1236 #ifdef USE_MMAP_FIXED
1237 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1238 /* Seems to yield better performance on Solaris 2, but can */
1239 /* be unreliable if something is already mapped at the address. */
1241 # define GC_MMAP_FLAGS MAP_PRIVATE
1245 # define HEAP_START 0
1248 ptr_t
GC_unix_get_mem(bytes
)
1251 static GC_bool initialized
= FALSE
;
1254 static ptr_t last_addr
= HEAP_START
;
1257 fd
= open("/dev/zero", O_RDONLY
);
1260 if (bytes
& (GC_page_size
-1)) ABORT("Bad GET_MEM arg");
1261 result
= mmap(last_addr
, bytes
, PROT_READ
| PROT_WRITE
| OPT_PROT_EXEC
,
1262 GC_MMAP_FLAGS
, fd
, 0/* offset */);
1263 if (result
== MAP_FAILED
) return(0);
1264 last_addr
= (ptr_t
)result
+ bytes
+ GC_page_size
- 1;
1265 last_addr
= (ptr_t
)((word
)last_addr
& ~(GC_page_size
- 1));
1266 # if !defined(LINUX)
1267 if (last_addr
== 0) {
1268 /* Oops. We got the end of the address space. This isn't */
1269 /* usable by arbitrary C code, since one-past-end pointers */
1270 /* don't work, so we discard it and try again. */
1271 munmap(result
, (size_t)(-GC_page_size
) - (size_t)result
);
1272 /* Leave last page mapped, so we can't repeat. */
1273 return GC_unix_get_mem(bytes
);
1276 GC_ASSERT(last_addr
!= 0);
1278 return((ptr_t
)result
);
1281 #else /* Not RS6000, not USE_MMAP */
1282 ptr_t
GC_unix_get_mem(bytes
)
1287 /* Bare sbrk isn't thread safe. Play by malloc rules. */
1288 /* The equivalent may be needed on other systems as well. */
1292 ptr_t cur_brk
= (ptr_t
)sbrk(0);
1293 SBRK_ARG_T lsbs
= (word
)cur_brk
& (GC_page_size
-1);
1295 if ((SBRK_ARG_T
)bytes
< 0) return(0); /* too big */
1297 if((ptr_t
)sbrk(GC_page_size
- lsbs
) == (ptr_t
)(-1)) return(0);
1299 result
= (ptr_t
)sbrk((SBRK_ARG_T
)bytes
);
1300 if (result
== (ptr_t
)(-1)) result
= 0;
1308 #endif /* Not USE_MMAP */
1309 #endif /* Not RS6000 */
1315 void * os2_alloc(size_t bytes
)
1319 if (DosAllocMem(&result
, bytes
, PAG_EXECUTE
| PAG_READ
|
1320 PAG_WRITE
| PAG_COMMIT
)
1324 if (result
== 0) return(os2_alloc(bytes
));
1331 # if defined(MSWIN32) || defined(MSWINCE)
1332 SYSTEM_INFO GC_sysinfo
;
1337 word GC_n_heap_bases
= 0;
1339 ptr_t
GC_win32_get_mem(bytes
)
1345 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1346 /* There are also unconfirmed rumors of other */
1347 /* problems, so we dodge the issue. */
1348 result
= (ptr_t
) GlobalAlloc(0, bytes
+ HBLKSIZE
);
1349 result
= (ptr_t
)(((word
)result
+ HBLKSIZE
) & ~(HBLKSIZE
-1));
1351 result
= (ptr_t
) VirtualAlloc(NULL
, bytes
,
1352 MEM_COMMIT
| MEM_RESERVE
,
1353 PAGE_EXECUTE_READWRITE
);
1355 if (HBLKDISPL(result
) != 0) ABORT("Bad VirtualAlloc result");
1356 /* If I read the documentation correctly, this can */
1357 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1358 if (GC_n_heap_bases
>= MAX_HEAP_SECTS
) ABORT("Too many heap sections");
1359 GC_heap_bases
[GC_n_heap_bases
++] = result
;
1363 void GC_win32_free_heap ()
1366 while (GC_n_heap_bases
> 0) {
1367 GlobalFree (GC_heap_bases
[--GC_n_heap_bases
]);
1368 GC_heap_bases
[GC_n_heap_bases
] = 0;
1375 # define GC_AMIGA_AM
1376 # include "AmigaOS.c"
1382 word GC_n_heap_bases
= 0;
1384 ptr_t
GC_wince_get_mem(bytes
)
1390 /* Round up allocation size to multiple of page size */
1391 bytes
= (bytes
+ GC_page_size
-1) & ~(GC_page_size
-1);
1393 /* Try to find reserved, uncommitted pages */
1394 for (i
= 0; i
< GC_n_heap_bases
; i
++) {
1395 if (((word
)(-(signed_word
)GC_heap_lengths
[i
])
1396 & (GC_sysinfo
.dwAllocationGranularity
-1))
1398 result
= GC_heap_bases
[i
] + GC_heap_lengths
[i
];
1403 if (i
== GC_n_heap_bases
) {
1404 /* Reserve more pages */
1405 word res_bytes
= (bytes
+ GC_sysinfo
.dwAllocationGranularity
-1)
1406 & ~(GC_sysinfo
.dwAllocationGranularity
-1);
1407 result
= (ptr_t
) VirtualAlloc(NULL
, res_bytes
,
1408 MEM_RESERVE
| MEM_TOP_DOWN
,
1409 PAGE_EXECUTE_READWRITE
);
1410 if (HBLKDISPL(result
) != 0) ABORT("Bad VirtualAlloc result");
1411 /* If I read the documentation correctly, this can */
1412 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1413 if (GC_n_heap_bases
>= MAX_HEAP_SECTS
) ABORT("Too many heap sections");
1414 GC_heap_bases
[GC_n_heap_bases
] = result
;
1415 GC_heap_lengths
[GC_n_heap_bases
] = 0;
1420 result
= (ptr_t
) VirtualAlloc(result
, bytes
,
1422 PAGE_EXECUTE_READWRITE
);
1423 if (result
!= NULL
) {
1424 if (HBLKDISPL(result
) != 0) ABORT("Bad VirtualAlloc result");
1425 GC_heap_lengths
[i
] += bytes
;
/* For now, this only works on Win32/WinCE and some Unix-like */
/* systems.  If you have something else, don't define         */
/* We assume ANSI C to support this feature.                  */

#if !defined(MSWIN32) && !defined(MSWINCE)

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Compute a page aligned starting address for the unmap      */
/* operation on a block of size bytes starting at start.      */
/* Return 0 if the block is too small to make this feasible.  */
ptr_t GC_unmap_start(ptr_t start, word bytes)
    ptr_t result = start;
    /* Round start to next page boundary. */
        result += GC_page_size - 1;
        result = (ptr_t)((word)result & ~(GC_page_size - 1));
    if (result + GC_page_size > start + bytes) return 0;

/* Compute end address for an unmap operation on the indicated */
ptr_t GC_unmap_end(ptr_t start, word bytes)
    ptr_t end_addr = start + bytes;
    end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
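/* Worked example (illustrative numbers, not from the original sources): */
/* with GC_page_size == 0x1000, a block at start == 0x21234 of           */
/* bytes == 0x3000 gives GC_unmap_start == 0x22000 and                   */
/* GC_unmap_end == 0x24000, so only the fully covered pages              */
/* [0x22000, 0x24000) are unmapped and the partially covered pages at    */
/* either end stay mapped.  A block smaller than one aligned page yields */
/* GC_unmap_start == 0, and the callers below simply skip the unmap.     */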
1470 /* Under Win32/WinCE we commit (map) and decommit (unmap) */
1471 /* memory using VirtualAlloc and VirtualFree. These functions */
1472 /* work on individual allocations of virtual memory, made */
1473 /* previously using VirtualAlloc with the MEM_RESERVE flag. */
1474 /* The ranges we need to (de)commit may span several of these */
1475 /* allocations; therefore we use VirtualQuery to check */
1476 /* allocation lengths, and split up the range as necessary. */
1478 /* We assume that GC_remap is called on exactly the same range */
1479 /* as a previous call to GC_unmap. It is safe to consistently */
1480 /* round the endpoints in both places. */
1481 void GC_unmap(ptr_t start
, word bytes
)
1483 ptr_t start_addr
= GC_unmap_start(start
, bytes
);
1484 ptr_t end_addr
= GC_unmap_end(start
, bytes
);
1485 word len
= end_addr
- start_addr
;
1486 if (0 == start_addr
) return;
1487 # if defined(MSWIN32) || defined(MSWINCE)
1489 MEMORY_BASIC_INFORMATION mem_info
;
1491 if (VirtualQuery(start_addr
, &mem_info
, sizeof(mem_info
))
1492 != sizeof(mem_info
))
1493 ABORT("Weird VirtualQuery result");
1494 free_len
= (len
< mem_info
.RegionSize
) ? len
: mem_info
.RegionSize
;
1495 if (!VirtualFree(start_addr
, free_len
, MEM_DECOMMIT
))
1496 ABORT("VirtualFree failed");
1497 GC_unmapped_bytes
+= free_len
;
1498 start_addr
+= free_len
;
1502 if (munmap(start_addr
, len
) != 0) ABORT("munmap failed");
1503 GC_unmapped_bytes
+= len
;
1508 void GC_remap(ptr_t start
, word bytes
)
1510 static int zero_descr
= -1;
1511 ptr_t start_addr
= GC_unmap_start(start
, bytes
);
1512 ptr_t end_addr
= GC_unmap_end(start
, bytes
);
1513 word len
= end_addr
- start_addr
;
1516 # if defined(MSWIN32) || defined(MSWINCE)
1517 if (0 == start_addr
) return;
1519 MEMORY_BASIC_INFORMATION mem_info
;
1521 if (VirtualQuery(start_addr
, &mem_info
, sizeof(mem_info
))
1522 != sizeof(mem_info
))
1523 ABORT("Weird VirtualQuery result");
1524 alloc_len
= (len
< mem_info
.RegionSize
) ? len
: mem_info
.RegionSize
;
1525 result
= VirtualAlloc(start_addr
, alloc_len
,
1527 PAGE_EXECUTE_READWRITE
);
1528 if (result
!= start_addr
) {
1529 ABORT("VirtualAlloc remapping failed");
1531 GC_unmapped_bytes
-= alloc_len
;
1532 start_addr
+= alloc_len
;
1536 if (-1 == zero_descr
) zero_descr
= open("/dev/zero", O_RDWR
);
1537 if (0 == start_addr
) return;
1538 result
= mmap(start_addr
, len
, PROT_READ
| PROT_WRITE
| OPT_PROT_EXEC
,
1539 MAP_FIXED
| MAP_PRIVATE
, zero_descr
, 0);
1540 if (result
!= start_addr
) {
1541 ABORT("mmap remapping failed");
1543 GC_unmapped_bytes
-= len
;
1547 /* Two adjacent blocks have already been unmapped and are about to */
1548 /* be merged. Unmap the whole block. This typically requires */
1549 /* that we unmap a small section in the middle that was not previously */
1550 /* unmapped due to alignment constraints. */
1551 void GC_unmap_gap(ptr_t start1
, word bytes1
, ptr_t start2
, word bytes2
)
1553 ptr_t start1_addr
= GC_unmap_start(start1
, bytes1
);
1554 ptr_t end1_addr
= GC_unmap_end(start1
, bytes1
);
1555 ptr_t start2_addr
= GC_unmap_start(start2
, bytes2
);
1556 ptr_t end2_addr
= GC_unmap_end(start2
, bytes2
);
1557 ptr_t start_addr
= end1_addr
;
1558 ptr_t end_addr
= start2_addr
;
1560 GC_ASSERT(start1
+ bytes1
== start2
);
1561 if (0 == start1_addr
) start_addr
= GC_unmap_start(start1
, bytes1
+ bytes2
);
1562 if (0 == start2_addr
) end_addr
= GC_unmap_end(start1
, bytes1
+ bytes2
);
1563 if (0 == start_addr
) return;
1564 len
= end_addr
- start_addr
;
1565 # if defined(MSWIN32) || defined(MSWINCE)
1567 MEMORY_BASIC_INFORMATION mem_info
;
1569 if (VirtualQuery(start_addr
, &mem_info
, sizeof(mem_info
))
1570 != sizeof(mem_info
))
1571 ABORT("Weird VirtualQuery result");
1572 free_len
= (len
< mem_info
.RegionSize
) ? len
: mem_info
.RegionSize
;
1573 if (!VirtualFree(start_addr
, free_len
, MEM_DECOMMIT
))
1574 ABORT("VirtualFree failed");
1575 GC_unmapped_bytes
+= free_len
;
1576 start_addr
+= free_len
;
1580 if (len
!= 0 && munmap(start_addr
, len
) != 0) ABORT("munmap failed");
1581 GC_unmapped_bytes
+= len
;
1585 #endif /* USE_MUNMAP */
1587 /* Routine for pushing any additional roots. In THREADS */
1588 /* environment, this is also responsible for marking from */
1589 /* thread stacks. */
1591 void (*GC_push_other_roots
)() = 0;
1595 PCR_ERes
GC_push_thread_stack(PCR_Th_T
*t
, PCR_Any dummy
)
1597 struct PCR_ThCtl_TInfoRep info
;
1600 info
.ti_stkLow
= info
.ti_stkHi
= 0;
1601 result
= PCR_ThCtl_GetInfo(t
, &info
);
1602 GC_push_all_stack((ptr_t
)(info
.ti_stkLow
), (ptr_t
)(info
.ti_stkHi
));
/* Push the contents of an old object.  We treat this as stack */
/* data only because that makes it robust against mark stack   */
1609 PCR_ERes
GC_push_old_obj(void *p
, size_t size
, PCR_Any data
)
1611 GC_push_all_stack((ptr_t
)p
, (ptr_t
)p
+ size
);
1612 return(PCR_ERes_okay
);
1616 void GC_default_push_other_roots
GC_PROTO((void))
1618 /* Traverse data allocated by previous memory managers. */
1620 extern struct PCR_MM_ProcsRep
* GC_old_allocator
;
1622 if ((*(GC_old_allocator
->mmp_enumerate
))(PCR_Bool_false
,
1625 ABORT("Old object enumeration failed");
1628 /* Traverse all thread stacks. */
1630 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack
,0))
1631 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1632 ABORT("Thread stack marking failed\n");
1640 # ifdef ALL_INTERIOR_POINTERS
1644 void GC_push_thread_structures
GC_PROTO((void))
    /* Not our responsibility. */
1649 extern void ThreadF__ProcessStacks();
1651 void GC_push_thread_stack(start
, stop
)
1654 GC_push_all_stack((ptr_t
)start
, (ptr_t
)stop
+ sizeof(word
));
1657 /* Push routine with M3 specific calling convention. */
1658 GC_m3_push_root(dummy1
, p
, dummy2
, dummy3
)
1660 ptr_t dummy1
, dummy2
;
1665 GC_PUSH_ONE_STACK(q
, p
);
1668 /* M3 set equivalent to RTHeap.TracedRefTypes */
1669 typedef struct { int elts
[1]; } RefTypeSet
;
1670 RefTypeSet GC_TracedRefTypes
= {{0x1}};
1672 void GC_default_push_other_roots
GC_PROTO((void))
1674 /* Use the M3 provided routine for finding static roots. */
1675 /* This is a bit dubious, since it presumes no C roots. */
1676 /* We handle the collector roots explicitly in GC_push_roots */
1677 RTMain__GlobalMapProc(GC_m3_push_root
, 0, GC_TracedRefTypes
);
1678 if (GC_words_allocd
> 0) {
1679 ThreadF__ProcessStacks(GC_push_thread_stack
);
1681 /* Otherwise this isn't absolutely necessary, and we have */
1682 /* startup ordering problems. */
1685 # endif /* SRC_M3 */
1687 # if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
1688 defined(GC_WIN32_THREADS)
1690 extern void GC_push_all_stacks();
1692 void GC_default_push_other_roots
GC_PROTO((void))
1694 GC_push_all_stacks();
1697 # endif /* GC_SOLARIS_THREADS || GC_PTHREADS */
1699 void (*GC_push_other_roots
) GC_PROTO((void)) = GC_default_push_other_roots
;
1701 #endif /* THREADS */
 * Routines for accessing dirty bits on virtual pages.
 * We plan to eventually implement four strategies for doing so:
 * DEFAULT_VDB: A simple dummy implementation that treats every page
 *              as possibly dirty.  This makes incremental collection
 *              useless, but the implementation is still correct.
 * PCR_VDB:     Use PPCR's virtual dirty bit facility.
 * PROC_VDB:    Use the /proc facility for reading dirty bits.  Only
 *              works under some SVR4 variants.  Even then, it may be
 *              too slow to be entirely satisfactory.  Requires reading
 *              dirty bits for entire address space.  Implementations tend
 *              to assume that the client is a (slow) debugger.
 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
 *              dirtied pages.  The implementation (and implementability)
 *              is highly system dependent.  This usually fails when system
 *              calls write to a protected page.  We prevent the read system
 *              call from doing so.  It is the client's responsibility to
 *              make sure that other system calls are similarly protected
 *              or write only to the stack.
1724 GC_bool GC_dirty_maintained
= FALSE
;
1728 /* All of the following assume the allocation lock is held, and */
1729 /* signals are disabled. */
1731 /* The client asserts that unallocated pages in the heap are never */
1734 /* Initialize virtual dirty bit implementation. */
1735 void GC_dirty_init()
1737 GC_dirty_maintained
= TRUE
;
/* Retrieve system dirty bits for heap to a local buffer. */
/* Restore the system's notion of which pages are dirty.  */
1742 void GC_read_dirty()
1745 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1746 /* If the actual page size is different, this returns TRUE if any */
1747 /* of the pages overlapping h are dirty. This routine may err on the */
1748 /* side of labelling pages as dirty (and this implementation does). */
1750 GC_bool
GC_page_was_dirty(h
)
1757 * The following two routines are typically less crucial. They matter
1758 * most with large dynamic libraries, or if we can't accurately identify
1759 * stacks, e.g. under Solaris 2.X. Otherwise the following default
1760 * versions are adequate.
1763 /* Could any valid GC heap pointer ever have been written to this page? */
1765 GC_bool
GC_page_was_ever_dirty(h
)
1771 /* Reset the n pages starting at h to "was never dirty" status. */
1772 void GC_is_fresh(h
, n
)
1778 /* A call hints that h is about to be written. */
1779 /* May speed up some dirty bit implementations. */
1781 void GC_write_hint(h
)
1786 # endif /* DEFAULT_VDB */
# ifdef MPROTECT_VDB

 * See DEFAULT_VDB for interface descriptions.

 * This implementation maintains dirty bits itself by catching write
 * faults and keeping track of them.  We assume nobody else catches
 * SIGBUS or SIGSEGV.  We assume no write faults occur in system calls
 * except as a result of a read system call.  This means clients must
 * either ensure that system calls do not touch the heap, or must
 * provide their own wrappers analogous to the one for read.
 * We assume the page size is a multiple of HBLKSIZE.
 * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
 * tried to use portable code where easily possible.  It is known
 * not to work under a number of other systems.
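/* Informal sketch of the cycle this implies (added for orientation;   */
/* only GC_dirty_pages, GC_write_fault_handler and GC_read_dirty below */
/* are names from this file, the rest is paraphrased):                 */
/*   1. Heap pages are mprotect'ed read-only, initially and again      */
/*      whenever the dirty bits are read.                              */
/*   2. The mutator's first write to such a page raises SIGSEGV or     */
/*      SIGBUS; GC_write_fault_handler records the page in             */
/*      GC_dirty_pages, UNPROTECTs it, and returns so the faulting     */
/*      write is retried.                                              */
/*   3. GC_read_dirty later hands the accumulated bits to the marker   */
/*      and clears them for the next cycle.                            */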
# if !defined(MSWIN32) && !defined(MSWINCE)

#   include <sys/mman.h>
#   include <signal.h>
#   include <sys/syscall.h>

#   define PROTECT(addr, len) \
          if (mprotect((caddr_t)(addr), (size_t)(len), \
                       PROT_READ | OPT_PROT_EXEC) < 0) { \
            ABORT("mprotect failed"); \
#   define UNPROTECT(addr, len) \
          if (mprotect((caddr_t)(addr), (size_t)(len), \
                       PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
            ABORT("un-mprotect failed"); \

#   include <signal.h>
    static DWORD protect_junk;
#   define PROTECT(addr, len) \
          if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
            DWORD last_error = GetLastError(); \
            GC_printf1("Last error code: %lx\n", last_error); \
            ABORT("VirtualProtect failed"); \
#   define UNPROTECT(addr, len) \
          if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
            ABORT("un-VirtualProtect failed"); \
1847 #if defined(SUNOS4) || defined(FREEBSD)
1848 typedef void (* SIG_PF
)();
1850 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
1851 || defined(MACOSX) || defined(HURD)
1853 typedef void (* SIG_PF
)(int);
1855 typedef void (* SIG_PF
)();
1858 #if defined(MSWIN32)
1859 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF
;
1861 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1863 #if defined(MSWINCE)
1864 typedef LONG (WINAPI
*SIG_PF
)(struct _EXCEPTION_POINTERS
*);
1866 # define SIG_DFL (SIG_PF) (-1)
1869 #if defined(IRIX5) || defined(OSF1) || defined(HURD)
1870 typedef void (* REAL_SIG_PF
)(int, int, struct sigcontext
*);
1872 #if defined(SUNOS5SIGS)
1874 # define SIGINFO __siginfo
1876 # define SIGINFO siginfo
1879 typedef void (* REAL_SIG_PF
)(int, struct SIGINFO
*, void *);
1881 typedef void (* REAL_SIG_PF
)();
1885 # include <linux/version.h>
1886 # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(IA64)
1887 typedef struct sigcontext s_c
;
1889 typedef struct sigcontext_struct s_c
;
1891 # if defined(ALPHA) || defined(M68K)
1892 typedef void (* REAL_SIG_PF
)(int, int, s_c
*);
1894 # if defined(IA64) || defined(HP_PA)
1895 typedef void (* REAL_SIG_PF
)(int, siginfo_t
*, s_c
*);
1897 typedef void (* REAL_SIG_PF
)(int, s_c
);
1901 /* Retrieve fault address from sigcontext structure by decoding */
1903 char * get_fault_addr(s_c
*sc
) {
1907 instr
= *((unsigned *)(sc
->sc_pc
));
1908 faultaddr
= sc
->sc_regs
[(instr
>> 16) & 0x1f];
1909 faultaddr
+= (word
) (((int)instr
<< 16) >> 16);
1910 return (char *)faultaddr
;
1912 # endif /* !ALPHA */
# if defined(MACOSX) /* Should also test for PowerPC? */
    typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);

/* Decodes the machine instruction which was responsible for the sending of the
   SIGBUS signal.  Sadly this is the only way to find the faulting address because
   the signal handler doesn't get it directly from the kernel (although it is
   available on the Mach level, but dropped by the BSD personality before it
   calls our signal handler...)
   This code should be able to deal correctly with all PPCs starting from the
   601 up to and including the G4s (including Velocity Engine). */
#define EXTRACT_OP1(iw)     (((iw) & 0xFC000000) >> 26)
#define EXTRACT_OP2(iw)     (((iw) & 0x000007FE) >> 1)
#define EXTRACT_REGA(iw)    (((iw) & 0x001F0000) >> 16)
#define EXTRACT_REGB(iw)    (((iw) & 0x03E00000) >> 21)
#define EXTRACT_REGC(iw)    (((iw) & 0x0000F800) >> 11)
#define EXTRACT_DISP(iw)    ((short *) &(iw))[1]
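/* Worked example (illustrative, not from the original sources): for the */
/* instruction word 0x90610008, "stw r3,8(r1)" encoded on a big-endian   */
/* PPC, EXTRACT_OP1 yields 36 (the stw primary opcode), EXTRACT_REGA     */
/* yields 1 (the base register rA), and EXTRACT_DISP yields 8, so the    */
/* faulting address is reconstructed below as regs[1] + 8.               */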
1932 static char *get_fault_addr(struct sigcontext
*scp
)
1934 unsigned int instr
= *((unsigned int *) scp
->sc_ir
);
1935 unsigned int * regs
= &((unsigned int *) scp
->sc_regs
)[2];
1937 unsigned int baseA
= 0, baseB
= 0;
1938 unsigned int addr
, alignmask
= 0xFFFFFFFF;
1940 #ifdef GC_DEBUG_DECODER
1941 GC_err_printf1("Instruction: 0x%lx\n", instr
);
1942 GC_err_printf1("Opcode 1: d\n", (int)EXTRACT_OP1(instr
));
1944 switch(EXTRACT_OP1(instr
)) {
1948 case 55: /* stfdu */
1950 case 53: /* stfsu */
1956 tmp
= EXTRACT_REGA(instr
);
1959 disp
= EXTRACT_DISP(instr
);
1962 #ifdef GC_DEBUG_DECODER
1963 GC_err_printf1("Opcode 2: %d\n", (int)EXTRACT_OP2(instr
));
1965 switch(EXTRACT_OP2(instr
)) {
1967 case 54: /* dcbst */
1968 case 1014: /* dcbz */
1969 case 247: /* stbux */
1970 case 215: /* stbx */
1971 case 759: /* stfdux */
1972 case 727: /* stfdx */
1973 case 983: /* stfiwx */
1974 case 695: /* stfsux */
1975 case 663: /* stfsx */
1976 case 918: /* sthbrx */
1977 case 439: /* sthux */
1978 case 407: /* sthx */
1979 case 661: /* stswx */
1980 case 662: /* stwbrx */
1981 case 150: /* stwcx. */
1982 case 183: /* stwux */
1983 case 151: /* stwx */
1984 case 135: /* stvebx */
1985 case 167: /* stvehx */
1986 case 199: /* stvewx */
1987 case 231: /* stvx */
1988 case 487: /* stvxl */
1989 tmp
= EXTRACT_REGA(instr
);
1992 baseB
= regs
[EXTRACT_REGC(instr
)];
1993 /* determine Altivec alignment mask */
1994 switch(EXTRACT_OP2(instr
)) {
1995 case 167: /* stvehx */
1996 alignmask
= 0xFFFFFFFE;
1998 case 199: /* stvewx */
1999 alignmask
= 0xFFFFFFFC;
2001 case 231: /* stvx */
2002 alignmask
= 0xFFFFFFF0;
2004 case 487: /* stvxl */
2005 alignmask
= 0xFFFFFFF0;
2009 case 725: /* stswi */
2010 tmp
= EXTRACT_REGA(instr
);
2014 default: /* ignore instruction */
2015 #ifdef GC_DEBUG_DECODER
2016 GC_err_printf("Ignored by inner handler\n");
2022 default: /* ignore instruction */
2023 #ifdef GC_DEBUG_DECODER
2024 GC_err_printf("Ignored by main handler\n");
2030 addr
= (baseA
+ baseB
) + disp
;
2032 #ifdef GC_DEBUG_DECODER
2033 GC_err_printf1("BaseA: %d\n", baseA
);
2034 GC_err_printf1("BaseB: %d\n", baseB
);
2035 GC_err_printf1("Disp: %d\n", disp
);
2036 GC_err_printf1("Address: %d\n", addr
);
2038 return (char *)addr
;
2042 SIG_PF GC_old_bus_handler
;
2043 SIG_PF GC_old_segv_handler
; /* Also old MSWIN32 ACCESS_VIOLATION filter */
2046 /* We need to lock around the bitmap update in the write fault handler */
2047 /* in order to avoid the risk of losing a bit. We do this with a */
2048 /* test-and-set spin lock if we know how to do that. Otherwise we */
2049 /* check whether we are already in the handler and use the dumb but */
2050 /* safe fallback algorithm of setting all bits in the word. */
2051 /* Contention should be very rare, so we do the minimum to handle it */
2053 #ifdef GC_TEST_AND_SET_DEFINED
2054 static VOLATILE
unsigned int fault_handler_lock
= 0;
2055 void async_set_pht_entry_from_index(VOLATILE page_hash_table db
, int index
) {
2056 while (GC_test_and_set(&fault_handler_lock
)) {}
2057 /* Could also revert to set_pht_entry_from_index_safe if initial */
2058 /* GC_test_and_set fails. */
2059 set_pht_entry_from_index(db
, index
);
2060 GC_clear(&fault_handler_lock
);
2062 #else /* !GC_TEST_AND_SET_DEFINED */
2063 /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
2064 /* just before we notice the conflict and correct it. We may end up */
2065 /* looking at it while it's wrong. But this requires contention */
2066 /* exactly when a GC is triggered, which seems far less likely to */
2067 /* fail than the old code, which had no reported failures. Thus we */
2068 /* leave it this way while we think of something better, or support */
2069 /* GC_test_and_set on the remaining platforms. */
2070 static VOLATILE word currently_updating
= 0;
2071 void async_set_pht_entry_from_index(VOLATILE page_hash_table db
, int index
) {
2072 unsigned int update_dummy
;
2073 currently_updating
= (word
)(&update_dummy
);
2074 set_pht_entry_from_index(db
, index
);
2075 /* If we get contention in the 10 or so instruction window here, */
2076 /* and we get stopped by a GC between the two updates, we lose! */
2077 if (currently_updating
!= (word
)(&update_dummy
)) {
2078 set_pht_entry_from_index_safe(db
, index
);
2079 /* We claim that if two threads concurrently try to update the */
2080 /* dirty bit vector, the first one to execute UPDATE_START */
2081 /* will see it changed when UPDATE_END is executed. (Note that */
2082 /* &update_dummy must differ in two distinct threads.) It */
2083 /* will then execute set_pht_entry_from_index_safe, thus */
2084 /* returning us to a safe state, though not soon enough. */
2087 #endif /* !GC_TEST_AND_SET_DEFINED */
2088 #else /* !THREADS */
2089 # define async_set_pht_entry_from_index(db, index) \
2090 set_pht_entry_from_index(db, index)
2091 #endif /* !THREADS */
2094 # if defined (SUNOS4) || defined(FREEBSD)
2095 void GC_write_fault_handler(sig
, code
, scp
, addr
)
2097 struct sigcontext
*scp
;
2100 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2101 # define CODE_OK (FC_CODE(code) == FC_PROT \
2102 || (FC_CODE(code) == FC_OBJERR \
2103 && FC_ERRNO(code) == FC_PROT))
2106 # define SIG_OK (sig == SIGBUS)
2107 # define CODE_OK (code == BUS_PAGE_FAULT)
2110 # if defined(IRIX5) || defined(OSF1) || defined(HURD)
2112 void GC_write_fault_handler(int sig
, int code
, struct sigcontext
*scp
)
2114 # define SIG_OK (sig == SIGSEGV)
2115 # define CODE_OK (code == 2 /* experimentally determined */)
2118 # define SIG_OK (sig == SIGSEGV)
2119 # define CODE_OK (code == EACCES)
2122 # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
2123 # define CODE_OK TRUE
2127 # if defined(ALPHA) || defined(M68K)
2128 void GC_write_fault_handler(int sig
, int code
, s_c
* sc
)
2130 # if defined(IA64) || defined(HP_PA)
2131 void GC_write_fault_handler(int sig
, siginfo_t
* si
, s_c
* scp
)
2133 void GC_write_fault_handler(int sig
, s_c sc
)
2136 # define SIG_OK (sig == SIGSEGV)
2137 # define CODE_OK TRUE
2138 /* Empirically c.trapno == 14, on IA32, but is that useful? */
2139 /* Should probably consider alignment issues on other */
2140 /* architectures. */
2142 # if defined(SUNOS5SIGS)
2144 void GC_write_fault_handler(int sig
, struct SIGINFO
*scp
, void * context
)
2146 void GC_write_fault_handler(sig
, scp
, context
)
2148 struct SIGINFO
*scp
;
2152 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2153 # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
2154 || (scp -> si_code == BUS_ADRERR) \
2155 || (scp -> si_code == BUS_UNKNOWN) \
2156 || (scp -> si_code == SEGV_UNKNOWN) \
2157 || (scp -> si_code == BUS_OBJERR)
2159 # define SIG_OK (sig == SIGSEGV)
2160 # define CODE_OK (scp -> si_code == SEGV_ACCERR)
2164 # if defined(MACOSX)
2165 void GC_write_fault_handler(int sig
, int code
, struct sigcontext
*scp
)
2166 # define SIG_OK (sig == SIGBUS)
2167 # define CODE_OK (code == 0 /* experimentally determined */)
2170 # if defined(MSWIN32) || defined(MSWINCE)
2171 LONG WINAPI
GC_write_fault_handler(struct _EXCEPTION_POINTERS
*exc_info
)
2172 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
2173 STATUS_ACCESS_VIOLATION)
2174 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
{
    register unsigned i;
#   if defined(HURD)
        char *addr = (char *) code;
#   endif
#   ifdef IRIX5
        char * addr = (char *) (size_t) (scp -> sc_badvaddr);
#   endif
#   if defined(OSF1) && defined(ALPHA)
        char * addr = (char *) (scp -> sc_traparg_a0);
#   endif
#   ifdef SUNOS5SIGS
        char * addr = (char *) (scp -> si_addr);
#   endif
#   ifdef LINUX
#     ifdef I386
        char * addr = (char *) (sc.cr2);
#     else
#       if defined(M68K)
          char * addr = NULL;

          struct sigcontext *scp = (struct sigcontext *)(sc);

          int format = (scp->sc_formatvec >> 12) & 0xf;
          unsigned long *framedata = (unsigned long *)(scp + 1);
          unsigned long ea;

          if (format == 0xa || format == 0xb) {
                /* 68020/030 */
                ea = framedata[2];
          } else if (format == 7) {
                /* 68040 */
                ea = framedata[3];
                if (framedata[1] & 0x08000000) {
                        /* correct addr on misaligned access */
                        ea = (ea+4095)&(~4095);
                }
          } else if (format == 4) {
                /* 68060 */
                ea = framedata[0];
                if (framedata[1] & 0x08000000) {
                        /* correct addr on misaligned access */
                        ea = (ea+4095)&(~4095);
                }
          }
          addr = (char *)ea;
#       else
#         ifdef ALPHA
            char * addr = get_fault_addr(sc);
#         else
#           if defined(IA64) || defined(HP_PA)
              char * addr = si -> si_addr;
              /* I believe this is claimed to work on all platforms for */
              /* Linux 2.3.47 and later.  Hopefully we don't have to    */
              /* worry about earlier kernels on IA64.                   */
#           else
#             if defined(POWERPC)
                char * addr = (char *) (sc.regs->dar);
#             else
                --> architecture not supported
#             endif
#           endif
#         endif
#       endif
#     endif
#   endif
#   if defined(MACOSX)
        char * addr = get_fault_addr(scp);
#   endif
#   if defined(MSWIN32) || defined(MSWINCE)
        char * addr = (char *) (exc_info -> ExceptionRecord
                                -> ExceptionInformation[1]);
#       define sig SIGSEGV
#   endif
    if (SIG_OK && CODE_OK) {
        register struct hblk * h =
                        (struct hblk *)((word)addr & ~(GC_page_size-1));
        GC_bool in_allocd_block;

#       ifdef SUNOS5SIGS
            /* Address is only within the correct physical page.       */
            in_allocd_block = FALSE;
            for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
              if (HDR(h+i) != 0) {
                in_allocd_block = TRUE;
                break;
              }
            }
#       else
            in_allocd_block = (HDR(addr) != 0);
#       endif
        if (!in_allocd_block) {
            /* Heap blocks now begin and end on page boundaries */
            SIG_PF old_handler;

            if (sig == SIGSEGV) {
                old_handler = GC_old_segv_handler;
            } else {
                old_handler = GC_old_bus_handler;
            }
            if (old_handler == SIG_DFL) {
#               if !defined(MSWIN32) && !defined(MSWINCE)
                    GC_err_printf1("Segfault at 0x%lx\n", addr);
                    ABORT("Unexpected bus error or segmentation fault");
#               else
                    return(EXCEPTION_CONTINUE_SEARCH);
#               endif
            } else {
#               if defined (SUNOS4) || defined(FREEBSD)
                    (*old_handler) (sig, code, scp, addr);
                    return;
#               endif
#               if defined (SUNOS5SIGS)
                    (*(REAL_SIG_PF)old_handler) (sig, scp, context);
                    return;
#               endif
#               if defined (LINUX)
#                 if defined(ALPHA) || defined(M68K)
                    (*(REAL_SIG_PF)old_handler) (sig, code, sc);
#                 else
#                   if defined(IA64) || defined(HP_PA)
                      (*(REAL_SIG_PF)old_handler) (sig, si, scp);
#                   else
                      (*(REAL_SIG_PF)old_handler) (sig, sc);
#                   endif
#                 endif
                    return;
#               endif
#               if defined (IRIX5) || defined(OSF1) || defined(HURD)
                    (*(REAL_SIG_PF)old_handler) (sig, code, scp);
                    return;
#               endif
#               if defined(MACOSX)
                    (*(REAL_SIG_PF)old_handler) (sig, code, scp);
                    return;
#               endif
#               ifdef MSWIN32
                    return((*old_handler)(exc_info));
#               endif
            }
        }
        UNPROTECT(h, GC_page_size);
        /* We need to make sure that no collection occurs between       */
        /* the UNPROTECT and the setting of the dirty bit.  Otherwise   */
        /* a write by a third thread might go unnoticed.  Reversing     */
        /* the order is just as bad, since we would end up unprotecting */
        /* a page in a GC cycle during which it's not marked.           */
        /* Currently we do this by disabling the thread stopping        */
        /* signals while this handler is running.  An alternative might */
        /* be to record the fact that we're about to unprotect, or      */
        /* have just unprotected a page in the GC's thread structure,   */
        /* and then to have the thread stopping code set the dirty      */
        /* flag, if necessary.                                          */
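        /* One interleaving the signal blocking rules out, with a       */
        /* hypothetical second mutator thread T:                        */
        /*   1. this handler executes UNPROTECT(h, GC_page_size)        */
        /*   2. T stores a pointer into h; the page is unprotected, so  */
        /*      no fault is taken and nothing is recorded               */
        /*   3. a collection runs; h is not yet in GC_dirty_pages, so   */
        /*      it is treated as clean and T's store may be missed      */
        /*   4. only now is the dirty bit set, too late for step 3.     */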
        for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
            register int index = PHT_HASH(h+i);

            async_set_pht_entry_from_index(GC_dirty_pages, index);
        }
#       if defined(OSF1)
            /* These reset the signal handler each time by default. */
            signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
#       endif
        /* The write may not take place before dirty bits are read.    */
        /* But then we'll fault again ...                               */
#       if defined(MSWIN32) || defined(MSWINCE)
            return(EXCEPTION_CONTINUE_EXECUTION);
#       else
            return;
#       endif
    }
#if defined(MSWIN32) || defined(MSWINCE)
    return EXCEPTION_CONTINUE_SEARCH;
#else
    GC_err_printf1("Segfault at 0x%lx\n", addr);
    ABORT("Unexpected bus error or segmentation fault");
#endif
}
/*
 * We hold the allocation lock.  We expect block h to be written
 * shortly.
 */
void GC_write_hint(h)
struct hblk *h;
{
    register struct hblk * h_trunc;
    register unsigned i;
    register GC_bool found_clean;

    if (!GC_dirty_maintained) return;
    h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
    found_clean = FALSE;
    for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
        register int index = PHT_HASH(h_trunc+i);

        if (!get_pht_entry_from_index(GC_dirty_pages, index)) {
            found_clean = TRUE;
            async_set_pht_entry_from_index(GC_dirty_pages, index);
        }
    }
    if (found_clean) {
        UNPROTECT(h_trunc, GC_page_size);
    }
}
void GC_dirty_init()
{
#   if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
       defined(OSF1) || defined(HURD)
      struct sigaction act, oldact;
      /* We should probably specify SA_SIGINFO for Linux, and handle   */
      /* the different architectures more uniformly.                   */
#     if defined(IRIX5) || defined(LINUX) || defined(OSF1) || defined(HURD)
        act.sa_flags = SA_RESTART;
        act.sa_handler = (SIG_PF)GC_write_fault_handler;
#     else
        act.sa_flags = SA_RESTART | SA_SIGINFO;
        act.sa_sigaction = GC_write_fault_handler;
#     endif
      (void)sigemptyset(&act.sa_mask);
#     ifdef SIG_SUSPEND
        /* Arrange to postpone SIG_SUSPEND while we're in a write fault */
        /* handler.  This effectively makes the handler atomic w.r.t.   */
        /* stopping the world for GC.                                    */
        (void)sigaddset(&act.sa_mask, SIG_SUSPEND);
#     endif /* SIG_SUSPEND */
#   endif
#   if defined(MACOSX)
      struct sigaction act, oldact;

      act.sa_flags = SA_RESTART;
      act.sa_handler = GC_write_fault_handler;
      sigemptyset(&act.sa_mask);
#   endif
#   ifdef PRINTSTATS
      GC_printf0("Initializing mprotect virtual dirty bit implementation\n");
#   endif
    GC_dirty_maintained = TRUE;
    if (GC_page_size % HBLKSIZE != 0) {
        GC_err_printf0("Page size not multiple of HBLKSIZE\n");
        ABORT("Page size not multiple of HBLKSIZE");
    }
#   if defined(SUNOS4) || defined(FREEBSD)
      GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
      if (GC_old_bus_handler == SIG_IGN) {
        GC_err_printf0("Previously ignored bus error!?");
        GC_old_bus_handler = SIG_DFL;
      }
      if (GC_old_bus_handler != SIG_DFL) {
#       ifdef PRINTSTATS
          GC_err_printf0("Replaced other SIGBUS handler\n");
#       endif
      }
#   endif
#   if defined(SUNOS4)
      GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
      if (GC_old_segv_handler == SIG_IGN) {
        GC_err_printf0("Previously ignored segmentation violation!?");
        GC_old_segv_handler = SIG_DFL;
      }
      if (GC_old_segv_handler != SIG_DFL) {
#       ifdef PRINTSTATS
          GC_err_printf0("Replaced other SIGSEGV handler\n");
#       endif
      }
#   endif
#   if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) \
       || defined(OSF1) || defined(HURD)
      /* SUNOS5SIGS includes HPUX */
#     if defined(GC_IRIX_THREADS)
        sigaction(SIGSEGV, 0, &oldact);
        sigaction(SIGSEGV, &act, 0);
#     else
        sigaction(SIGSEGV, &act, &oldact);
#     endif
#     if defined(_sigargs) || defined(HURD)
        /* This is Irix 5.x, not 6.x.  Irix 5.x does not have   */
        /* sa_sigaction.                                         */
        GC_old_segv_handler = oldact.sa_handler;
#     else /* Irix 6.x or SUNOS5SIGS or LINUX */
        if (oldact.sa_flags & SA_SIGINFO) {
            GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
        } else {
            GC_old_segv_handler = oldact.sa_handler;
        }
#     endif
      if (GC_old_segv_handler == SIG_IGN) {
        GC_err_printf0("Previously ignored segmentation violation!?");
        GC_old_segv_handler = SIG_DFL;
      }
      if (GC_old_segv_handler != SIG_DFL) {
#       ifdef PRINTSTATS
          GC_err_printf0("Replaced other SIGSEGV handler\n");
#       endif
      }
#   endif
#   if defined(MACOSX) || defined(HPUX) || defined(LINUX) || defined(HURD)
      sigaction(SIGBUS, &act, &oldact);
      GC_old_bus_handler = oldact.sa_handler;
      if (GC_old_bus_handler == SIG_IGN) {
        GC_err_printf0("Previously ignored bus error!?");
        GC_old_bus_handler = SIG_DFL;
      }
      if (GC_old_bus_handler != SIG_DFL) {
#       ifdef PRINTSTATS
          GC_err_printf0("Replaced other SIGBUS handler\n");
#       endif
      }
#   endif /* MACOS || HPUX || LINUX */
#   if defined(MSWIN32)
      GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
      if (GC_old_segv_handler != NULL) {
#       ifdef PRINTSTATS
          GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
#       endif
      } else {
          GC_old_segv_handler = SIG_DFL;
      }
#   endif
}
void GC_protect_heap()
{
    ptr_t start;
    word len;
    unsigned i;

    for (i = 0; i < GC_n_heap_sects; i++) {
        start = GC_heap_sects[i].hs_start;
        len = GC_heap_sects[i].hs_bytes;
        PROTECT(start, len);
    }
}

/* We assume that either the world is stopped or it's OK to lose dirty */
/* bits while this is happening (as in GC_enable_incremental).         */
void GC_read_dirty()
{
    BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
          (sizeof GC_dirty_pages));
    BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
    GC_protect_heap();
}

GC_bool GC_page_was_dirty(h)
struct hblk * h;
{
    register word index = PHT_HASH(h);

    return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
}
/*
 * Acquiring the allocation lock here is dangerous, since this
 * can be called from within GC_call_with_alloc_lock, and the cord
 * package does so.  On systems that allow nested lock acquisition, this
 * happens to work.
 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
 */

static GC_bool syscall_acquired_lock = FALSE;   /* Protected by GC lock. */

void GC_begin_syscall()
{
    if (!I_HOLD_LOCK()) {
        LOCK();
        syscall_acquired_lock = TRUE;
    }
}

void GC_end_syscall()
{
    if (syscall_acquired_lock) {
        syscall_acquired_lock = FALSE;
        UNLOCK();
    }
}

void GC_unprotect_range(addr, len)
ptr_t addr;
word len;
{
    struct hblk * start_block;
    struct hblk * end_block;
    register struct hblk *h;
    ptr_t obj_start;

    if (!GC_incremental) return;
    obj_start = GC_base(addr);
    if (obj_start == 0) return;
    if (GC_base(addr + len - 1) != obj_start) {
        ABORT("GC_unprotect_range(range bigger than object)");
    }
    start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
    end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
    end_block += GC_page_size/HBLKSIZE - 1;
    for (h = start_block; h <= end_block; h++) {
        register word index = PHT_HASH(h);

        async_set_pht_entry_from_index(GC_dirty_pages, index);
    }
    UNPROTECT(start_block,
              ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
}
#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_LINUX_THREADS) \
    && !defined(GC_USE_LD_WRAP)
/* Replacement for UNIX system call.    */
/* Other calls that write to the heap   */
/* should be handled similarly.         */
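/* The usual failure mode motivating this (typical, not guaranteed on   */
/* every system): a client call such as                                 */
/*     result = read(fd, buffer_in_gc_heap, len);   (hypothetical names) */
/* writes into the heap from kernel mode; on most systems that write    */
/* does not raise a catchable SIGSEGV/SIGBUS but instead makes read()   */
/* fail (e.g. with EFAULT), so the target range must be unprotected     */
/* and marked dirty here before the real read is performed.             */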
# if defined(__STDC__) && !defined(SUNOS4)
#   include <unistd.h>
#   include <sys/uio.h>
    ssize_t read(int fd, void *buf, size_t nbyte)
# else
#   ifndef LINT
      int read(fd, buf, nbyte)
#   else
      int GC_read(fd, buf, nbyte)
#   endif
    int fd;
    char *buf;
    int nbyte;
# endif
{
    int result;

    GC_begin_syscall();
    GC_unprotect_range(buf, (word)nbyte);
#   if defined(IRIX5) || defined(GC_LINUX_THREADS)
        /* Indirect system call may not always be easily available.    */
        /* We could call _read, but that would interfere with the      */
        /* libpthread interception of read.                             */
        /* On Linux, we have to be careful with the linuxthreads       */
        /* read interception.                                            */
        {
            struct iovec iov;

            iov.iov_base = buf;
            iov.iov_len = nbyte;
            result = readv(fd, &iov, 1);
        }
#   else
#     if defined(HURD)
        result = __read(fd, buf, nbyte);
#     else
        /* The two zero args at the end of this list are because one
           IA-64 syscall() implementation actually requires six args
           to be passed, even though they aren't always used. */
        result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
#     endif /* !HURD */
#   endif
    GC_end_syscall();
    return(result);
}
#endif /* !MSWIN32 && !MSWINCE && !GC_LINUX_THREADS */
#ifdef GC_USE_LD_WRAP
    /* We use the GNU ld call wrapping facility.                        */
    /* This requires that the linker be invoked with "--wrap read".     */
    /* This can be done by passing -Wl,"--wrap read" to gcc.            */
    /* I'm not sure that this actually wraps whatever version of read   */
    /* is called by stdio.  That code also mentions __read.             */
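    /* For example, a build line along the lines of                     */
    /*     gcc -o client client.o gc.a -Wl,"--wrap read"                */
    /* (file names are illustrative) makes the linker resolve           */
    /* references to read to __wrap_read below, while __real_read       */
    /* resolves to the original read.                                    */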
#   include <unistd.h>
    ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
    {
        int result;

        GC_begin_syscall();
        GC_unprotect_range(buf, (word)nbyte);
        result = __real_read(fd, buf, nbyte);
        GC_end_syscall();
        return(result);
    }
    /* We should probably also do this for __read, or whatever stdio   */
    /* actually calls.                                                   */
#endif
GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    return(TRUE);
}

/* Reset the n pages starting at h to "was never dirty" status. */
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
}

# else /* !MPROTECT_VDB */

#   ifdef GC_USE_LD_WRAP
      ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
      { return __real_read(fd, buf, nbyte); }
#   endif

# endif /* MPROTECT_VDB */
# ifdef PROC_VDB

/*
 * See DEFAULT_VDB for interface descriptions.
 */

/*
 * This implementation assumes a Solaris 2.X like /proc pseudo-file-system
 * from which we can read page modified bits.  This facility is far from
 * optimal (e.g. we would like to get the info for only some of the
 * address space), but it avoids intercepting system calls.
 */

#include <sys/types.h>
#include <sys/signal.h>
#include <sys/fault.h>
#include <sys/syscall.h>
#include <sys/procfs.h>
#include <sys/stat.h>

#define INITIAL_BUF_SZ 4096
word GC_proc_buf_size = INITIAL_BUF_SZ;
char *GC_proc_buf;

#ifdef GC_SOLARIS_THREADS
/* We don't have exact sp values for threads.  So we count on   */
/* occasionally declaring stack pages to be fresh.  Thus we     */
/* need a real implementation of GC_is_fresh.  We can't clear   */
/* entries in GC_written_pages, since that would declare all    */
/* pages with the given hash address to be fresh.               */
#   define MAX_FRESH_PAGES 8*1024       /* Must be power of 2 */
    struct hblk ** GC_fresh_pages;      /* A direct mapped cache.      */
                                        /* Collisions are dropped.     */
#   define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
#   define ADD_FRESH_PAGE(h) \
        GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
#   define PAGE_IS_FRESH(h) \
        (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
#endif
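/* Illustrative consequence of the direct mapping above: two blocks     */
/* whose divHBLKSZ values differ by a multiple of MAX_FRESH_PAGES map   */
/* to the same slot, so ADD_FRESH_PAGE on the second silently evicts    */
/* the first, and PAGE_IS_FRESH then reports the evicted block as not   */
/* fresh, which is the conservative answer for this cache.              */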
/* Add all pages in pht2 to pht1 */
void GC_or_pages(pht1, pht2)
page_hash_table pht1, pht2;
{
    register int i;

    for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
}
void GC_dirty_init()
{
    int fd;
    char buf[30];

    GC_dirty_maintained = TRUE;
    if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
        register int i;

        for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
#       ifdef PRINTSTATS
            GC_printf1("Allocated words:%lu:all pages may have been written\n",
                       (unsigned long)
                            (GC_words_allocd + GC_words_allocd_before_gc));
#       endif
    }
    sprintf(buf, "/proc/%d", getpid());
    fd = open(buf, O_RDONLY);
    if (fd < 0) {
        ABORT("/proc open failed");
    }
    GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
    if (GC_proc_fd < 0) {
        ABORT("/proc ioctl failed");
    }
    GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
#   ifdef GC_SOLARIS_THREADS
        GC_fresh_pages = (struct hblk **)
            GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
        if (GC_fresh_pages == 0) {
            GC_err_printf0("No space for fresh pages\n");
            EXIT();
        }
        BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
#   endif
}

/* Ignore write hints. They don't help us here. */
void GC_write_hint(h)
struct hblk *h;
{
}

#ifdef GC_SOLARIS_THREADS
#   define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
#else
#   define READ(fd,buf,nbytes) read(fd, buf, nbytes)
#endif
void GC_read_dirty()
{
    unsigned long ps, np;
    int nmaps;
    ptr_t vaddr;
    struct prasmap * map;
    char * bufp;
    ptr_t current_addr, limit;
    int i;

    BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));

    bufp = GC_proc_buf;
    if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
#       ifdef PRINTSTATS
            GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
                       GC_proc_buf_size);
#       endif
        {
            /* Retry with larger buffer. */
            word new_size = 2 * GC_proc_buf_size;
            char * new_buf = GC_scratch_alloc(new_size);

            if (new_buf != 0) {
                GC_proc_buf = bufp = new_buf;
                GC_proc_buf_size = new_size;
            }
            if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
                WARN("Insufficient space for /proc read\n", 0);
                /* Punt: treat everything as dirty.     */
                memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
                memset(GC_written_pages, 0xff, sizeof(page_hash_table));
#               ifdef GC_SOLARIS_THREADS
                    BZERO(GC_fresh_pages,
                          MAX_FRESH_PAGES * sizeof (struct hblk *));
#               endif
                return;
            }
        }
    }
    /* Copy dirty bits into GC_grungy_pages */
        nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
        /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
                   nmaps, PG_REFERENCED, PG_MODIFIED); */
        bufp = bufp + sizeof(struct prpageheader);
        for (i = 0; i < nmaps; i++) {
            map = (struct prasmap *)bufp;
            vaddr = (ptr_t)(map -> pr_vaddr);
            ps = map -> pr_pagesize;
            np = map -> pr_npage;
            /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
            limit = vaddr + ps * np;
            bufp += sizeof (struct prasmap);
            for (current_addr = vaddr;
                 current_addr < limit; current_addr += ps){
                if ((*bufp++) & PG_MODIFIED) {
                    register struct hblk * h = (struct hblk *) current_addr;

                    while ((ptr_t)h < current_addr + ps) {
                        register word index = PHT_HASH(h);

                        set_pht_entry_from_index(GC_grungy_pages, index);
#                       ifdef GC_SOLARIS_THREADS
                          {
                            register int slot = FRESH_PAGE_SLOT(h);

                            if (GC_fresh_pages[slot] == h) {
                                GC_fresh_pages[slot] = 0;
                            }
                          }
#                       endif
                        h++;
                    }
                }
            }
            bufp += sizeof(long) - 1;
            bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
        }
    /* Update GC_written_pages. */
        GC_or_pages(GC_written_pages, GC_grungy_pages);
#   ifdef GC_SOLARIS_THREADS
        /* Make sure that old stacks are considered completely clean   */
        /* unless written again.                                        */
        GC_old_stacks_are_fresh();
#   endif
}
GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
    register word index = PHT_HASH(h);
    register GC_bool result;

    result = get_pht_entry_from_index(GC_grungy_pages, index);
#   ifdef GC_SOLARIS_THREADS
        if (result && PAGE_IS_FRESH(h)) result = FALSE;
        /* This happens only if page was declared fresh since   */
        /* the read_dirty call, e.g. because it's in an unused  */
        /* thread stack.  It's OK to treat it as clean, in      */
        /* that case.  And it's consistent with                 */
        /* GC_page_was_ever_dirty.                               */
#   endif
    return(result);
}

GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    register word index = PHT_HASH(h);
    register GC_bool result;

    result = get_pht_entry_from_index(GC_written_pages, index);
#   ifdef GC_SOLARIS_THREADS
        if (result && PAGE_IS_FRESH(h)) result = FALSE;
#   endif
    return(result);
}

/* Caller holds allocation lock.        */
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
    register word index;

#   ifdef GC_SOLARIS_THREADS
        register word i;

        if (GC_fresh_pages != 0) {
            for (i = 0; i < n; i++) {
                ADD_FRESH_PAGE(h + i);
            }
        }
#   endif
}

# endif /* PROC_VDB */
# ifdef PCR_VDB

# include "vd/PCR_VD.h"

# define NPAGES (32*1024)       /* 128 MB */
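/* With a 4K HBLKSIZE this covers 32*1024 * 4096 bytes = 128 MB; a      */
/* different block size scales the covered range accordingly.           */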
PCR_VD_DB GC_grungy_bits[NPAGES];

ptr_t GC_vd_base;       /* Address corresponding to GC_grungy_bits[0]  */
                        /* HBLKSIZE aligned.                            */

void GC_dirty_init()
{
    GC_dirty_maintained = TRUE;
    /* For the time being, we assume the heap generally grows up */
    GC_vd_base = GC_heap_sects[0].hs_start;
    if (GC_vd_base == 0) {
        ABORT("Bad initial heap segment");
    }
    if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
        != PCR_ERes_okay) {
        ABORT("dirty bit initialization failed");
    }
}

void GC_read_dirty()
{
    /* lazily enable dirty bits on newly added heap sects */
    {
        static int onhs = 0;
        int nhs = GC_n_heap_sects;
        for( ; onhs < nhs; onhs++ ) {
            PCR_VD_WriteProtectEnable(
                    GC_heap_sects[onhs].hs_start,
                    GC_heap_sects[onhs].hs_bytes );
        }
    }

    if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
        != PCR_ERes_okay) {
        ABORT("dirty bit read failed");
    }
}

GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
    if ((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
        return(TRUE);
    }
    return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
}

void GC_write_hint(h)
struct hblk *h;
{
    PCR_VD_WriteProtectDisable(h, HBLKSIZE);
    PCR_VD_WriteProtectEnable(h, HBLKSIZE);
}

# endif /* PCR_VDB */
/*
 * Call stack save code for debugging.
 * Should probably be in mach_dep.c, but that requires reorganization.
 */

/* I suspect the following works for most X86 *nix variants, so         */
/* long as the frame pointer is explicitly stored.  In the case of gcc, */
/* compiler flags (e.g. -fomit-frame-pointer) determine whether it is.  */
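/* In practice this presumably means building both the collector and    */
/* the client without -fomit-frame-pointer (or with                     */
/* -fno-omit-frame-pointer), since GC_save_callers below walks the      */
/* chain of saved frame pointers starting from %ebp.                    */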
#if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
    struct frame {
        struct frame *fr_savfp;
        long fr_savpc;
        long fr_arg[NARGS];     /* All the arguments go here.   */
    };
#endif

#if defined(SPARC)
#  if defined(LINUX)
     struct frame {
        long    fr_local[8];
        long    fr_arg[6];
        struct frame *fr_savfp;
        long    fr_savpc;
#       ifndef __arch64__
          char  *fr_stret;
#       endif
        long    fr_argd[6];
        long    fr_argx[0];
     };
#  else
#    if defined(SUNOS4)
#      include <machine/frame.h>
#    else
#      if defined (DRSNX)
#        include <sys/sparc/frame.h>
#      else
#        if defined(OPENBSD) || defined(NETBSD)
#          include <frame.h>
#        else
#          include <sys/frame.h>
#        endif
#      endif
#    endif
#  endif
#  if NARGS > 6
        --> We only know how to get the first 6 arguments
#  endif
#endif /* SPARC */

#ifdef SAVE_CALL_CHAIN
/* Fill in the pc and argument information for up to NFRAMES of my     */
/* callers.  Ignore my frame and my caller's frame.                    */

#if (defined(OPENBSD) || defined(NETBSD)) && defined(SPARC)
#  define FR_SAVFP fr_fp
#  define FR_SAVPC fr_pc
#else
#  define FR_SAVFP fr_savfp
#  define FR_SAVPC fr_savpc
#endif

#if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
#   define BIAS 2047
#else
#   define BIAS 0
#endif

void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
  struct frame *frame;
  struct frame *fp;
  int nframes = 0;
# ifdef I386
    /* We assume this is turned on only with gcc as the compiler. */
    asm("movl %%ebp,%0" : "=r"(frame));
    fp = frame;
# else
    word GC_save_regs_in_stack();

    frame = (struct frame *) GC_save_regs_in_stack ();
    fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
# endif

  for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
          && (nframes < NFRAMES));
       fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
      register int i;

      info[nframes].ci_pc = fp->FR_SAVPC;
#     if NARGS > 0
        for (i = 0; i < NARGS; i++) {
          info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
        }
#     endif /* NARGS > 0 */
  }
  if (nframes < NFRAMES) info[nframes].ci_pc = 0;
}

#endif /* SAVE_CALL_CHAIN */
#if defined(LINUX) && defined(__ELF__) && \
    (!defined(SMALL_CONFIG) || defined(USE_PROC_FOR_LIBRARIES))
#ifdef GC_USE_LD_WRAP
#   define READ __real_read
#else
#   define READ read
#endif

/* Repeatedly perform a read call until the buffer is filled or        */
/* we encounter EOF.                                                    */
ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
    ssize_t num_read = 0;
    ssize_t result;

    while (num_read < count) {
        result = READ(fd, buf + num_read, count - num_read);
        if (result < 0) return result;
        if (result == 0) break;
        num_read += result;
    }
    return num_read;
}
#endif /* LINUX && ... */
#if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)

/* Dump /proc/self/maps to GC_stderr, to enable looking up names for
   addresses in FIND_LEAK output. */
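/* A typical maps line looks roughly like
     08048000-0804c000 r-xp 00000000 03:01 12345      /usr/bin/foo
   (start-end, permissions, file offset, device, inode, path), so an
   address reported by FIND_LEAK can be matched against the start-end
   ranges to find the mapping it falls in. */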
void GC_print_address_map()
{
    int f;
    int result;
    char maps_temp[32768];

    GC_err_printf0("---------- Begin address map ----------\n");
    f = open("/proc/self/maps", O_RDONLY);
    if (-1 == f) ABORT("Couldn't open /proc/self/maps");
    do {
        result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
        if (result <= 0) ABORT("Couldn't read /proc/self/maps");
        GC_err_write(maps_temp, result);
    } while (result == sizeof(maps_temp));

    GC_err_printf0("---------- End address map ----------\n");