2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
17 # include "private/gc_priv.h"
19 # if defined(LINUX) && !defined(POWERPC)
20 # include <linux/version.h>
21 # if (LINUX_VERSION_CODE <= 0x10400)
22 /* Ugly hack to get struct sigcontext_struct definition. Required */
23 /* for some early 1.3.X releases. Will hopefully go away soon. */
24 /* in some later Linux releases, asm/sigcontext.h may have to */
25 /* be included instead. */
27 # include <asm/signal.h>
30 /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31 /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
32 /* prototypes, so we have to include the top-level sigcontext.h to */
33 /* make sure the former gets defined to be the latter if appropriate. */
34 # include <features.h>
36 # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
37 /* glibc 2.1 no longer has sigcontext.h. But signal.h */
38 /* has the right declaration for glibc 2.1. */
39 # include <sigcontext.h>
40 # endif /* 0 == __GLIBC_MINOR__ */
41 # else /* not 2 <= __GLIBC__ */
42 /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
43 /* one. Check LINUX_VERSION_CODE to see which we should reference. */
44 # include <asm/sigcontext.h>
45 # endif /* 2 <= __GLIBC__ */
48 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
50 # include <sys/types.h>
51 # if !defined(MSWIN32) && !defined(SUNOS4)
58 # define SIGSEGV 0 /* value is irrelevant */
63 /* Blatantly OS dependent routines, except for those that are related */
64 /* to dynamic loading. */
66 # if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
67 # define NEED_FIND_LIMIT
70 # if defined(IRIX_THREADS) || defined(HPUX_THREADS)
71 # define NEED_FIND_LIMIT
74 # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
75 # define NEED_FIND_LIMIT
78 # if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
79 # define NEED_FIND_LIMIT
82 # if defined(LINUX) && \
83 (defined(POWERPC) || defined(SPARC) || defined(ALPHA) || defined(IA64) \
85 # define NEED_FIND_LIMIT
88 #ifdef NEED_FIND_LIMIT
93 # include <machine/trap.h>
102 #if defined(MSWIN32) || defined(MSWINCE)
103 # define WIN32_LEAN_AND_MEAN
105 # include <windows.h>
109 # include <Processes.h>
113 # include <sys/uio.h>
114 # include <malloc.h> /* for locking */
117 # include <sys/types.h>
118 # include <sys/mman.h>
119 # include <sys/stat.h>
127 # include <sys/siginfo.h>
130 # define setjmp(env) sigsetjmp(env, 1)
131 # define longjmp(env, val) siglongjmp(env, val)
132 # define jmp_buf sigjmp_buf
136 /* Apparently necessary for djgpp 2.01. May cause problems with */
137 /* other versions. */
138 typedef long unsigned int caddr_t
;
142 # include "il/PCR_IL.h"
143 # include "th/PCR_ThCtl.h"
144 # include "mm/PCR_MM.h"
147 #if !defined(NO_EXECUTE_PERMISSION)
148 # define OPT_PROT_EXEC PROT_EXEC
150 # define OPT_PROT_EXEC 0
153 #if defined(SEARCH_FOR_DATA_START)
154 /* The I386 case can be handled without a search. The Alpha case */
155 /* used to be handled differently as well, but the rules changed */
156 /* for recent Linux versions. This seems to be the easiest way to */
157 /* cover all versions. */
160 # pragma weak __data_start
161 extern int __data_start
;
162 # pragma weak data_start
163 extern int data_start
;
169 void GC_init_linux_data_start()
171 extern ptr_t
GC_find_limit();
174 /* Try the easy approaches first: */
175 if (&__data_start
!= 0) {
176 GC_data_start
= (ptr_t
)(&__data_start
);
179 if (&data_start
!= 0) {
180 GC_data_start
= (ptr_t
)(&data_start
);
184 GC_data_start
= GC_find_limit((ptr_t
)(&_end
), FALSE
);
190 # ifndef ECOS_GC_MEMORY_SIZE
191 # define ECOS_GC_MEMORY_SIZE (448 * 1024)
192 # endif /* ECOS_GC_MEMORY_SIZE */
194 // setjmp() function, as described in ANSI para 7.6.1.1
195 #define setjmp( __env__ ) hal_setjmp( __env__ )
197 // FIXME: This is a simple way of allocating memory which is
198 // compatible with ECOS early releases. Later releases use a more
199 // sophisticated means of allocating memory than this simple static
200 // allocator, but this method is at least bound to work.
201 static char memory
[ECOS_GC_MEMORY_SIZE
];
202 static char *brk
= memory
;
204 static void *tiny_sbrk(ptrdiff_t increment
)
210 if (brk
> memory
+ sizeof memory
)
218 #define sbrk tiny_sbrk
221 #if defined(NETBSD) && defined(__ELF__)
224 void GC_init_netbsd_elf()
226 extern ptr_t
GC_find_limit();
227 extern char **environ
;
228 /* This may need to be environ, without the underscore, for */
230 GC_data_start
= GC_find_limit((ptr_t
)&environ
, FALSE
);
238 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
241 unsigned short magic_number
;
242 unsigned short padding
[29];
246 #define E_MAGIC(x) (x).magic_number
247 #define EMAGIC 0x5A4D
248 #define E_LFANEW(x) (x).new_exe_offset
251 unsigned char magic_number
[2];
252 unsigned char byte_order
;
253 unsigned char word_order
;
254 unsigned long exe_format_level
;
257 unsigned long padding1
[13];
258 unsigned long object_table_offset
;
259 unsigned long object_count
;
260 unsigned long padding2
[31];
263 #define E32_MAGIC1(x) (x).magic_number[0]
264 #define E32MAGIC1 'L'
265 #define E32_MAGIC2(x) (x).magic_number[1]
266 #define E32MAGIC2 'X'
267 #define E32_BORDER(x) (x).byte_order
269 #define E32_WORDER(x) (x).word_order
271 #define E32_CPU(x) (x).cpu
273 #define E32_OBJTAB(x) (x).object_table_offset
274 #define E32_OBJCNT(x) (x).object_count
280 unsigned long pagemap
;
281 unsigned long mapsize
;
282 unsigned long reserved
;
285 #define O32_FLAGS(x) (x).flags
286 #define OBJREAD 0x0001L
287 #define OBJWRITE 0x0002L
288 #define OBJINVALID 0x0080L
289 #define O32_SIZE(x) (x).size
290 #define O32_BASE(x) (x).base
292 # else /* IBM's compiler */
294 /* A kludge to get around what appears to be a header file bug */
296 # define WORD unsigned short
299 # define DWORD unsigned long
306 # endif /* __IBMC__ */
308 # define INCL_DOSEXCEPTIONS
309 # define INCL_DOSPROCESS
310 # define INCL_DOSERRORS
311 # define INCL_DOSMODULEMGR
312 # define INCL_DOSMEMMGR
316 /* Disable and enable signals during nontrivial allocations */
318 void GC_disable_signals(void)
322 DosEnterMustComplete(&nest
);
323 if (nest
!= 1) ABORT("nested GC_disable_signals");
326 void GC_enable_signals(void)
330 DosExitMustComplete(&nest
);
331 if (nest
!= 0) ABORT("GC_enable_signals");
337 # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
338 && !defined(MSWINCE) \
339 && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW)
341 # if defined(sigmask) && !defined(UTS4)
342 /* Use the traditional BSD interface */
343 # define SIGSET_T int
344 # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
345 # define SIG_FILL(set) (set) = 0x7fffffff
346 /* Setting the leading bit appears to provoke a bug in some */
347 /* longjmp implementations. Most systems appear not to have */
349 # define SIGSETMASK(old, new) (old) = sigsetmask(new)
351 /* Use POSIX/SYSV interface */
352 # define SIGSET_T sigset_t
353 # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
354 # define SIG_FILL(set) sigfillset(&set)
355 # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
358 static GC_bool mask_initialized
= FALSE
;
360 static SIGSET_T new_mask
;
362 static SIGSET_T old_mask
;
364 static SIGSET_T dummy
;
366 #if defined(PRINTSTATS) && !defined(THREADS)
367 # define CHECK_SIGNALS
368 int GC_sig_disabled
= 0;
371 void GC_disable_signals()
373 if (!mask_initialized
) {
376 SIG_DEL(new_mask
, SIGSEGV
);
377 SIG_DEL(new_mask
, SIGILL
);
378 SIG_DEL(new_mask
, SIGQUIT
);
380 SIG_DEL(new_mask
, SIGBUS
);
383 SIG_DEL(new_mask
, SIGIOT
);
386 SIG_DEL(new_mask
, SIGEMT
);
389 SIG_DEL(new_mask
, SIGTRAP
);
391 mask_initialized
= TRUE
;
393 # ifdef CHECK_SIGNALS
394 if (GC_sig_disabled
!= 0) ABORT("Nested disables");
397 SIGSETMASK(old_mask
,new_mask
);
400 void GC_enable_signals()
402 # ifdef CHECK_SIGNALS
403 if (GC_sig_disabled
!= 1) ABORT("Unmatched enable");
406 SIGSETMASK(dummy
,old_mask
);
413 /* Ivan Demakov: simplest way (to me) */
415 void GC_disable_signals() { }
416 void GC_enable_signals() { }
419 /* Find the page size */
422 # if defined(MSWIN32) || defined(MSWINCE)
423 void GC_setpagesize()
425 GetSystemInfo(&GC_sysinfo
);
426 GC_page_size
= GC_sysinfo
.dwPageSize
;
430 # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
431 || defined(USE_MUNMAP)
432 void GC_setpagesize()
434 GC_page_size
= GETPAGESIZE();
437 /* It's acceptable to fake it. */
438 void GC_setpagesize()
440 GC_page_size
= HBLKSIZE
;
446 * Find the base of the stack.
447 * Used only in single-threaded environment.
448 * With threads, GC_mark_roots needs to know how to do this.
449 * Called with allocator lock held.
451 # if defined(MSWIN32) || defined(MSWINCE)
452 # define is_writable(prot) ((prot) == PAGE_READWRITE \
453 || (prot) == PAGE_WRITECOPY \
454 || (prot) == PAGE_EXECUTE_READWRITE \
455 || (prot) == PAGE_EXECUTE_WRITECOPY)
456 /* Return the number of bytes that are writable starting at p. */
457 /* The pointer p is assumed to be page aligned. */
458 /* If base is not 0, *base becomes the beginning of the */
459 /* allocation region containing p. */
460 word
GC_get_writable_length(ptr_t p
, ptr_t
*base
)
462 MEMORY_BASIC_INFORMATION buf
;
466 result
= VirtualQuery(p
, &buf
, sizeof(buf
));
467 if (result
!= sizeof(buf
)) ABORT("Weird VirtualQuery result");
468 if (base
!= 0) *base
= (ptr_t
)(buf
.AllocationBase
);
469 protect
= (buf
.Protect
& ~(PAGE_GUARD
| PAGE_NOCACHE
));
470 if (!is_writable(protect
)) {
473 if (buf
.State
!= MEM_COMMIT
) return(0);
474 return(buf
.RegionSize
);
477 ptr_t
GC_get_stack_base()
480 ptr_t sp
= (ptr_t
)(&dummy
);
481 ptr_t trunc_sp
= (ptr_t
)((word
)sp
& ~(GC_page_size
- 1));
482 word size
= GC_get_writable_length(trunc_sp
, 0);
484 return(trunc_sp
+ size
);
488 # endif /* MS Windows */
491 # include <kernel/OS.h>
492 ptr_t
GC_get_stack_base(){
494 get_thread_info(find_thread(NULL
),&th
);
502 ptr_t
GC_get_stack_base()
507 if (DosGetInfoBlocks(&ptib
, &ppib
) != NO_ERROR
) {
508 GC_err_printf0("DosGetInfoBlocks failed\n");
509 ABORT("DosGetInfoBlocks failed\n");
511 return((ptr_t
)(ptib
-> tib_pstacklimit
));
518 # include "AmigaOS.c"
522 # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
525 typedef void (*handler
)(int);
527 typedef void (*handler
)();
530 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
531 static struct sigaction old_segv_act
;
532 # if defined(_sigargs) || defined(HPUX) /* !Irix6.x */
533 static struct sigaction old_bus_act
;
536 static handler old_segv_handler
, old_bus_handler
;
540 void GC_set_and_save_fault_handler(handler h
)
542 void GC_set_and_save_fault_handler(h
)
547 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
548 struct sigaction act
;
551 act
.sa_flags
= SA_RESTART
| SA_NODEFER
;
552 /* The presence of SA_NODEFER represents yet another gross */
553 /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
554 /* interact correctly with -lthread. We hide the confusion */
555 /* by making sure that signal handling doesn't affect the */
558 (void) sigemptyset(&act
.sa_mask
);
560 /* Older versions have a bug related to retrieving and */
561 /* and setting a handler at the same time. */
562 (void) sigaction(SIGSEGV
, 0, &old_segv_act
);
563 (void) sigaction(SIGSEGV
, &act
, 0);
565 (void) sigaction(SIGSEGV
, &act
, &old_segv_act
);
566 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
568 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
569 /* Pthreads doesn't exist under Irix 5.x, so we */
570 /* don't have to worry in the threads case. */
571 (void) sigaction(SIGBUS
, &act
, &old_bus_act
);
573 # endif /* IRIX_THREADS */
575 old_segv_handler
= signal(SIGSEGV
, h
);
577 old_bus_handler
= signal(SIGBUS
, h
);
582 # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
584 # ifdef NEED_FIND_LIMIT
585 /* Some tools to implement HEURISTIC2 */
586 # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
587 /* static */ jmp_buf GC_jmp_buf
;
590 void GC_fault_handler(sig
)
593 longjmp(GC_jmp_buf
, 1);
596 void GC_setup_temporary_fault_handler()
598 GC_set_and_save_fault_handler(GC_fault_handler
);
601 void GC_reset_fault_handler()
604 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
605 (void) sigaction(SIGSEGV
, &old_segv_act
, 0);
606 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
608 (void) sigaction(SIGBUS
, &old_bus_act
, 0);
611 (void) signal(SIGSEGV
, old_segv_handler
);
613 (void) signal(SIGBUS
, old_bus_handler
);
619 /* Return the first nonaddressible location > p (up) or */
620 /* the smallest location q s.t. [q,p] is addressible (!up). */
621 ptr_t
GC_find_limit(p
, up
)
626 static VOLATILE ptr_t result
;
627 /* Needs to be static, since otherwise it may not be */
628 /* preserved across the longjmp. Can safely be */
629 /* static since it's only called once, with the */
630 /* allocation lock held. */
633 GC_setup_temporary_fault_handler();
634 if (setjmp(GC_jmp_buf
) == 0) {
635 result
= (ptr_t
)(((word
)(p
))
636 & ~(MIN_PAGE_SIZE
-1));
639 result
+= MIN_PAGE_SIZE
;
641 result
-= MIN_PAGE_SIZE
;
643 GC_noop1((word
)(*result
));
646 GC_reset_fault_handler();
648 result
+= MIN_PAGE_SIZE
;
659 #ifdef LINUX_STACKBOTTOM
661 #include <sys/types.h>
662 #include <sys/stat.h>
664 # define STAT_SKIP 27 /* Number of fields preceding startstack */
665 /* field in /proc/self/stat */
667 # pragma weak __libc_stack_end
668 extern ptr_t __libc_stack_end
;
671 # pragma weak __libc_ia64_register_backing_store_base
672 extern ptr_t __libc_ia64_register_backing_store_base
;
674 ptr_t
GC_get_register_stack_base(void)
676 if (0 != &__libc_ia64_register_backing_store_base
) {
677 return __libc_ia64_register_backing_store_base
;
679 word result
= (word
)GC_stackbottom
- BACKING_STORE_DISPLACEMENT
;
680 result
+= BACKING_STORE_ALIGNMENT
- 1;
681 result
&= ~(BACKING_STORE_ALIGNMENT
- 1);
682 return (ptr_t
)result
;
687 ptr_t
GC_linux_stack_base(void)
689 /* We read the stack base value from /proc/self/stat. We do this */
690 /* using direct I/O system calls in order to avoid calling malloc */
691 /* in case REDIRECT_MALLOC is defined. */
692 # define STAT_BUF_SIZE 4096
693 # if defined(GC_USE_LD_WRAP)
694 # define STAT_READ __real_read
696 # define STAT_READ read
698 char stat_buf
[STAT_BUF_SIZE
];
702 size_t i
, buf_offset
= 0;
704 /* First try the easy way. This should work for glibc 2.2 */
705 if (0 != &__libc_stack_end
) {
706 return __libc_stack_end
;
708 f
= open("/proc/self/stat", O_RDONLY
);
709 if (f
< 0 || STAT_READ(f
, stat_buf
, STAT_BUF_SIZE
) < 2 * STAT_SKIP
) {
710 ABORT("Couldn't read /proc/self/stat");
712 c
= stat_buf
[buf_offset
++];
713 /* Skip the required number of fields. This number is hopefully */
714 /* constant across all Linux implementations. */
715 for (i
= 0; i
< STAT_SKIP
; ++i
) {
716 while (isspace(c
)) c
= stat_buf
[buf_offset
++];
717 while (!isspace(c
)) c
= stat_buf
[buf_offset
++];
719 while (isspace(c
)) c
= stat_buf
[buf_offset
++];
723 c
= stat_buf
[buf_offset
++];
726 if (result
< 0x10000000) ABORT("Absurd stack bottom value");
727 return (ptr_t
)result
;
730 #endif /* LINUX_STACKBOTTOM */
732 #ifdef FREEBSD_STACKBOTTOM
734 /* This uses an undocumented sysctl call, but at least one expert */
735 /* believes it will stay. */
738 #include <sys/types.h>
739 #include <sys/sysctl.h>
741 ptr_t
GC_freebsd_stack_base(void)
743 int nm
[2] = { CTL_KERN
, KERN_USRSTACK
}, base
, len
, r
;
746 r
= sysctl(nm
, 2, &base
, &len
, NULL
, 0);
748 if (r
) ABORT("Error getting stack base");
753 #endif /* FREEBSD_STACKBOTTOM */
755 #if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
756 && !defined(MSWINCE) && !defined(OS2)
758 ptr_t
GC_get_stack_base()
763 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
769 # ifdef STACK_GROWS_DOWN
770 result
= (ptr_t
)((((word
)(&dummy
))
771 + STACKBOTTOM_ALIGNMENT_M1
)
772 & ~STACKBOTTOM_ALIGNMENT_M1
);
774 result
= (ptr_t
)(((word
)(&dummy
))
775 & ~STACKBOTTOM_ALIGNMENT_M1
);
777 # endif /* HEURISTIC1 */
778 # ifdef LINUX_STACKBOTTOM
779 result
= GC_linux_stack_base();
781 # ifdef FREEBSD_STACKBOTTOM
782 result
= GC_freebsd_stack_base();
785 # ifdef STACK_GROWS_DOWN
786 result
= GC_find_limit((ptr_t
)(&dummy
), TRUE
);
787 # ifdef HEURISTIC2_LIMIT
788 if (result
> HEURISTIC2_LIMIT
789 && (ptr_t
)(&dummy
) < HEURISTIC2_LIMIT
) {
790 result
= HEURISTIC2_LIMIT
;
794 result
= GC_find_limit((ptr_t
)(&dummy
), FALSE
);
795 # ifdef HEURISTIC2_LIMIT
796 if (result
< HEURISTIC2_LIMIT
797 && (ptr_t
)(&dummy
) > HEURISTIC2_LIMIT
) {
798 result
= HEURISTIC2_LIMIT
;
803 # endif /* HEURISTIC2 */
804 # ifdef STACK_GROWS_DOWN
805 if (result
== 0) result
= (ptr_t
)(signed_word
)(-sizeof(ptr_t
));
808 # endif /* STACKBOTTOM */
812 # endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS */
815 * Register static data segment(s) as roots.
816 * If more data segments are added later then they need to be registered
817 * add that point (as we do with SunOS dynamic loading),
818 * or GC_mark_roots needs to check for them (as we do with PCR).
819 * Called with allocator lock held.
824 void GC_register_data_segments()
828 HMODULE module_handle
;
832 struct exe_hdr hdrdos
; /* MSDOS header. */
833 struct e32_exe hdr386
; /* Real header for my executable */
834 struct o32_obj seg
; /* Currrent segment */
838 if (DosGetInfoBlocks(&ptib
, &ppib
) != NO_ERROR
) {
839 GC_err_printf0("DosGetInfoBlocks failed\n");
840 ABORT("DosGetInfoBlocks failed\n");
842 module_handle
= ppib
-> pib_hmte
;
843 if (DosQueryModuleName(module_handle
, PBUFSIZ
, path
) != NO_ERROR
) {
844 GC_err_printf0("DosQueryModuleName failed\n");
845 ABORT("DosGetInfoBlocks failed\n");
847 myexefile
= fopen(path
, "rb");
848 if (myexefile
== 0) {
849 GC_err_puts("Couldn't open executable ");
850 GC_err_puts(path
); GC_err_puts("\n");
851 ABORT("Failed to open executable\n");
853 if (fread((char *)(&hdrdos
), 1, sizeof hdrdos
, myexefile
) < sizeof hdrdos
) {
854 GC_err_puts("Couldn't read MSDOS header from ");
855 GC_err_puts(path
); GC_err_puts("\n");
856 ABORT("Couldn't read MSDOS header");
858 if (E_MAGIC(hdrdos
) != EMAGIC
) {
859 GC_err_puts("Executable has wrong DOS magic number: ");
860 GC_err_puts(path
); GC_err_puts("\n");
861 ABORT("Bad DOS magic number");
863 if (fseek(myexefile
, E_LFANEW(hdrdos
), SEEK_SET
) != 0) {
864 GC_err_puts("Seek to new header failed in ");
865 GC_err_puts(path
); GC_err_puts("\n");
866 ABORT("Bad DOS magic number");
868 if (fread((char *)(&hdr386
), 1, sizeof hdr386
, myexefile
) < sizeof hdr386
) {
869 GC_err_puts("Couldn't read MSDOS header from ");
870 GC_err_puts(path
); GC_err_puts("\n");
871 ABORT("Couldn't read OS/2 header");
873 if (E32_MAGIC1(hdr386
) != E32MAGIC1
|| E32_MAGIC2(hdr386
) != E32MAGIC2
) {
874 GC_err_puts("Executable has wrong OS/2 magic number:");
875 GC_err_puts(path
); GC_err_puts("\n");
876 ABORT("Bad OS/2 magic number");
878 if ( E32_BORDER(hdr386
) != E32LEBO
|| E32_WORDER(hdr386
) != E32LEWO
) {
879 GC_err_puts("Executable %s has wrong byte order: ");
880 GC_err_puts(path
); GC_err_puts("\n");
881 ABORT("Bad byte order");
883 if ( E32_CPU(hdr386
) == E32CPU286
) {
884 GC_err_puts("GC can't handle 80286 executables: ");
885 GC_err_puts(path
); GC_err_puts("\n");
888 if (fseek(myexefile
, E_LFANEW(hdrdos
) + E32_OBJTAB(hdr386
),
890 GC_err_puts("Seek to object table failed: ");
891 GC_err_puts(path
); GC_err_puts("\n");
892 ABORT("Seek to object table failed");
894 for (nsegs
= E32_OBJCNT(hdr386
); nsegs
> 0; nsegs
--) {
896 if (fread((char *)(&seg
), 1, sizeof seg
, myexefile
) < sizeof seg
) {
897 GC_err_puts("Couldn't read obj table entry from ");
898 GC_err_puts(path
); GC_err_puts("\n");
899 ABORT("Couldn't read obj table entry");
901 flags
= O32_FLAGS(seg
);
902 if (!(flags
& OBJWRITE
)) continue;
903 if (!(flags
& OBJREAD
)) continue;
904 if (flags
& OBJINVALID
) {
905 GC_err_printf0("Object with invalid pages?\n");
908 GC_add_roots_inner(O32_BASE(seg
), O32_BASE(seg
)+O32_SIZE(seg
), FALSE
);
914 # if defined(MSWIN32) || defined(MSWINCE)
917 /* Unfortunately, we have to handle win32s very differently from NT, */
918 /* Since VirtualQuery has very different semantics. In particular, */
919 /* under win32s a VirtualQuery call on an unmapped page returns an */
920 /* invalid result. Under GC_register_data_segments is a noop and */
921 /* all real work is done by GC_register_dynamic_libraries. Under */
922 /* win32s, we cannot find the data segments associated with dll's. */
923 /* We rgister the main data segment here. */
924 GC_bool GC_win32s
= FALSE
; /* We're running under win32s. */
926 GC_bool
GC_is_win32s()
928 DWORD v
= GetVersion();
930 /* Check that this is not NT, and Windows major version <= 3 */
931 return ((v
& 0x80000000) && (v
& 0xff) <= 3);
936 GC_win32s
= GC_is_win32s();
939 /* Return the smallest address a such that VirtualQuery */
940 /* returns correct results for all addresses between a and start. */
941 /* Assumes VirtualQuery returns correct information for start. */
942 ptr_t
GC_least_described_address(ptr_t start
)
944 MEMORY_BASIC_INFORMATION buf
;
950 limit
= GC_sysinfo
.lpMinimumApplicationAddress
;
951 p
= (ptr_t
)((word
)start
& ~(GC_page_size
- 1));
953 q
= (LPVOID
)(p
- GC_page_size
);
954 if ((ptr_t
)q
> (ptr_t
)p
/* underflow */ || q
< limit
) break;
955 result
= VirtualQuery(q
, &buf
, sizeof(buf
));
956 if (result
!= sizeof(buf
) || buf
.AllocationBase
== 0) break;
957 p
= (ptr_t
)(buf
.AllocationBase
);
963 /* Is p the start of either the malloc heap, or of one of our */
965 GC_bool
GC_is_heap_base (ptr_t p
)
970 # ifndef REDIRECT_MALLOC
971 static ptr_t malloc_heap_pointer
= 0;
973 if (0 == malloc_heap_pointer
) {
974 MEMORY_BASIC_INFORMATION buf
;
975 void *pTemp
= malloc( 1 );
976 register DWORD result
= VirtualQuery(pTemp
, &buf
, sizeof(buf
));
981 if (result
!= sizeof(buf
)) {
982 ABORT("Weird VirtualQuery result");
984 malloc_heap_pointer
= (ptr_t
)(buf
.AllocationBase
);
986 if (p
== malloc_heap_pointer
) return(TRUE
);
988 for (i
= 0; i
< GC_n_heap_bases
; i
++) {
989 if (GC_heap_bases
[i
] == p
) return(TRUE
);
995 void GC_register_root_section(ptr_t static_root
)
997 MEMORY_BASIC_INFORMATION buf
;
1002 char * limit
, * new_limit
;
1004 if (!GC_win32s
) return;
1005 p
= base
= limit
= GC_least_described_address(static_root
);
1006 while (p
< GC_sysinfo
.lpMaximumApplicationAddress
) {
1007 result
= VirtualQuery(p
, &buf
, sizeof(buf
));
1008 if (result
!= sizeof(buf
) || buf
.AllocationBase
== 0
1009 || GC_is_heap_base(buf
.AllocationBase
)) break;
1010 new_limit
= (char *)p
+ buf
.RegionSize
;
1011 protect
= buf
.Protect
;
1012 if (buf
.State
== MEM_COMMIT
1013 && is_writable(protect
)) {
1014 if ((char *)p
== limit
) {
1017 if (base
!= limit
) GC_add_roots_inner(base
, limit
, FALSE
);
1022 if (p
> (LPVOID
)new_limit
/* overflow */) break;
1023 p
= (LPVOID
)new_limit
;
1025 if (base
!= limit
) GC_add_roots_inner(base
, limit
, FALSE
);
1029 void GC_register_data_segments()
1033 GC_register_root_section((ptr_t
)(&dummy
));
1037 # else /* !OS2 && !Windows */
1039 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1040 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1041 char * GC_SysVGetDataStart(max_page_size
, etext_addr
)
1045 word text_end
= ((word
)(etext_addr
) + sizeof(word
) - 1)
1046 & ~(sizeof(word
) - 1);
1047 /* etext rounded to word boundary */
1048 word next_page
= ((text_end
+ (word
)max_page_size
- 1)
1049 & ~((word
)max_page_size
- 1));
1050 word page_offset
= (text_end
& ((word
)max_page_size
- 1));
1051 VOLATILE
char * result
= (char *)(next_page
+ page_offset
);
1052 /* Note that this isnt equivalent to just adding */
1053 /* max_page_size to &etext if &etext is at a page boundary */
1055 GC_setup_temporary_fault_handler();
1056 if (setjmp(GC_jmp_buf
) == 0) {
1057 /* Try writing to the address. */
1059 GC_reset_fault_handler();
1061 GC_reset_fault_handler();
1062 /* We got here via a longjmp. The address is not readable. */
1063 /* This is known to happen under Solaris 2.4 + gcc, which place */
1064 /* string constants in the text segment, but after etext. */
1065 /* Use plan B. Note that we now know there is a gap between */
1066 /* text and data segments, so plan A bought us something. */
1067 result
= (char *)GC_find_limit((ptr_t
)(DATAEND
) - MIN_PAGE_SIZE
, FALSE
);
1069 return((char *)result
);
1076 # define GC_AMIGA_DS
1077 # include "AmigaOS.c"
1080 #else /* !OS2 && !Windows && !AMIGA */
1082 void GC_register_data_segments()
1084 # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1086 # if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
1087 /* As of Solaris 2.3, the Solaris threads implementation */
1088 /* allocates the data structure for the initial thread with */
1089 /* sbrk at process startup. It needs to be scanned, so that */
1090 /* we don't lose some malloc allocated data structures */
1091 /* hanging from it. We're on thin ice here ... */
1092 extern caddr_t
sbrk();
1094 GC_add_roots_inner(DATASTART
, (char *)sbrk(0), FALSE
);
1096 GC_add_roots_inner(DATASTART
, (char *)(DATAEND
), FALSE
);
1099 # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1100 GC_add_roots_inner(DATASTART
, (char *) get_end(), FALSE
);
1104 # if defined(THINK_C)
1105 extern void* GC_MacGetDataStart(void);
1106 /* globals begin above stack and end at a5. */
1107 GC_add_roots_inner((ptr_t
)GC_MacGetDataStart(),
1108 (ptr_t
)LMGetCurrentA5(), FALSE
);
1110 # if defined(__MWERKS__)
1112 extern void* GC_MacGetDataStart(void);
1113 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1114 # if __option(far_data)
1115 extern void* GC_MacGetDataEnd(void);
1117 /* globals begin above stack and end at a5. */
1118 GC_add_roots_inner((ptr_t
)GC_MacGetDataStart(),
1119 (ptr_t
)LMGetCurrentA5(), FALSE
);
1120 /* MATTHEW: Handle Far Globals */
1121 # if __option(far_data)
1122 /* Far globals follow he QD globals: */
1123 GC_add_roots_inner((ptr_t
)LMGetCurrentA5(),
1124 (ptr_t
)GC_MacGetDataEnd(), FALSE
);
1127 extern char __data_start__
[], __data_end__
[];
1128 GC_add_roots_inner((ptr_t
)&__data_start__
,
1129 (ptr_t
)&__data_end__
, FALSE
);
1130 # endif /* __POWERPC__ */
1131 # endif /* __MWERKS__ */
1132 # endif /* !THINK_C */
1136 /* Dynamic libraries are added at every collection, since they may */
1140 # endif /* ! AMIGA */
1141 # endif /* ! MSWIN32 && ! MSWINCE*/
1145 * Auxiliary routines for obtaining memory from OS.
1148 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1149 && !defined(MSWIN32) && !defined(MSWINCE) \
1150 && !defined(MACOS) && !defined(DOS4GW)
1153 extern caddr_t
sbrk();
1156 # define SBRK_ARG_T ptrdiff_t
1158 # define SBRK_ARG_T int
1163 /* The compiler seems to generate speculative reads one past the end of */
1164 /* an allocated object. Hence we need to make sure that the page */
1165 /* following the last heap page is also mapped. */
1166 ptr_t
GC_unix_get_mem(bytes
)
1169 caddr_t cur_brk
= (caddr_t
)sbrk(0);
1171 SBRK_ARG_T lsbs
= (word
)cur_brk
& (GC_page_size
-1);
1172 static caddr_t my_brk_val
= 0;
1174 if ((SBRK_ARG_T
)bytes
< 0) return(0); /* too big */
1176 if((caddr_t
)(sbrk(GC_page_size
- lsbs
)) == (caddr_t
)(-1)) return(0);
1178 if (cur_brk
== my_brk_val
) {
1179 /* Use the extra block we allocated last time. */
1180 result
= (ptr_t
)sbrk((SBRK_ARG_T
)bytes
);
1181 if (result
== (caddr_t
)(-1)) return(0);
1182 result
-= GC_page_size
;
1184 result
= (ptr_t
)sbrk(GC_page_size
+ (SBRK_ARG_T
)bytes
);
1185 if (result
== (caddr_t
)(-1)) return(0);
1187 my_brk_val
= result
+ bytes
+ GC_page_size
; /* Always page aligned */
1188 return((ptr_t
)result
);
1191 #else /* Not RS6000 */
1193 #if defined(USE_MMAP)
1194 /* Tested only under Linux, IRIX5 and Solaris 2 */
1196 #ifdef USE_MMAP_FIXED
1197 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1198 /* Seems to yield better performance on Solaris 2, but can */
1199 /* be unreliable if something is already mapped at the address. */
1201 # define GC_MMAP_FLAGS MAP_PRIVATE
1205 # define HEAP_START 0
1208 ptr_t
GC_unix_get_mem(bytes
)
1211 static GC_bool initialized
= FALSE
;
1214 static ptr_t last_addr
= HEAP_START
;
1217 fd
= open("/dev/zero", O_RDONLY
);
1220 if (bytes
& (GC_page_size
-1)) ABORT("Bad GET_MEM arg");
1221 result
= mmap(last_addr
, bytes
, PROT_READ
| PROT_WRITE
| OPT_PROT_EXEC
,
1222 GC_MMAP_FLAGS
, fd
, 0/* offset */);
1223 if (result
== MAP_FAILED
) return(0);
1224 last_addr
= (ptr_t
)result
+ bytes
+ GC_page_size
- 1;
1225 last_addr
= (ptr_t
)((word
)last_addr
& ~(GC_page_size
- 1));
1226 # if !defined(LINUX)
1227 if (last_addr
== 0) {
1228 /* Oops. We got the end of the address space. This isn't */
1229 /* usable by arbitrary C code, since one-past-end pointers */
1230 /* don't work, so we discard it and try again. */
1231 munmap(result
, (size_t)(-GC_page_size
) - (size_t)result
);
1232 /* Leave last page mapped, so we can't repeat. */
1233 return GC_unix_get_mem(bytes
);
1236 GC_ASSERT(last_addr
!= 0);
1238 return((ptr_t
)result
);
1241 #else /* Not RS6000, not USE_MMAP */
1242 ptr_t
GC_unix_get_mem(bytes
)
1247 /* Bare sbrk isn't thread safe. Play by malloc rules. */
1248 /* The equivalent may be needed on other systems as well. */
1252 ptr_t cur_brk
= (ptr_t
)sbrk(0);
1253 SBRK_ARG_T lsbs
= (word
)cur_brk
& (GC_page_size
-1);
1255 if ((SBRK_ARG_T
)bytes
< 0) return(0); /* too big */
1257 if((ptr_t
)sbrk(GC_page_size
- lsbs
) == (ptr_t
)(-1)) return(0);
1259 result
= (ptr_t
)sbrk((SBRK_ARG_T
)bytes
);
1260 if (result
== (ptr_t
)(-1)) result
= 0;
1268 #endif /* Not USE_MMAP */
1269 #endif /* Not RS6000 */
1275 void * os2_alloc(size_t bytes
)
1279 if (DosAllocMem(&result
, bytes
, PAG_EXECUTE
| PAG_READ
|
1280 PAG_WRITE
| PAG_COMMIT
)
1284 if (result
== 0) return(os2_alloc(bytes
));
1291 # if defined(MSWIN32) || defined(MSWINCE)
1292 SYSTEM_INFO GC_sysinfo
;
1297 word GC_n_heap_bases
= 0;
1299 ptr_t
GC_win32_get_mem(bytes
)
1305 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1306 /* There are also unconfirmed rumors of other */
1307 /* problems, so we dodge the issue. */
1308 result
= (ptr_t
) GlobalAlloc(0, bytes
+ HBLKSIZE
);
1309 result
= (ptr_t
)(((word
)result
+ HBLKSIZE
) & ~(HBLKSIZE
-1));
1311 result
= (ptr_t
) VirtualAlloc(NULL
, bytes
,
1312 MEM_COMMIT
| MEM_RESERVE
,
1313 PAGE_EXECUTE_READWRITE
);
1315 if (HBLKDISPL(result
) != 0) ABORT("Bad VirtualAlloc result");
1316 /* If I read the documentation correctly, this can */
1317 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1318 if (GC_n_heap_bases
>= MAX_HEAP_SECTS
) ABORT("Too many heap sections");
1319 GC_heap_bases
[GC_n_heap_bases
++] = result
;
1323 void GC_win32_free_heap ()
1326 while (GC_n_heap_bases
> 0) {
1327 GlobalFree (GC_heap_bases
[--GC_n_heap_bases
]);
1328 GC_heap_bases
[GC_n_heap_bases
] = 0;
1335 # define GC_AMIGA_AM
1336 # include "AmigaOS.c"
1342 word GC_n_heap_bases
= 0;
1344 ptr_t
GC_wince_get_mem(bytes
)
1350 /* Round up allocation size to multiple of page size */
1351 bytes
= (bytes
+ GC_page_size
-1) & ~(GC_page_size
-1);
1353 /* Try to find reserved, uncommitted pages */
1354 for (i
= 0; i
< GC_n_heap_bases
; i
++) {
1355 if (((word
)(-(signed_word
)GC_heap_lengths
[i
])
1356 & (GC_sysinfo
.dwAllocationGranularity
-1))
1358 result
= GC_heap_bases
[i
] + GC_heap_lengths
[i
];
1363 if (i
== GC_n_heap_bases
) {
1364 /* Reserve more pages */
1365 word res_bytes
= (bytes
+ GC_sysinfo
.dwAllocationGranularity
-1)
1366 & ~(GC_sysinfo
.dwAllocationGranularity
-1);
1367 result
= (ptr_t
) VirtualAlloc(NULL
, res_bytes
,
1368 MEM_RESERVE
| MEM_TOP_DOWN
,
1369 PAGE_EXECUTE_READWRITE
);
1370 if (HBLKDISPL(result
) != 0) ABORT("Bad VirtualAlloc result");
1371 /* If I read the documentation correctly, this can */
1372 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1373 if (GC_n_heap_bases
>= MAX_HEAP_SECTS
) ABORT("Too many heap sections");
1374 GC_heap_bases
[GC_n_heap_bases
] = result
;
1375 GC_heap_lengths
[GC_n_heap_bases
] = 0;
1380 result
= (ptr_t
) VirtualAlloc(result
, bytes
,
1382 PAGE_EXECUTE_READWRITE
);
1383 if (result
!= NULL
) {
1384 if (HBLKDISPL(result
) != 0) ABORT("Bad VirtualAlloc result");
1385 GC_heap_lengths
[i
] += bytes
;
1394 /* For now, this only works on Win32/WinCE and some Unix-like */
1395 /* systems. If you have something else, don't define */
1397 /* We assume ANSI C to support this feature. */
1399 #if !defined(MSWIN32) && !defined(MSWINCE)
1402 #include <sys/mman.h>
1403 #include <sys/stat.h>
1404 #include <sys/types.h>
1408 /* Compute a page aligned starting address for the unmap */
1409 /* operation on a block of size bytes starting at start. */
1410 /* Return 0 if the block is too small to make this feasible. */
1411 ptr_t
GC_unmap_start(ptr_t start
, word bytes
)
1413 ptr_t result
= start
;
1414 /* Round start to next page boundary. */
1415 result
+= GC_page_size
- 1;
1416 result
= (ptr_t
)((word
)result
& ~(GC_page_size
- 1));
1417 if (result
+ GC_page_size
> start
+ bytes
) return 0;
1421 /* Compute end address for an unmap operation on the indicated */
1423 ptr_t
GC_unmap_end(ptr_t start
, word bytes
)
1425 ptr_t end_addr
= start
+ bytes
;
1426 end_addr
= (ptr_t
)((word
)end_addr
& ~(GC_page_size
- 1));
1430 /* Under Win32/WinCE we commit (map) and decommit (unmap) */
1431 /* memory using VirtualAlloc and VirtualFree. These functions */
1432 /* work on individual allocations of virtual memory, made */
1433 /* previously using VirtualAlloc with the MEM_RESERVE flag. */
1434 /* The ranges we need to (de)commit may span several of these */
1435 /* allocations; therefore we use VirtualQuery to check */
1436 /* allocation lengths, and split up the range as necessary. */
1438 /* We assume that GC_remap is called on exactly the same range */
1439 /* as a previous call to GC_unmap. It is safe to consistently */
1440 /* round the endpoints in both places. */
1441 void GC_unmap(ptr_t start
, word bytes
)
1443 ptr_t start_addr
= GC_unmap_start(start
, bytes
);
1444 ptr_t end_addr
= GC_unmap_end(start
, bytes
);
1445 word len
= end_addr
- start_addr
;
1446 if (0 == start_addr
) return;
1447 # if defined(MSWIN32) || defined(MSWINCE)
1449 MEMORY_BASIC_INFORMATION mem_info
;
1451 if (VirtualQuery(start_addr
, &mem_info
, sizeof(mem_info
))
1452 != sizeof(mem_info
))
1453 ABORT("Weird VirtualQuery result");
1454 free_len
= (len
< mem_info
.RegionSize
) ? len
: mem_info
.RegionSize
;
1455 if (!VirtualFree(start_addr
, free_len
, MEM_DECOMMIT
))
1456 ABORT("VirtualFree failed");
1457 GC_unmapped_bytes
+= free_len
;
1458 start_addr
+= free_len
;
1462 if (munmap(start_addr
, len
) != 0) ABORT("munmap failed");
1463 GC_unmapped_bytes
+= len
;
1468 void GC_remap(ptr_t start
, word bytes
)
1470 static int zero_descr
= -1;
1471 ptr_t start_addr
= GC_unmap_start(start
, bytes
);
1472 ptr_t end_addr
= GC_unmap_end(start
, bytes
);
1473 word len
= end_addr
- start_addr
;
1476 # if defined(MSWIN32) || defined(MSWINCE)
1477 if (0 == start_addr
) return;
1479 MEMORY_BASIC_INFORMATION mem_info
;
1481 if (VirtualQuery(start_addr
, &mem_info
, sizeof(mem_info
))
1482 != sizeof(mem_info
))
1483 ABORT("Weird VirtualQuery result");
1484 alloc_len
= (len
< mem_info
.RegionSize
) ? len
: mem_info
.RegionSize
;
1485 result
= VirtualAlloc(start_addr
, alloc_len
,
1487 PAGE_EXECUTE_READWRITE
);
1488 if (result
!= start_addr
) {
1489 ABORT("VirtualAlloc remapping failed");
1491 GC_unmapped_bytes
-= alloc_len
;
1492 start_addr
+= alloc_len
;
1496 if (-1 == zero_descr
) zero_descr
= open("/dev/zero", O_RDWR
);
1497 if (0 == start_addr
) return;
1498 result
= mmap(start_addr
, len
, PROT_READ
| PROT_WRITE
| OPT_PROT_EXEC
,
1499 MAP_FIXED
| MAP_PRIVATE
, zero_descr
, 0);
1500 if (result
!= start_addr
) {
1501 ABORT("mmap remapping failed");
1503 GC_unmapped_bytes
-= len
;
1507 /* Two adjacent blocks have already been unmapped and are about to */
1508 /* be merged. Unmap the whole block. This typically requires */
1509 /* that we unmap a small section in the middle that was not previously */
1510 /* unmapped due to alignment constraints. */
1511 void GC_unmap_gap(ptr_t start1
, word bytes1
, ptr_t start2
, word bytes2
)
1513 ptr_t start1_addr
= GC_unmap_start(start1
, bytes1
);
1514 ptr_t end1_addr
= GC_unmap_end(start1
, bytes1
);
1515 ptr_t start2_addr
= GC_unmap_start(start2
, bytes2
);
1516 ptr_t end2_addr
= GC_unmap_end(start2
, bytes2
);
1517 ptr_t start_addr
= end1_addr
;
1518 ptr_t end_addr
= start2_addr
;
1520 GC_ASSERT(start1
+ bytes1
== start2
);
1521 if (0 == start1_addr
) start_addr
= GC_unmap_start(start1
, bytes1
+ bytes2
);
1522 if (0 == start2_addr
) end_addr
= GC_unmap_end(start1
, bytes1
+ bytes2
);
1523 if (0 == start_addr
) return;
1524 len
= end_addr
- start_addr
;
1525 # if defined(MSWIN32) || defined(MSWINCE)
1527 MEMORY_BASIC_INFORMATION mem_info
;
1529 if (VirtualQuery(start_addr
, &mem_info
, sizeof(mem_info
))
1530 != sizeof(mem_info
))
1531 ABORT("Weird VirtualQuery result");
1532 free_len
= (len
< mem_info
.RegionSize
) ? len
: mem_info
.RegionSize
;
1533 if (!VirtualFree(start_addr
, free_len
, MEM_DECOMMIT
))
1534 ABORT("VirtualFree failed");
1535 GC_unmapped_bytes
+= free_len
;
1536 start_addr
+= free_len
;
1540 if (len
!= 0 && munmap(start_addr
, len
) != 0) ABORT("munmap failed");
1541 GC_unmapped_bytes
+= len
;
1545 #endif /* USE_MUNMAP */
/* Routine for pushing any additional roots.  In THREADS        */
/* environment, this is also responsible for marking from       */
/* thread stacks.                                               */
void (*GC_push_other_roots)() = 0;
1555 PCR_ERes
GC_push_thread_stack(PCR_Th_T
*t
, PCR_Any dummy
)
1557 struct PCR_ThCtl_TInfoRep info
;
1560 info
.ti_stkLow
= info
.ti_stkHi
= 0;
1561 result
= PCR_ThCtl_GetInfo(t
, &info
);
1562 GC_push_all_stack((ptr_t
)(info
.ti_stkLow
), (ptr_t
)(info
.ti_stkHi
));
1566 /* Push the contents of an old object. We treat this as stack */
1567 /* data only becasue that makes it robust against mark stack */
1569 PCR_ERes
GC_push_old_obj(void *p
, size_t size
, PCR_Any data
)
1571 GC_push_all_stack((ptr_t
)p
, (ptr_t
)p
+ size
);
1572 return(PCR_ERes_okay
);
1576 void GC_default_push_other_roots
GC_PROTO((void))
1578 /* Traverse data allocated by previous memory managers. */
1580 extern struct PCR_MM_ProcsRep
* GC_old_allocator
;
1582 if ((*(GC_old_allocator
->mmp_enumerate
))(PCR_Bool_false
,
1585 ABORT("Old object enumeration failed");
1588 /* Traverse all thread stacks. */
1590 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack
,0))
1591 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1592 ABORT("Thread stack marking failed\n");
1600 # ifdef ALL_INTERIOR_POINTERS
1604 void GC_push_thread_structures
GC_PROTO((void))
1606 /* Not our responsibibility. */
1609 extern void ThreadF__ProcessStacks();
1611 void GC_push_thread_stack(start
, stop
)
1614 GC_push_all_stack((ptr_t
)start
, (ptr_t
)stop
+ sizeof(word
));
1617 /* Push routine with M3 specific calling convention. */
1618 GC_m3_push_root(dummy1
, p
, dummy2
, dummy3
)
1620 ptr_t dummy1
, dummy2
;
1625 GC_PUSH_ONE_STACK(q
, p
);
1628 /* M3 set equivalent to RTHeap.TracedRefTypes */
1629 typedef struct { int elts
[1]; } RefTypeSet
;
1630 RefTypeSet GC_TracedRefTypes
= {{0x1}};
1632 void GC_default_push_other_roots
GC_PROTO((void))
1634 /* Use the M3 provided routine for finding static roots. */
1635 /* This is a bit dubious, since it presumes no C roots. */
1636 /* We handle the collector roots explicitly in GC_push_roots */
1637 RTMain__GlobalMapProc(GC_m3_push_root
, 0, GC_TracedRefTypes
);
1638 if (GC_words_allocd
> 0) {
1639 ThreadF__ProcessStacks(GC_push_thread_stack
);
1641 /* Otherwise this isn't absolutely necessary, and we have */
1642 /* startup ordering problems. */
1645 # endif /* SRC_M3 */
1647 # if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
1648 || defined(IRIX_THREADS) || defined(LINUX_THREADS) \
1649 || defined(HPUX_THREADS)
1651 extern void GC_push_all_stacks();
1653 void GC_default_push_other_roots
GC_PROTO((void))
1655 GC_push_all_stacks();
1658 # endif /* SOLARIS_THREADS || ... */
1660 void (*GC_push_other_roots
) GC_PROTO((void)) = GC_default_push_other_roots
;
1665 * Routines for accessing dirty bits on virtual pages.
1666 * We plan to eventually implement four strategies for doing so:
1667 * DEFAULT_VDB: A simple dummy implementation that treats every page
1668 * as possibly dirty. This makes incremental collection
1669 * useless, but the implementation is still correct.
1670 * PCR_VDB: Use PPCRs virtual dirty bit facility.
1671 * PROC_VDB: Use the /proc facility for reading dirty bits. Only
1672 * works under some SVR4 variants. Even then, it may be
1673 * too slow to be entirely satisfactory. Requires reading
1674 * dirty bits for entire address space. Implementations tend
1675 * to assume that the client is a (slow) debugger.
1676 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
1677 * dirtied pages. The implementation (and implementability)
1678 * is highly system dependent. This usually fails when system
1679 * calls write to a protected page. We prevent the read system
1680 * call from doing so. It is the clients responsibility to
1681 * make sure that other system calls are similarly protected
1682 * or write only to the stack.
1685 GC_bool GC_dirty_maintained
= FALSE
;
1689 /* All of the following assume the allocation lock is held, and */
1690 /* signals are disabled. */
1692 /* The client asserts that unallocated pages in the heap are never */
1695 /* Initialize virtual dirty bit implementation. */
1696 void GC_dirty_init()
1698 GC_dirty_maintained
= TRUE
;
1701 /* Retrieve system dirty bits for heap to a local buffer. */
1702 /* Restore the systems notion of which pages are dirty. */
1703 void GC_read_dirty()
1706 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1707 /* If the actual page size is different, this returns TRUE if any */
1708 /* of the pages overlapping h are dirty. This routine may err on the */
1709 /* side of labelling pages as dirty (and this implementation does). */
1711 GC_bool
GC_page_was_dirty(h
)
1718 * The following two routines are typically less crucial. They matter
1719 * most with large dynamic libraries, or if we can't accurately identify
1720 * stacks, e.g. under Solaris 2.X. Otherwise the following default
1721 * versions are adequate.
1724 /* Could any valid GC heap pointer ever have been written to this page? */
1726 GC_bool
GC_page_was_ever_dirty(h
)
1732 /* Reset the n pages starting at h to "was never dirty" status. */
1733 void GC_is_fresh(h
, n
)
1739 /* A call hints that h is about to be written. */
1740 /* May speed up some dirty bit implementations. */
1742 void GC_write_hint(h
)
1747 # endif /* DEFAULT_VDB */
1750 # ifdef MPROTECT_VDB
1753 * See DEFAULT_VDB for interface descriptions.
1757 * This implementation maintains dirty bits itself by catching write
1758 * faults and keeping track of them. We assume nobody else catches
1759 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
1760 * except as a result of a read system call. This means clients must
1761 * either ensure that system calls do not touch the heap, or must
1762 * provide their own wrappers analogous to the one for read.
1763 * We assume the page size is a multiple of HBLKSIZE.
1764 * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
1765 * tried to use portable code where easily possible. It is known
1766 * not to work under a number of other systems.
1769 # if !defined(MSWIN32) && !defined(MSWINCE)
1771 # include <sys/mman.h>
1772 # include <signal.h>
1773 # include <sys/syscall.h>
1775 # define PROTECT(addr, len) \
1776 if (mprotect((caddr_t)(addr), (size_t)(len), \
1777 PROT_READ | OPT_PROT_EXEC) < 0) { \
1778 ABORT("mprotect failed"); \
1780 # define UNPROTECT(addr, len) \
1781 if (mprotect((caddr_t)(addr), (size_t)(len), \
1782 PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1783 ABORT("un-mprotect failed"); \
1789 # include <signal.h>
1792 static DWORD protect_junk
;
1793 # define PROTECT(addr, len) \
1794 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1796 DWORD last_error = GetLastError(); \
1797 GC_printf1("Last error code: %lx\n", last_error); \
1798 ABORT("VirtualProtect failed"); \
1800 # define UNPROTECT(addr, len) \
1801 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1803 ABORT("un-VirtualProtect failed"); \
1808 #if defined(SUNOS4) || defined(FREEBSD)
1809 typedef void (* SIG_PF
)();
1811 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) || defined(MACOSX)
1813 typedef void (* SIG_PF
)(int);
1815 typedef void (* SIG_PF
)();
1818 #if defined(MSWIN32)
1819 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF
;
1821 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1823 #if defined(MSWINCE)
1824 typedef LONG (WINAPI
*SIG_PF
)(struct _EXCEPTION_POINTERS
*);
1826 # define SIG_DFL (SIG_PF) (-1)
1829 #if defined(IRIX5) || defined(OSF1)
1830 typedef void (* REAL_SIG_PF
)(int, int, struct sigcontext
*);
1832 #if defined(SUNOS5SIGS)
1834 # define SIGINFO __siginfo
1836 # define SIGINFO siginfo
1839 typedef void (* REAL_SIG_PF
)(int, struct SIGINFO
*, void *);
1841 typedef void (* REAL_SIG_PF
)();
1845 # include <linux/version.h>
1846 # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(IA64)
1847 typedef struct sigcontext s_c
;
1849 typedef struct sigcontext_struct s_c
;
1851 # if defined(ALPHA) || defined(M68K)
1852 typedef void (* REAL_SIG_PF
)(int, int, s_c
*);
1854 # if defined(IA64) || defined(HP_PA)
1855 typedef void (* REAL_SIG_PF
)(int, siginfo_t
*, s_c
*);
1857 typedef void (* REAL_SIG_PF
)(int, s_c
);
1861 /* Retrieve fault address from sigcontext structure by decoding */
1863 char * get_fault_addr(s_c
*sc
) {
1867 instr
= *((unsigned *)(sc
->sc_pc
));
1868 faultaddr
= sc
->sc_regs
[(instr
>> 16) & 0x1f];
1869 faultaddr
+= (word
) (((int)instr
<< 16) >> 16);
1870 return (char *)faultaddr
;
1872 # endif /* !ALPHA */
1875 # if defined(MACOSX) /* Should also test for PowerPC? */
1876 typedef void (* REAL_SIG_PF
)(int, int, struct sigcontext
*);
1878 /* Decodes the machine instruction which was responsible for the sending of the
1879 SIGBUS signal. Sadly this is the only way to find the faulting address because
1880 the signal handler doesn't get it directly from the kernel (although it is
1881 available on the Mach level, but droppped by the BSD personality before it
1882 calls our signal handler...)
1883 This code should be able to deal correctly with all PPCs starting from the
1884 601 up to and including the G4s (including Velocity Engine). */
1885 #define EXTRACT_OP1(iw) (((iw) & 0xFC000000) >> 26)
1886 #define EXTRACT_OP2(iw) (((iw) & 0x000007FE) >> 1)
1887 #define EXTRACT_REGA(iw) (((iw) & 0x001F0000) >> 16)
1888 #define EXTRACT_REGB(iw) (((iw) & 0x03E00000) >> 21)
1889 #define EXTRACT_REGC(iw) (((iw) & 0x0000F800) >> 11)
1890 #define EXTRACT_DISP(iw) ((short *) &(iw))[1]
1892 static char *get_fault_addr(struct sigcontext
*scp
)
1894 unsigned int instr
= *((unsigned int *) scp
->sc_ir
);
1895 unsigned int * regs
= &((unsigned int *) scp
->sc_regs
)[2];
1897 unsigned int baseA
= 0, baseB
= 0;
1898 unsigned int addr
, alignmask
= 0xFFFFFFFF;
1900 #ifdef GC_DEBUG_DECODER
1901 GC_err_printf1("Instruction: 0x%lx\n", instr
);
1902 GC_err_printf1("Opcode 1: d\n", (int)EXTRACT_OP1(instr
));
1904 switch(EXTRACT_OP1(instr
)) {
1908 case 55: /* stfdu */
1910 case 53: /* stfsu */
1916 tmp
= EXTRACT_REGA(instr
);
1919 disp
= EXTRACT_DISP(instr
);
1922 #ifdef GC_DEBUG_DECODER
1923 GC_err_printf1("Opcode 2: %d\n", (int)EXTRACT_OP2(instr
));
1925 switch(EXTRACT_OP2(instr
)) {
1927 case 54: /* dcbst */
1928 case 1014: /* dcbz */
1929 case 247: /* stbux */
1930 case 215: /* stbx */
1931 case 759: /* stfdux */
1932 case 727: /* stfdx */
1933 case 983: /* stfiwx */
1934 case 695: /* stfsux */
1935 case 663: /* stfsx */
1936 case 918: /* sthbrx */
1937 case 439: /* sthux */
1938 case 407: /* sthx */
1939 case 661: /* stswx */
1940 case 662: /* stwbrx */
1941 case 150: /* stwcx. */
1942 case 183: /* stwux */
1943 case 151: /* stwx */
1944 case 135: /* stvebx */
1945 case 167: /* stvehx */
1946 case 199: /* stvewx */
1947 case 231: /* stvx */
1948 case 487: /* stvxl */
1949 tmp
= EXTRACT_REGA(instr
);
1952 baseB
= regs
[EXTRACT_REGC(instr
)];
1953 /* determine Altivec alignment mask */
1954 switch(EXTRACT_OP2(instr
)) {
1955 case 167: /* stvehx */
1956 alignmask
= 0xFFFFFFFE;
1958 case 199: /* stvewx */
1959 alignmask
= 0xFFFFFFFC;
1961 case 231: /* stvx */
1962 alignmask
= 0xFFFFFFF0;
1964 case 487: /* stvxl */
1965 alignmask
= 0xFFFFFFF0;
1969 case 725: /* stswi */
1970 tmp
= EXTRACT_REGA(instr
);
1974 default: /* ignore instruction */
1975 #ifdef GC_DEBUG_DECODER
1976 GC_err_printf("Ignored by inner handler\n");
1982 default: /* ignore instruction */
1983 #ifdef GC_DEBUG_DECODER
1984 GC_err_printf("Ignored by main handler\n");
1990 addr
= (baseA
+ baseB
) + disp
;
1992 #ifdef GC_DEBUG_DECODER
1993 GC_err_printf1("BaseA: %d\n", baseA
);
1994 GC_err_printf1("BaseB: %d\n", baseB
);
1995 GC_err_printf1("Disp: %d\n", disp
);
1996 GC_err_printf1("Address: %d\n", addr
);
1998 return (char *)addr
;
2002 SIG_PF GC_old_bus_handler
;
2003 SIG_PF GC_old_segv_handler
; /* Also old MSWIN32 ACCESS_VIOLATION filter */
2006 /* We need to lock around the bitmap update in the write fault handler */
2007 /* in order to avoid the risk of losing a bit. We do this with a */
2008 /* test-and-set spin lock if we know how to do that. Otherwise we */
2009 /* check whether we are already in the handler and use the dumb but */
2010 /* safe fallback algorithm of setting all bits in the word. */
2011 /* Contention should be very rare, so we do the minimum to handle it */
2013 #ifdef GC_TEST_AND_SET_DEFINED
2014 static VOLATILE
unsigned int fault_handler_lock
= 0;
2015 void async_set_pht_entry_from_index(VOLATILE page_hash_table db
, int index
) {
2016 while (GC_test_and_set(&fault_handler_lock
));
2017 /* Could also revert to set_pht_entry_from_index_safe if initial */
2018 /* GC_test_and_set fails. */
2019 set_pht_entry_from_index(db
, index
);
2020 GC_clear(&fault_handler_lock
);
2022 #else /* !GC_TEST_AND_SET_DEFINED */
2023 /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
2024 /* just before we notice the conflict and correct it. We may end up */
2025 /* looking at it while it's wrong. But this requires contention */
2026 /* exactly when a GC is triggered, which seems far less likely to */
2027 /* fail than the old code, which had no reported failures. Thus we */
2028 /* leave it this way while we think of something better, or support */
2029 /* GC_test_and_set on the remaining platforms. */
2030 static VOLATILE word currently_updating
= 0;
2031 void async_set_pht_entry_from_index(VOLATILE page_hash_table db
, int index
) {
2032 unsigned int update_dummy
;
2033 currently_updating
= (word
)(&update_dummy
);
2034 set_pht_entry_from_index(db
, index
);
2035 /* If we get contention in the 10 or so instruction window here, */
2036 /* and we get stopped by a GC between the two updates, we lose! */
2037 if (currently_updating
!= (word
)(&update_dummy
)) {
2038 set_pht_entry_from_index_safe(db
, index
);
2039 /* We claim that if two threads concurrently try to update the */
2040 /* dirty bit vector, the first one to execute UPDATE_START */
2041 /* will see it changed when UPDATE_END is executed. (Note that */
2042 /* &update_dummy must differ in two distinct threads.) It */
2043 /* will then execute set_pht_entry_from_index_safe, thus */
2044 /* returning us to a safe state, though not soon enough. */
2047 #endif /* !GC_TEST_AND_SET_DEFINED */
2048 #else /* !THREADS */
2049 # define async_set_pht_entry_from_index(db, index) \
2050 set_pht_entry_from_index(db, index)
2051 #endif /* !THREADS */
2054 # if defined (SUNOS4) || defined(FREEBSD)
2055 void GC_write_fault_handler(sig
, code
, scp
, addr
)
2057 struct sigcontext
*scp
;
2060 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2061 # define CODE_OK (FC_CODE(code) == FC_PROT \
2062 || (FC_CODE(code) == FC_OBJERR \
2063 && FC_ERRNO(code) == FC_PROT))
2066 # define SIG_OK (sig == SIGBUS)
2067 # define CODE_OK (code == BUS_PAGE_FAULT)
2070 # if defined(IRIX5) || defined(OSF1)
2072 void GC_write_fault_handler(int sig
, int code
, struct sigcontext
*scp
)
2073 # define SIG_OK (sig == SIGSEGV)
2075 # define CODE_OK (code == 2 /* experimentally determined */)
2078 # define CODE_OK (code == EACCES)
2082 # if defined(ALPHA) || defined(M68K)
2083 void GC_write_fault_handler(int sig
, int code
, s_c
* sc
)
2085 # if defined(IA64) || defined(HP_PA)
2086 void GC_write_fault_handler(int sig
, siginfo_t
* si
, s_c
* scp
)
2088 void GC_write_fault_handler(int sig
, s_c sc
)
2091 # define SIG_OK (sig == SIGSEGV)
2092 # define CODE_OK TRUE
2093 /* Empirically c.trapno == 14, on IA32, but is that useful? */
2094 /* Should probably consider alignment issues on other */
2095 /* architectures. */
2097 # if defined(SUNOS5SIGS)
2099 void GC_write_fault_handler(int sig
, struct SIGINFO
*scp
, void * context
)
2101 void GC_write_fault_handler(sig
, scp
, context
)
2103 struct SIGINFO
*scp
;
2107 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2108 # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
2109 || (scp -> si_code == BUS_ADRERR) \
2110 || (scp -> si_code == BUS_UNKNOWN) \
2111 || (scp -> si_code == SEGV_UNKNOWN) \
2112 || (scp -> si_code == BUS_OBJERR)
2114 # define SIG_OK (sig == SIGSEGV)
2115 # define CODE_OK (scp -> si_code == SEGV_ACCERR)
2119 # if defined(MACOSX)
2120 void GC_write_fault_handler(int sig
, int code
, struct sigcontext
*scp
)
2121 # define SIG_OK (sig == SIGBUS)
2122 # define CODE_OK (code == 0 /* experimentally determined */)
2125 # if defined(MSWIN32) || defined(MSWINCE)
2126 LONG WINAPI
GC_write_fault_handler(struct _EXCEPTION_POINTERS
*exc_info
)
2127 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
2128 STATUS_ACCESS_VIOLATION)
2129 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
2133 register unsigned i
;
2135 char * addr
= (char *) (size_t) (scp
-> sc_badvaddr
);
2137 # if defined(OSF1) && defined(ALPHA)
2138 char * addr
= (char *) (scp
-> sc_traparg_a0
);
2141 char * addr
= (char *) (scp
-> si_addr
);
2145 char * addr
= (char *) (sc
.cr2
);
2150 struct sigcontext
*scp
= (struct sigcontext
*)(sc
);
2152 int format
= (scp
->sc_formatvec
>> 12) & 0xf;
2153 unsigned long *framedata
= (unsigned long *)(scp
+ 1);
2156 if (format
== 0xa || format
== 0xb) {
2159 } else if (format
== 7) {
2162 if (framedata
[1] & 0x08000000) {
2163 /* correct addr on misaligned access */
2164 ea
= (ea
+4095)&(~4095);
2166 } else if (format
== 4) {
2169 if (framedata
[1] & 0x08000000) {
2170 /* correct addr on misaligned access */
2171 ea
= (ea
+4095)&(~4095);
2177 char * addr
= get_fault_addr(sc
);
2179 # if defined(IA64) || defined(HP_PA)
2180 char * addr
= si
-> si_addr
;
2181 /* I believe this is claimed to work on all platforms for */
2182 /* Linux 2.3.47 and later. Hopefully we don't have to */
2183 /* worry about earlier kernels on IA64. */
2185 # if defined(POWERPC)
2186 char * addr
= (char *) (sc
.regs
->dar
);
2188 --> architecture
not supported
2195 # if defined(MACOSX)
2196 char * addr
= get_fault_addr(scp
);
2198 # if defined(MSWIN32) || defined(MSWINCE)
2199 char * addr
= (char *) (exc_info
-> ExceptionRecord
2200 -> ExceptionInformation
[1]);
2201 # define sig SIGSEGV
2204 if (SIG_OK
&& CODE_OK
) {
2205 register struct hblk
* h
=
2206 (struct hblk
*)((word
)addr
& ~(GC_page_size
-1));
2207 GC_bool in_allocd_block
;
2210 /* Address is only within the correct physical page. */
2211 in_allocd_block
= FALSE
;
2212 for (i
= 0; i
< divHBLKSZ(GC_page_size
); i
++) {
2213 if (HDR(h
+i
) != 0) {
2214 in_allocd_block
= TRUE
;
2218 in_allocd_block
= (HDR(addr
) != 0);
2220 if (!in_allocd_block
) {
2221 /* Heap blocks now begin and end on page boundaries */
2224 if (sig
== SIGSEGV
) {
2225 old_handler
= GC_old_segv_handler
;
2227 old_handler
= GC_old_bus_handler
;
2229 if (old_handler
== SIG_DFL
) {
2230 # if !defined(MSWIN32) && !defined(MSWINCE)
2231 GC_err_printf1("Segfault at 0x%lx\n", addr
);
2232 ABORT("Unexpected bus error or segmentation fault");
2234 return(EXCEPTION_CONTINUE_SEARCH
);
2237 # if defined (SUNOS4) || defined(FREEBSD)
2238 (*old_handler
) (sig
, code
, scp
, addr
);
2241 # if defined (SUNOS5SIGS)
2242 (*(REAL_SIG_PF
)old_handler
) (sig
, scp
, context
);
2245 # if defined (LINUX)
2246 # if defined(ALPHA) || defined(M68K)
2247 (*(REAL_SIG_PF
)old_handler
) (sig
, code
, sc
);
2249 # if defined(IA64) || defined(HP_PA)
2250 (*(REAL_SIG_PF
)old_handler
) (sig
, si
, scp
);
2252 (*(REAL_SIG_PF
)old_handler
) (sig
, sc
);
2257 # if defined (IRIX5) || defined(OSF1)
2258 (*(REAL_SIG_PF
)old_handler
) (sig
, code
, scp
);
2262 (*(REAL_SIG_PF
)old_handler
) (sig
, code
, scp
);
2265 return((*old_handler
)(exc_info
));
2269 for (i
= 0; i
< divHBLKSZ(GC_page_size
); i
++) {
2270 register int index
= PHT_HASH(h
+i
);
2272 async_set_pht_entry_from_index(GC_dirty_pages
, index
);
2274 UNPROTECT(h
, GC_page_size
);
2275 # if defined(OSF1) || defined(LINUX)
2276 /* These reset the signal handler each time by default. */
2277 signal(SIGSEGV
, (SIG_PF
) GC_write_fault_handler
);
2279 /* The write may not take place before dirty bits are read. */
2280 /* But then we'll fault again ... */
2281 # if defined(MSWIN32) || defined(MSWINCE)
2282 return(EXCEPTION_CONTINUE_EXECUTION
);
2287 #if defined(MSWIN32) || defined(MSWINCE)
2288 return EXCEPTION_CONTINUE_SEARCH
;
2290 GC_err_printf1("Segfault at 0x%lx\n", addr
);
2291 ABORT("Unexpected bus error or segmentation fault");
2296 * We hold the allocation lock. We expect block h to be written
2299 void GC_write_hint(h
)
2302 register struct hblk
* h_trunc
;
2303 register unsigned i
;
2304 register GC_bool found_clean
;
2306 if (!GC_dirty_maintained
) return;
2307 h_trunc
= (struct hblk
*)((word
)h
& ~(GC_page_size
-1));
2308 found_clean
= FALSE
;
2309 for (i
= 0; i
< divHBLKSZ(GC_page_size
); i
++) {
2310 register int index
= PHT_HASH(h_trunc
+i
);
2312 if (!get_pht_entry_from_index(GC_dirty_pages
, index
)) {
2314 async_set_pht_entry_from_index(GC_dirty_pages
, index
);
2318 UNPROTECT(h_trunc
, GC_page_size
);
2322 void GC_dirty_init()
2324 # if defined(SUNOS5SIGS) || defined(IRIX5) /* || defined(OSF1) */
2325 struct sigaction act
, oldact
;
2327 act
.sa_flags
= SA_RESTART
;
2328 act
.sa_handler
= GC_write_fault_handler
;
2330 act
.sa_flags
= SA_RESTART
| SA_SIGINFO
;
2331 act
.sa_sigaction
= GC_write_fault_handler
;
2333 (void)sigemptyset(&act
.sa_mask
);
2335 # if defined(MACOSX)
2336 struct sigaction act
, oldact
;
2338 act
.sa_flags
= SA_RESTART
;
2339 act
.sa_handler
= GC_write_fault_handler
;
2340 sigemptyset(&act
.sa_mask
);
2343 GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
2345 GC_dirty_maintained
= TRUE
;
2346 if (GC_page_size
% HBLKSIZE
!= 0) {
2347 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2348 ABORT("Page size not multiple of HBLKSIZE");
2350 # if defined(SUNOS4) || defined(FREEBSD)
2351 GC_old_bus_handler
= signal(SIGBUS
, GC_write_fault_handler
);
2352 if (GC_old_bus_handler
== SIG_IGN
) {
2353 GC_err_printf0("Previously ignored bus error!?");
2354 GC_old_bus_handler
= SIG_DFL
;
2356 if (GC_old_bus_handler
!= SIG_DFL
) {
2358 GC_err_printf0("Replaced other SIGBUS handler\n");
2362 # if defined(OSF1) || defined(SUNOS4) || defined(LINUX)
2363 GC_old_segv_handler
= signal(SIGSEGV
, (SIG_PF
)GC_write_fault_handler
);
2364 if (GC_old_segv_handler
== SIG_IGN
) {
2365 GC_err_printf0("Previously ignored segmentation violation!?");
2366 GC_old_segv_handler
= SIG_DFL
;
2368 if (GC_old_segv_handler
!= SIG_DFL
) {
2370 GC_err_printf0("Replaced other SIGSEGV handler\n");
2374 # if defined(SUNOS5SIGS) || defined(IRIX5)
2375 # if defined(IRIX_THREADS)
2376 sigaction(SIGSEGV
, 0, &oldact
);
2377 sigaction(SIGSEGV
, &act
, 0);
2379 sigaction(SIGSEGV
, &act
, &oldact
);
2381 # if defined(_sigargs)
2382 /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2384 GC_old_segv_handler
= oldact
.sa_handler
;
2385 # else /* Irix 6.x or SUNOS5SIGS */
2386 if (oldact
.sa_flags
& SA_SIGINFO
) {
2387 GC_old_segv_handler
= (SIG_PF
)(oldact
.sa_sigaction
);
2389 GC_old_segv_handler
= oldact
.sa_handler
;
2392 if (GC_old_segv_handler
== SIG_IGN
) {
2393 GC_err_printf0("Previously ignored segmentation violation!?");
2394 GC_old_segv_handler
= SIG_DFL
;
2396 if (GC_old_segv_handler
!= SIG_DFL
) {
2398 GC_err_printf0("Replaced other SIGSEGV handler\n");
2402 # if defined(MACOSX) || defined(HPUX)
2403 sigaction(SIGBUS
, &act
, &oldact
);
2404 GC_old_bus_handler
= oldact
.sa_handler
;
2405 if (GC_old_bus_handler
== SIG_IGN
) {
2406 GC_err_printf0("Previously ignored bus error!?");
2407 GC_old_bus_handler
= SIG_DFL
;
2409 if (GC_old_bus_handler
!= SIG_DFL
) {
2411 GC_err_printf0("Replaced other SIGBUS handler\n");
2414 # endif /* MACOS || HPUX */
2415 # if defined(MSWIN32)
2416 GC_old_segv_handler
= SetUnhandledExceptionFilter(GC_write_fault_handler
);
2417 if (GC_old_segv_handler
!= NULL
) {
2419 GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2422 GC_old_segv_handler
= SIG_DFL
;
2429 void GC_protect_heap()
2435 for (i
= 0; i
< GC_n_heap_sects
; i
++) {
2436 start
= GC_heap_sects
[i
].hs_start
;
2437 len
= GC_heap_sects
[i
].hs_bytes
;
2438 PROTECT(start
, len
);
2442 /* We assume that either the world is stopped or its OK to lose dirty */
2443 /* bits while this is happenning (as in GC_enable_incremental). */
2444 void GC_read_dirty()
2446 BCOPY((word
*)GC_dirty_pages
, GC_grungy_pages
,
2447 (sizeof GC_dirty_pages
));
2448 BZERO((word
*)GC_dirty_pages
, (sizeof GC_dirty_pages
));
2452 GC_bool
GC_page_was_dirty(h
)
2455 register word index
= PHT_HASH(h
);
2457 return(HDR(h
) == 0 || get_pht_entry_from_index(GC_grungy_pages
, index
));
2461 * Acquiring the allocation lock here is dangerous, since this
2462 * can be called from within GC_call_with_alloc_lock, and the cord
2463 * package does so. On systems that allow nested lock acquisition, this
2465 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2468 static GC_bool syscall_acquired_lock
= FALSE
; /* Protected by GC lock. */
2470 void GC_begin_syscall()
2472 if (!I_HOLD_LOCK()) {
2474 syscall_acquired_lock
= TRUE
;
2478 void GC_end_syscall()
2480 if (syscall_acquired_lock
) {
2481 syscall_acquired_lock
= FALSE
;
/* Remove write protection from all pages overlapping [addr, addr+len), */
/* first marking them dirty so the collector does not lose updates.     */
/* Used before letting a system call write into the heap.  The range    */
/* must lie within a single object (ABORTs otherwise).  No-op unless    */
/* incremental collection is enabled or addr is not in the GC heap.     */
/* NOTE(review): fragment -- K&R parameter declarations, obj_start's    */
/* declaration, and several braces are missing from this extract.       */
2486 void GC_unprotect_range(addr
, len
)
2490 struct hblk
* start_block
;
2491 struct hblk
* end_block
;
2492 register struct hblk
*h
;
2495 if (!GC_incremental
) return;
2496 obj_start
= GC_base(addr
);
2497 if (obj_start
== 0) return;
2498 if (GC_base(addr
+ len
- 1) != obj_start
) {
2499 ABORT("GC_unprotect_range(range bigger than object)");
/* Round addr down, and the end of the range up, to page boundaries.    */
/* GC_page_size is assumed to be a power of two here.                   */
2501 start_block
= (struct hblk
*)((word
)addr
& ~(GC_page_size
- 1));
2502 end_block
= (struct hblk
*)((word
)(addr
+ len
- 1) & ~(GC_page_size
- 1));
2503 end_block
+= GC_page_size
/HBLKSIZE
- 1;
/* Mark every covered block dirty before unprotecting, so writes made   */
/* while unprotected are still seen by the next GC_read_dirty.          */
2504 for (h
= start_block
; h
<= end_block
; h
++) {
2505 register word index
= PHT_HASH(h
);
2507 async_set_pht_entry_from_index(GC_dirty_pages
, index
);
2509 UNPROTECT(start_block
,
2510 ((ptr_t
)end_block
- (ptr_t
)start_block
) + HBLKSIZE
);
/* Wrapped read(): unprotect the destination buffer (marking it dirty)  */
/* and then perform the real read via readv() or a direct syscall(),    */
/* so the kernel's write into the heap does not fault.  NOTE(review):   */
/* heavily fragmented -- the function bodies' braces, the iovec setup   */
/* (iov_base), local declarations, and several #else/#endif lines are   */
/* missing from this extract; confirm structure against full source.    */
2513 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(LINUX_THREADS) \
2514 && !defined(GC_USE_LD_WRAP)
2515 /* Replacement for UNIX system call. */
2516 /* Other calls that write to the heap */
2517 /* should be handled similarly. */
2518 # if defined(__STDC__) && !defined(SUNOS4)
2519 # include <unistd.h>
2520 # include <sys/uio.h>
2521 ssize_t
read(int fd
, void *buf
, size_t nbyte
)
2524 int read(fd
, buf
, nbyte
)
2526 int GC_read(fd
, buf
, nbyte
)
2536 GC_unprotect_range(buf
, (word
)nbyte
);
2537 # if defined(IRIX5) || defined(LINUX_THREADS)
2538 /* Indirect system call may not always be easily available. */
2539 /* We could call _read, but that would interfere with the */
2540 /* libpthread interception of read. */
2541 /* On Linux, we have to be careful with the linuxthreads */
2542 /* read interception. */
2547 iov
.iov_len
= nbyte
;
2548 result
= readv(fd
, &iov
, 1);
2551 /* The two zero args at the end of this list are because one
2552 IA-64 syscall() implementation actually requires six args
2553 to be passed, even though they aren't always used. */
2554 result
= syscall(SYS_read
, fd
, buf
, nbyte
, 0, 0);
2559 #endif /* !MSWIN32 && !MSWINCE && !LINUX_THREADS */
/* GNU ld "--wrap read" variant of the read interceptor: unprotect the  */
/* destination buffer, then delegate to the linker-provided             */
/* __real_read.  NOTE(review): fragment -- braces, the declaration of   */
/* result, and the return statement are missing from this extract.      */
2561 #ifdef GC_USE_LD_WRAP
2562 /* We use the GNU ld call wrapping facility. */
2563 /* This requires that the linker be invoked with "--wrap read". */
2564 /* This can be done by passing -Wl,"--wrap read" to gcc. */
2565 /* I'm not sure that this actually wraps whatever version of read */
2566 /* is called by stdio. That code also mentions __read. */
2567 # include <unistd.h>
2568 ssize_t
__wrap_read(int fd
, void *buf
, size_t nbyte
)
2573 GC_unprotect_range(buf
, (word
)nbyte
);
2574 result
= __real_read(fd
, buf
, nbyte
);
2579 /* We should probably also do this for __read, or whatever stdio */
2580 /* actually calls. */
/* MPROTECT_VDB has no "ever dirty" history.  NOTE(review): the body    */
/* (original lines 2585-2589) is missing from this extract, so its      */
/* return value cannot be stated here -- consult the full source.       */
2584 GC_bool
GC_page_was_ever_dirty(h
)
/* NOTE(review): the body (original lines 2593-2597) is missing from    */
/* this extract; under MPROTECT_VDB this is presumably a no-op --       */
/* confirm against the full source.                                     */
2590 /* Reset the n pages starting at h to "was never dirty" status. */
2592 void GC_is_fresh(h
, n
)
2598 # else /* !MPROTECT_VDB */
/* When MPROTECT_VDB is disabled, the --wrap read interceptor is a pure */
/* pass-through to __real_read (no unprotect needed).  NOTE(review):    */
/* the # endif for GC_USE_LD_WRAP (original lines 2603-2604) is missing */
/* from this extract.                                                   */
2600 # ifdef GC_USE_LD_WRAP
2601 ssize_t
__wrap_read(int fd
, void *buf
, size_t nbyte
)
2602 { return __real_read(fd
, buf
, nbyte
); }
2605 # endif /* MPROTECT_VDB */
2610 * See DEFAULT_VDB for interface descriptions.
2614 * This implementaion assumes a Solaris 2.X like /proc pseudo-file-system
2615 * from which we can read page modified bits. This facility is far from
2616 * optimal (e.g. we would like to get the info for only some of the
2617 * address space), but it avoids intercepting system calls.
2621 #include <sys/types.h>
2622 #include <sys/signal.h>
2623 #include <sys/fault.h>
2624 #include <sys/syscall.h>
2625 #include <sys/procfs.h>
2626 #include <sys/stat.h>
/* Size of the buffer used for /proc page-data reads; GC_read_dirty     */
/* doubles it when a read does not fit.                                 */
2628 #define INITIAL_BUF_SZ 4096
2629 word GC_proc_buf_size
= INITIAL_BUF_SZ
;
2632 #ifdef SOLARIS_THREADS
2633 /* We don't have exact sp values for threads. So we count on */
2634 /* occasionally declaring stack pages to be fresh. Thus we */
2635 /* need a real implementation of GC_is_fresh. We can't clear */
2636 /* entries in GC_written_pages, since that would declare all */
2637 /* pages with the given hash address to be fresh. */
2638 # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
2639 struct hblk
** GC_fresh_pages
; /* A direct mapped cache. */
2640 /* Collisions are dropped. */
2642 # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
2643 # define ADD_FRESH_PAGE(h) \
2644 GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
2645 # define PAGE_IS_FRESH(h) \
2646 (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
/* Bitwise-OR page hash table pht2 into pht1 (pht1 |= pht2, word by     */
/* word).  NOTE(review): fragment -- braces and the declaration of i    */
/* (original lines 2652-2654) are missing from this extract.            */
2649 /* Add all pages in pht2 to pht1 */
2650 void GC_or_pages(pht1
, pht2
)
2651 page_hash_table pht1
, pht2
;
2655 for (i
= 0; i
< PHT_SIZE
; i
++) pht1
[i
] |= pht2
[i
];
/* PROC_VDB initialization: open this process's /proc entry, obtain a   */
/* page-data descriptor via the PIOCOPENPD ioctl, and allocate the      */
/* read buffer (plus the fresh-page cache under SOLARIS_THREADS).       */
/* If allocation already happened before this point, conservatively     */
/* mark all pages in GC_written_pages as written.  NOTE(review):        */
/* heavily fragmented -- braces, local declarations (buf, fd, i), a     */
/* close(fd) and several error-path lines are missing from this         */
/* extract; confirm structure against the full source.                  */
2660 void GC_dirty_init()
2665 GC_dirty_maintained
= TRUE
;
2666 if (GC_words_allocd
!= 0 || GC_words_allocd_before_gc
!= 0) {
/* Heap already contains objects whose writes we never observed.        */
2669 for (i
= 0; i
< PHT_SIZE
; i
++) GC_written_pages
[i
] = (word
)(-1);
2671 GC_printf1("Allocated words:%lu:all pages may have been written\n",
2673 (GC_words_allocd
+ GC_words_allocd_before_gc
));
2676 sprintf(buf
, "/proc/%d", getpid());
2677 fd
= open(buf
, O_RDONLY
);
2679 ABORT("/proc open failed");
2681 GC_proc_fd
= syscall(SYS_ioctl
, fd
, PIOCOPENPD
, 0);
2683 if (GC_proc_fd
< 0) {
2684 ABORT("/proc ioctl failed");
2686 GC_proc_buf
= GC_scratch_alloc(GC_proc_buf_size
);
2687 # ifdef SOLARIS_THREADS
2688 GC_fresh_pages
= (struct hblk
**)
2689 GC_scratch_alloc(MAX_FRESH_PAGES
* sizeof (struct hblk
*));
2690 if (GC_fresh_pages
== 0) {
2691 GC_err_printf0("No space for fresh pages\n");
2694 BZERO(GC_fresh_pages
, MAX_FRESH_PAGES
* sizeof (struct hblk
*));
/* PROC_VDB stub: the /proc mechanism needs no advance notice of        */
/* writes.  NOTE(review): the (presumably empty) body on original       */
/* lines 2701-2703 is missing from this extract.                        */
2698 /* Ignore write hints. They don't help us here. */
2700 void GC_write_hint(h
)
2705 #ifdef SOLARIS_THREADS
2706 # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
2708 # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
/* PROC_VDB implementation: read the per-page modified bits from the    */
/* /proc page-data descriptor into GC_proc_buf, growing the buffer      */
/* (doubling) on short reads, then walk the prpageheader/prasmap        */
/* records setting GC_grungy_pages entries for every modified page.     */
/* On unrecoverable read failure, conservatively mark everything        */
/* dirty.  Finally fold the result into GC_written_pages.               */
/* NOTE(review): heavily fragmented -- braces, declarations (bufp,      */
/* nmaps, i, vaddr), the retry/else structure, and the #endif lines     */
/* are missing from this extract; confirm against the full source.      */
2711 void GC_read_dirty()
2713 unsigned long ps
, np
;
2716 struct prasmap
* map
;
2718 ptr_t current_addr
, limit
;
2722 BZERO(GC_grungy_pages
, (sizeof GC_grungy_pages
));
2725 if (READ(GC_proc_fd
, bufp
, GC_proc_buf_size
) <= 0) {
2727 GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
2731 /* Retry with larger buffer. */
2732 word new_size
= 2 * GC_proc_buf_size
;
2733 char * new_buf
= GC_scratch_alloc(new_size
);
2736 GC_proc_buf
= bufp
= new_buf
;
2737 GC_proc_buf_size
= new_size
;
2739 if (syscall(SYS_read
, GC_proc_fd
, bufp
, GC_proc_buf_size
) <= 0) {
2740 WARN("Insufficient space for /proc read\n", 0);
/* Give up: treat every page as dirty and ever-written.                 */
2742 memset(GC_grungy_pages
, 0xff, sizeof (page_hash_table
));
2743 memset(GC_written_pages
, 0xff, sizeof(page_hash_table
));
2744 # ifdef SOLARIS_THREADS
2745 BZERO(GC_fresh_pages
,
2746 MAX_FRESH_PAGES
* sizeof (struct hblk
*));
2752 /* Copy dirty bits into GC_grungy_pages */
2753 nmaps
= ((struct prpageheader
*)bufp
) -> pr_nmap
;
2754 /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
2755 nmaps, PG_REFERENCED, PG_MODIFIED); */
2756 bufp
= bufp
+ sizeof(struct prpageheader
);
2757 for (i
= 0; i
< nmaps
; i
++) {
2758 map
= (struct prasmap
*)bufp
;
2759 vaddr
= (ptr_t
)(map
-> pr_vaddr
);
2760 ps
= map
-> pr_pagesize
;
2761 np
= map
-> pr_npage
;
2762 /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
2763 limit
= vaddr
+ ps
* np
;
2764 bufp
+= sizeof (struct prasmap
);
/* One per-page flag byte follows each prasmap record.                  */
2765 for (current_addr
= vaddr
;
2766 current_addr
< limit
; current_addr
+= ps
){
2767 if ((*bufp
++) & PG_MODIFIED
) {
2768 register struct hblk
* h
= (struct hblk
*) current_addr
;
/* A system page may span several heap blocks; mark each one.           */
2770 while ((ptr_t
)h
< current_addr
+ ps
) {
2771 register word index
= PHT_HASH(h
);
2773 set_pht_entry_from_index(GC_grungy_pages
, index
);
2774 # ifdef SOLARIS_THREADS
/* A modified page can no longer be considered fresh.                   */
2776 register int slot
= FRESH_PAGE_SLOT(h
);
2778 if (GC_fresh_pages
[slot
] == h
) {
2779 GC_fresh_pages
[slot
] = 0;
/* Round bufp up to the next long-word boundary between map records.    */
2787 bufp
+= sizeof(long) - 1;
2788 bufp
= (char *)((unsigned long)bufp
& ~(sizeof(long)-1));
2790 /* Update GC_written_pages. */
2791 GC_or_pages(GC_written_pages
, GC_grungy_pages
);
2792 # ifdef SOLARIS_THREADS
2793 /* Make sure that old stacks are considered completely clean */
2794 /* unless written again. */
2795 GC_old_stacks_are_fresh();
/* PROC_VDB query: was the page containing h modified since the last    */
/* GC_read_dirty?  Under SOLARIS_THREADS a page later declared fresh    */
/* overrides a stale dirty bit.  NOTE(review): fragment -- K&R          */
/* parameter declaration, braces, the #endif and the return statement   */
/* are missing from this extract.                                       */
2801 GC_bool
GC_page_was_dirty(h
)
2804 register word index
= PHT_HASH(h
);
2805 register GC_bool result
;
2807 result
= get_pht_entry_from_index(GC_grungy_pages
, index
);
2808 # ifdef SOLARIS_THREADS
2809 if (result
&& PAGE_IS_FRESH(h
)) result
= FALSE
;
2810 /* This happens only if page was declared fresh since */
2811 /* the read_dirty call, e.g. because it's in an unused */
2812 /* thread stack. It's OK to treat it as clean, in */
2813 /* that case. And it's consistent with */
2814 /* GC_page_was_ever_dirty. */
/* PROC_VDB query: was the page containing h ever written (per the      */
/* accumulated GC_written_pages table)?  Fresh pages override, as in    */
/* GC_page_was_dirty.  NOTE(review): fragment -- K&R parameter          */
/* declaration, braces, #endif and return statement are missing.        */
2819 GC_bool
GC_page_was_ever_dirty(h
)
2822 register word index
= PHT_HASH(h
);
2823 register GC_bool result
;
2825 result
= get_pht_entry_from_index(GC_written_pages
, index
);
2826 # ifdef SOLARIS_THREADS
2827 if (result
&& PAGE_IS_FRESH(h
)) result
= FALSE
;
/* PROC_VDB: declare the n heap blocks starting at h "never dirty" by   */
/* entering each into the direct-mapped fresh-page cache (collisions    */
/* simply evict).  NOTE(review): fragment -- K&R parameter              */
/* declarations, the declaration of i, braces and #endif are missing    */
/* from this extract.                                                   */
2832 /* Caller holds allocation lock. */
2833 void GC_is_fresh(h
, n
)
2838 register word index
;
2840 # ifdef SOLARIS_THREADS
2843 if (GC_fresh_pages
!= 0) {
2844 for (i
= 0; i
< n
; i
++) {
2845 ADD_FRESH_PAGE(h
+ i
);
2851 # endif /* PROC_VDB */
2856 # include "vd/PCR_VD.h"
2858 # define NPAGES (32*1024) /* 128 MB */
2860 PCR_VD_DB GC_grungy_bits
[NPAGES
];
2862 ptr_t GC_vd_base
; /* Address corresponding to GC_grungy_bits[0] */
2863 /* HBLKSIZE aligned. */
/* PCR_VDB initialization: anchor the dirty-bit window at the first     */
/* heap section and start the PCR virtual-dirty facility over           */
/* NPAGES*HBLKSIZE bytes.  NOTE(review): fragment -- braces and the     */
/* PCR_VD_Start error-check condition (original line 2874) are          */
/* missing from this extract.                                           */
2865 void GC_dirty_init()
2867 GC_dirty_maintained
= TRUE
;
2868 /* For the time being, we assume the heap generally grows up */
2869 GC_vd_base
= GC_heap_sects
[0].hs_start
;
2870 if (GC_vd_base
== 0) {
2871 ABORT("Bad initial heap segment");
2873 if (PCR_VD_Start(HBLKSIZE
, GC_vd_base
, NPAGES
*HBLKSIZE
)
2875 ABORT("dirty bit initialization failed");
/* PCR_VDB implementation: lazily enable write-protection on any heap   */
/* sections added since the last call (tracked by static onhs), then    */
/* read-and-clear the dirty bits into GC_grungy_bits.  NOTE(review):    */
/* fragment -- braces and the PCR_VD_Clear error-check condition        */
/* (original line 2894) are missing from this extract.                  */
2879 void GC_read_dirty()
2881 /* lazily enable dirty bits on newly added heap sects */
2883 static int onhs
= 0;
2884 int nhs
= GC_n_heap_sects
;
2885 for( ; onhs
< nhs
; onhs
++ ) {
2886 PCR_VD_WriteProtectEnable(
2887 GC_heap_sects
[onhs
].hs_start
,
2888 GC_heap_sects
[onhs
].hs_bytes
);
2893 if (PCR_VD_Clear(GC_vd_base
, NPAGES
*HBLKSIZE
, GC_grungy_bits
)
2895 ABORT("dirty bit read failed");
/* PCR_VDB query: test h's entry in GC_grungy_bits.  Blocks outside     */
/* the [GC_vd_base, GC_vd_base + NPAGES*HBLKSIZE) window are handled    */
/* by the out-of-range branch whose body (original lines 2903-2904)     */
/* is missing from this extract -- presumably it returns TRUE           */
/* (conservatively dirty); confirm against the full source.             */
/* NOTE(review): K&R parameter declaration and braces also missing.     */
2899 GC_bool
GC_page_was_dirty(h
)
2902 if((ptr_t
)h
< GC_vd_base
|| (ptr_t
)h
>= GC_vd_base
+ NPAGES
*HBLKSIZE
) {
2905 return(GC_grungy_bits
[h
- (struct hblk
*)GC_vd_base
] & PCR_VD_DB_dirtyBit
);
/* PCR_VDB write hint: briefly disable and re-enable write protection   */
/* on block h; the disable/enable cycle leaves the page marked dirty    */
/* without taking a fault on the upcoming write.  NOTE(review):         */
/* fragment -- K&R parameter declaration and braces are missing.        */
2909 void GC_write_hint(h
)
2912 PCR_VD_WriteProtectDisable(h
, HBLKSIZE
);
2913 PCR_VD_WriteProtectEnable(h
, HBLKSIZE
);
2916 # endif /* PCR_VDB */
2919 * Call stack save code for debugging.
2920 * Should probably be in mach_dep.c, but that requires reorganization.
2923 /* I suspect the following works for most X86 *nix variants, so */
2924 /* long as the frame pointer is explicitly stored. In the case of gcc, */
2925 /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
2926 #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
2928 struct frame
*fr_savfp
;
2930 long fr_arg
[NARGS
]; /* All the arguments go here. */
2939 struct frame
*fr_savfp
;
2948 # if defined(SUNOS4)
2949 # include <machine/frame.h>
2951 # if defined (DRSNX)
2952 # include <sys/sparc/frame.h>
2954 # if defined(OPENBSD) || defined(NETBSD)
2957 # include <sys/frame.h>
2963 --> We only know how to to get the first
6 arguments
2967 #ifdef SAVE_CALL_CHAIN
2968 /* Fill in the pc and argument information for up to NFRAMES of my */
2969 /* callers. Ignore my frame and my callers frame. */
2971 #if (defined(OPENBSD) || defined(NETBSD)) && defined(SPARC)
2972 # define FR_SAVFP fr_fp
2973 # define FR_SAVPC fr_pc
2975 # define FR_SAVFP fr_savfp
2976 # define FR_SAVPC fr_savpc
2979 #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
/* Walk saved frame pointers to record up to NFRAMES caller PCs (and    */
/* up to NARGS argument words per frame, stored bitwise-complemented    */
/* so the collector does not treat them as pointers) into info[].       */
/* Frame access is via FR_SAVFP/FR_SAVPC (see the #defines above); on   */
/* x86 the initial frame comes from %ebp, on SPARC from                 */
/* GC_save_regs_in_stack() plus the BIAS adjustment.  NOTE(review):     */
/* fragment -- braces, the declarations of fp/nframes/i, and the        */
/* #if/#else lines separating the x86 and SPARC setup paths are         */
/* missing from this extract; confirm against the full source.          */
2985 void GC_save_callers (info
)
2986 struct callinfo info
[NFRAMES
];
2988 struct frame
*frame
;
2992 /* We assume this is turned on only with gcc as the compiler. */
2993 asm("movl %%ebp,%0" : "=r"(frame
));
2996 word
GC_save_regs_in_stack();
2998 frame
= (struct frame
*) GC_save_regs_in_stack ();
2999 fp
= (struct frame
*)((long) frame
-> FR_SAVFP
+ BIAS
);
/* Walk toward the stack bottom, stopping at NFRAMES or a frame that    */
/* is hotter than its predecessor (corrupt / end of chain).             */
3002 for (; (!(fp HOTTER_THAN frame
) && !(GC_stackbottom
HOTTER_THAN (ptr_t
)fp
)
3003 && (nframes
< NFRAMES
));
3004 fp
= (struct frame
*)((long) fp
-> FR_SAVFP
+ BIAS
), nframes
++) {
3007 info
[nframes
].ci_pc
= fp
->FR_SAVPC
;
3008 for (i
= 0; i
< NARGS
; i
++) {
/* Complemented so conservative marking ignores these words.            */
3009 info
[nframes
].ci_arg
[i
] = ~(fp
->fr_arg
[i
]);
/* Zero-terminate the list when fewer than NFRAMES frames were found.   */
3012 if (nframes
< NFRAMES
) info
[nframes
].ci_pc
= 0;
3015 #endif /* SAVE_CALL_CHAIN */
3017 #if defined(LINUX) && defined(__ELF__) && \
3018 (!defined(SMALL_CONFIG) || defined(USE_PROC_FOR_LIBRARIES))
3019 #ifdef GC_USE_LD_WRAP
3020 # define READ __real_read
3026 /* Repeatedly perform a read call until the buffer is filled or */
3027 /* we encounter EOF. */
/* Read from fd until count bytes are accumulated or EOF; returns the   */
/* number of bytes read, or the (negative) error result of the failing  */
/* READ call.  NOTE(review): fragment -- braces, the declaration of     */
/* result, the num_read += result accumulation and the final return     */
/* (original lines 3037-3040) are missing from this extract.  Also      */
/* note the signed/unsigned comparison num_read < count (ssize_t vs     */
/* size_t) -- inherited behavior, flagged for review.                   */
3028 ssize_t
GC_repeat_read(int fd
, char *buf
, size_t count
)
3030 ssize_t num_read
= 0;
3033 while (num_read
< count
) {
3034 result
= READ(fd
, buf
+ num_read
, count
- num_read
);
3035 if (result
< 0) return result
;
3036 if (result
== 0) break;
3041 #endif /* LINUX && ... */
3044 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
3046 /* Dump /proc/self/maps to GC_stderr, to enable looking up names for
3047 addresses in FIND_LEAK output. */
3049 void GC_print_address_map()
3053 char maps_temp
[32768];
3054 GC_err_printf0("---------- Begin address map ----------\n");
3055 f
= open("/proc/self/maps", O_RDONLY
);
3056 if (-1 == f
) ABORT("Couldn't open /proc/self/maps");
3058 result
= GC_repeat_read(f
, maps_temp
, sizeof(maps_temp
));
3059 if (result
<= 0) ABORT("Couldn't read /proc/self/maps");
3060 GC_err_write(maps_temp
, result
);
3061 } while (result
== sizeof(maps_temp
));
3063 GC_err_printf0("---------- End address map ----------\n");