2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
17 # include "private/gc_priv.h"
19 # if defined(LINUX) && !defined(POWERPC)
20 # include <linux/version.h>
21 # if (LINUX_VERSION_CODE <= 0x10400)
22 /* Ugly hack to get struct sigcontext_struct definition. Required */
23 /* for some early 1.3.X releases. Will hopefully go away soon. */
24 /* in some later Linux releases, asm/sigcontext.h may have to */
25 /* be included instead. */
27 # include <asm/signal.h>
30 /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31 /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
32 /* prototypes, so we have to include the top-level sigcontext.h to */
33 /* make sure the former gets defined to be the latter if appropriate. */
34 # include <features.h>
36 # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
37 /* glibc 2.1 no longer has sigcontext.h. But signal.h */
38 /* has the right declaration for glibc 2.1. */
39 # include <sigcontext.h>
40 # endif /* 0 == __GLIBC_MINOR__ */
41 # else /* not 2 <= __GLIBC__ */
42 /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
43 /* one. Check LINUX_VERSION_CODE to see which we should reference. */
44 # include <asm/sigcontext.h>
45 # endif /* 2 <= __GLIBC__ */
48 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
50 # include <sys/types.h>
51 # if !defined(MSWIN32) && !defined(SUNOS4)
58 # define SIGSEGV 0 /* value is irrelevant */
63 /* Blatantly OS dependent routines, except for those that are related */
64 /* to dynamic loading. */
66 # if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
67 # define NEED_FIND_LIMIT
70 # if !defined(STACKBOTTOM) && defined(HEURISTIC2)
71 # define NEED_FIND_LIMIT
74 # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
75 # define NEED_FIND_LIMIT
78 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
79 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
80 # define NEED_FIND_LIMIT
83 #ifdef NEED_FIND_LIMIT
87 #if defined(FREEBSD) && defined(I386)
88 # include <machine/trap.h>
97 #if defined(MSWIN32) || defined(MSWINCE)
98 # define WIN32_LEAN_AND_MEAN
100 # include <windows.h>
104 # include <Processes.h>
108 # include <sys/uio.h>
109 # include <malloc.h> /* for locking */
112 # include <sys/types.h>
113 # include <sys/mman.h>
114 # include <sys/stat.h>
121 #if defined(SUNOS5SIGS) || defined (HURD) || defined(LINUX)
123 # include <sys/siginfo.h>
127 # define setjmp(env) sigsetjmp(env, 1)
128 # define longjmp(env, val) siglongjmp(env, val)
129 # define jmp_buf sigjmp_buf
133 /* Apparently necessary for djgpp 2.01. May cause problems with */
134 /* other versions. */
135 typedef long unsigned int caddr_t
;
139 # include "il/PCR_IL.h"
140 # include "th/PCR_ThCtl.h"
141 # include "mm/PCR_MM.h"
144 #if !defined(NO_EXECUTE_PERMISSION)
145 # define OPT_PROT_EXEC PROT_EXEC
147 # define OPT_PROT_EXEC 0
150 #if defined(SEARCH_FOR_DATA_START)
151 /* The I386 case can be handled without a search. The Alpha case */
152 /* used to be handled differently as well, but the rules changed */
153 /* for recent Linux versions. This seems to be the easiest way to */
154 /* cover all versions. */
157 # pragma weak __data_start
158 extern int __data_start
[];
159 # pragma weak data_start
160 extern int data_start
[];
166 void GC_init_linux_data_start()
168 extern ptr_t
GC_find_limit();
171 /* Try the easy approaches first: */
172 if (__data_start
!= 0) {
173 GC_data_start
= (ptr_t
)__data_start
;
176 if (data_start
!= 0) {
177 GC_data_start
= (ptr_t
)data_start
;
181 GC_data_start
= GC_find_limit((ptr_t
)_end
, FALSE
);
187 # ifndef ECOS_GC_MEMORY_SIZE
188 # define ECOS_GC_MEMORY_SIZE (448 * 1024)
189 # endif /* ECOS_GC_MEMORY_SIZE */
191 // setjmp() function, as described in ANSI para 7.6.1.1
192 #define setjmp( __env__ ) hal_setjmp( __env__ )
194 // FIXME: This is a simple way of allocating memory which is
195 // compatible with ECOS early releases. Later releases use a more
196 // sophisticated means of allocating memory than this simple static
197 // allocator, but this method is at least bound to work.
198 static char memory
[ECOS_GC_MEMORY_SIZE
];
199 static char *brk
= memory
;
201 static void *tiny_sbrk(ptrdiff_t increment
)
207 if (brk
> memory
+ sizeof memory
)
215 #define sbrk tiny_sbrk
218 #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
221 void GC_init_netbsd_elf()
223 extern ptr_t
GC_find_limit();
224 extern char **environ
;
225 /* This may need to be environ, without the underscore, for */
227 GC_data_start
= GC_find_limit((ptr_t
)&environ
, FALSE
);
235 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
238 unsigned short magic_number
;
239 unsigned short padding
[29];
243 #define E_MAGIC(x) (x).magic_number
244 #define EMAGIC 0x5A4D
245 #define E_LFANEW(x) (x).new_exe_offset
248 unsigned char magic_number
[2];
249 unsigned char byte_order
;
250 unsigned char word_order
;
251 unsigned long exe_format_level
;
254 unsigned long padding1
[13];
255 unsigned long object_table_offset
;
256 unsigned long object_count
;
257 unsigned long padding2
[31];
260 #define E32_MAGIC1(x) (x).magic_number[0]
261 #define E32MAGIC1 'L'
262 #define E32_MAGIC2(x) (x).magic_number[1]
263 #define E32MAGIC2 'X'
264 #define E32_BORDER(x) (x).byte_order
266 #define E32_WORDER(x) (x).word_order
268 #define E32_CPU(x) (x).cpu
270 #define E32_OBJTAB(x) (x).object_table_offset
271 #define E32_OBJCNT(x) (x).object_count
277 unsigned long pagemap
;
278 unsigned long mapsize
;
279 unsigned long reserved
;
282 #define O32_FLAGS(x) (x).flags
283 #define OBJREAD 0x0001L
284 #define OBJWRITE 0x0002L
285 #define OBJINVALID 0x0080L
286 #define O32_SIZE(x) (x).size
287 #define O32_BASE(x) (x).base
289 # else /* IBM's compiler */
291 /* A kludge to get around what appears to be a header file bug */
293 # define WORD unsigned short
296 # define DWORD unsigned long
303 # endif /* __IBMC__ */
305 # define INCL_DOSEXCEPTIONS
306 # define INCL_DOSPROCESS
307 # define INCL_DOSERRORS
308 # define INCL_DOSMODULEMGR
309 # define INCL_DOSMEMMGR
313 /* Disable and enable signals during nontrivial allocations */
315 void GC_disable_signals(void)
319 DosEnterMustComplete(&nest
);
320 if (nest
!= 1) ABORT("nested GC_disable_signals");
323 void GC_enable_signals(void)
327 DosExitMustComplete(&nest
);
328 if (nest
!= 0) ABORT("GC_enable_signals");
334 # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
335 && !defined(MSWINCE) \
336 && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
337 && !defined(NOSYS) && !defined(ECOS)
339 # if defined(sigmask) && !defined(UTS4) && !defined(HURD)
340 /* Use the traditional BSD interface */
341 # define SIGSET_T int
342 # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
343 # define SIG_FILL(set) (set) = 0x7fffffff
344 /* Setting the leading bit appears to provoke a bug in some */
345 /* longjmp implementations. Most systems appear not to have */
347 # define SIGSETMASK(old, new) (old) = sigsetmask(new)
349 /* Use POSIX/SYSV interface */
350 # define SIGSET_T sigset_t
351 # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
352 # define SIG_FILL(set) sigfillset(&set)
353 # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
356 static GC_bool mask_initialized
= FALSE
;
358 static SIGSET_T new_mask
;
360 static SIGSET_T old_mask
;
362 static SIGSET_T dummy
;
364 #if defined(PRINTSTATS) && !defined(THREADS)
365 # define CHECK_SIGNALS
366 int GC_sig_disabled
= 0;
369 void GC_disable_signals()
371 if (!mask_initialized
) {
374 SIG_DEL(new_mask
, SIGSEGV
);
375 SIG_DEL(new_mask
, SIGILL
);
376 SIG_DEL(new_mask
, SIGQUIT
);
378 SIG_DEL(new_mask
, SIGBUS
);
381 SIG_DEL(new_mask
, SIGIOT
);
384 SIG_DEL(new_mask
, SIGEMT
);
387 SIG_DEL(new_mask
, SIGTRAP
);
389 mask_initialized
= TRUE
;
391 # ifdef CHECK_SIGNALS
392 if (GC_sig_disabled
!= 0) ABORT("Nested disables");
395 SIGSETMASK(old_mask
,new_mask
);
398 void GC_enable_signals()
400 # ifdef CHECK_SIGNALS
401 if (GC_sig_disabled
!= 1) ABORT("Unmatched enable");
404 SIGSETMASK(dummy
,old_mask
);
/* Ivan Demakov: simplest way (to me) */
/* This platform needs no signal masking around allocation, so both   */
/* entry points are deliberately empty.                               */
void GC_disable_signals() { /* intentionally empty */ }
void GC_enable_signals() { /* intentionally empty */ }
417 /* Find the page size */
420 # if defined(MSWIN32) || defined(MSWINCE)
421 void GC_setpagesize()
423 GetSystemInfo(&GC_sysinfo
);
424 GC_page_size
= GC_sysinfo
.dwPageSize
;
428 # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
429 || defined(USE_MUNMAP)
430 void GC_setpagesize()
432 GC_page_size
= GETPAGESIZE();
435 /* It's acceptable to fake it. */
436 void GC_setpagesize()
438 GC_page_size
= HBLKSIZE
;
444 * Find the base of the stack.
445 * Used only in single-threaded environment.
446 * With threads, GC_mark_roots needs to know how to do this.
447 * Called with allocator lock held.
449 # if defined(MSWIN32) || defined(MSWINCE)
450 # define is_writable(prot) ((prot) == PAGE_READWRITE \
451 || (prot) == PAGE_WRITECOPY \
452 || (prot) == PAGE_EXECUTE_READWRITE \
453 || (prot) == PAGE_EXECUTE_WRITECOPY)
454 /* Return the number of bytes that are writable starting at p. */
455 /* The pointer p is assumed to be page aligned. */
456 /* If base is not 0, *base becomes the beginning of the */
457 /* allocation region containing p. */
458 word
GC_get_writable_length(ptr_t p
, ptr_t
*base
)
460 MEMORY_BASIC_INFORMATION buf
;
464 result
= VirtualQuery(p
, &buf
, sizeof(buf
));
465 if (result
!= sizeof(buf
)) ABORT("Weird VirtualQuery result");
466 if (base
!= 0) *base
= (ptr_t
)(buf
.AllocationBase
);
467 protect
= (buf
.Protect
& ~(PAGE_GUARD
| PAGE_NOCACHE
));
468 if (!is_writable(protect
)) {
471 if (buf
.State
!= MEM_COMMIT
) return(0);
472 return(buf
.RegionSize
);
475 ptr_t
GC_get_stack_base()
478 ptr_t sp
= (ptr_t
)(&dummy
);
479 ptr_t trunc_sp
= (ptr_t
)((word
)sp
& ~(GC_page_size
- 1));
480 word size
= GC_get_writable_length(trunc_sp
, 0);
482 return(trunc_sp
+ size
);
486 # endif /* MS Windows */
489 # include <kernel/OS.h>
490 ptr_t
GC_get_stack_base(){
492 get_thread_info(find_thread(NULL
),&th
);
500 ptr_t
GC_get_stack_base()
505 if (DosGetInfoBlocks(&ptib
, &ppib
) != NO_ERROR
) {
506 GC_err_printf0("DosGetInfoBlocks failed\n");
507 ABORT("DosGetInfoBlocks failed\n");
509 return((ptr_t
)(ptib
-> tib_pstacklimit
));
516 # include "AmigaOS.c"
520 # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
523 typedef void (*handler
)(int);
525 typedef void (*handler
)();
528 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) || defined(HURD)
529 static struct sigaction old_segv_act
;
530 # if defined(_sigargs) /* !Irix6.x */ || defined(HPUX) || defined(HURD)
531 static struct sigaction old_bus_act
;
534 static handler old_segv_handler
, old_bus_handler
;
538 void GC_set_and_save_fault_handler(handler h
)
540 void GC_set_and_save_fault_handler(h
)
544 # if defined(SUNOS5SIGS) || defined(IRIX5) \
545 || defined(OSF1) || defined(HURD)
546 struct sigaction act
;
550 act
.sa_flags
= SA_RESTART
| SA_NODEFER
;
552 act
.sa_flags
= SA_RESTART
;
554 /* The presence of SA_NODEFER represents yet another gross */
555 /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
556 /* interact correctly with -lthread. We hide the confusion */
557 /* by making sure that signal handling doesn't affect the */
560 (void) sigemptyset(&act
.sa_mask
);
561 # ifdef GC_IRIX_THREADS
562 /* Older versions have a bug related to retrieving and */
563 /* and setting a handler at the same time. */
564 (void) sigaction(SIGSEGV
, 0, &old_segv_act
);
565 (void) sigaction(SIGSEGV
, &act
, 0);
567 (void) sigaction(SIGSEGV
, &act
, &old_segv_act
);
568 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
569 || defined(HPUX) || defined(HURD)
570 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
571 /* Pthreads doesn't exist under Irix 5.x, so we */
572 /* don't have to worry in the threads case. */
573 (void) sigaction(SIGBUS
, &act
, &old_bus_act
);
575 # endif /* GC_IRIX_THREADS */
577 old_segv_handler
= signal(SIGSEGV
, h
);
579 old_bus_handler
= signal(SIGBUS
, h
);
583 # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
585 # ifdef NEED_FIND_LIMIT
586 /* Some tools to implement HEURISTIC2 */
587 # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
588 /* static */ jmp_buf GC_jmp_buf
;
591 void GC_fault_handler(sig
)
594 longjmp(GC_jmp_buf
, 1);
597 void GC_setup_temporary_fault_handler()
599 GC_set_and_save_fault_handler(GC_fault_handler
);
602 void GC_reset_fault_handler()
604 # if defined(SUNOS5SIGS) || defined(IRIX5) \
605 || defined(OSF1) || defined(HURD)
606 (void) sigaction(SIGSEGV
, &old_segv_act
, 0);
607 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
608 || defined(HPUX) || defined(HURD)
609 (void) sigaction(SIGBUS
, &old_bus_act
, 0);
612 (void) signal(SIGSEGV
, old_segv_handler
);
614 (void) signal(SIGBUS
, old_bus_handler
);
619 /* Return the first nonaddressible location > p (up) or */
620 /* the smallest location q s.t. [q,p] is addressible (!up). */
621 ptr_t
GC_find_limit(p
, up
)
625 static VOLATILE ptr_t result
;
626 /* Needs to be static, since otherwise it may not be */
627 /* preserved across the longjmp. Can safely be */
628 /* static since it's only called once, with the */
629 /* allocation lock held. */
632 GC_setup_temporary_fault_handler();
633 if (setjmp(GC_jmp_buf
) == 0) {
634 result
= (ptr_t
)(((word
)(p
))
635 & ~(MIN_PAGE_SIZE
-1));
638 result
+= MIN_PAGE_SIZE
;
640 result
-= MIN_PAGE_SIZE
;
642 GC_noop1((word
)(*result
));
645 GC_reset_fault_handler();
647 result
+= MIN_PAGE_SIZE
;
653 # if defined(ECOS) || defined(NOSYS)
654 ptr_t
GC_get_stack_base()
661 #ifdef LINUX_STACKBOTTOM
663 #include <sys/types.h>
664 #include <sys/stat.h>
666 # define STAT_SKIP 27 /* Number of fields preceding startstack */
667 /* field in /proc/self/stat */
669 # pragma weak __libc_stack_end
670 extern ptr_t __libc_stack_end
;
673 # pragma weak __libc_ia64_register_backing_store_base
674 extern ptr_t __libc_ia64_register_backing_store_base
;
676 ptr_t
GC_get_register_stack_base(void)
678 if (0 != &__libc_ia64_register_backing_store_base
679 && 0 != __libc_ia64_register_backing_store_base
) {
680 /* Glibc 2.2.4 has a bug such that for dynamically linked */
681 /* executables __libc_ia64_register_backing_store_base is */
682 /* defined but ininitialized during constructor calls. */
683 /* Hence we check for both nonzero address and value. */
684 return __libc_ia64_register_backing_store_base
;
686 word result
= (word
)GC_stackbottom
- BACKING_STORE_DISPLACEMENT
;
687 result
+= BACKING_STORE_ALIGNMENT
- 1;
688 result
&= ~(BACKING_STORE_ALIGNMENT
- 1);
689 return (ptr_t
)result
;
694 ptr_t
GC_linux_stack_base(void)
696 /* We read the stack base value from /proc/self/stat. We do this */
697 /* using direct I/O system calls in order to avoid calling malloc */
698 /* in case REDIRECT_MALLOC is defined. */
699 # define STAT_BUF_SIZE 4096
700 # if defined(GC_USE_LD_WRAP)
701 # define STAT_READ __real_read
703 # define STAT_READ read
705 char stat_buf
[STAT_BUF_SIZE
];
709 size_t i
, buf_offset
= 0;
711 /* First try the easy way. This should work for glibc 2.2 */
712 if (0 != &__libc_stack_end
) {
713 return __libc_stack_end
;
715 f
= open("/proc/self/stat", O_RDONLY
);
716 if (f
< 0 || STAT_READ(f
, stat_buf
, STAT_BUF_SIZE
) < 2 * STAT_SKIP
) {
717 ABORT("Couldn't read /proc/self/stat");
719 c
= stat_buf
[buf_offset
++];
720 /* Skip the required number of fields. This number is hopefully */
721 /* constant across all Linux implementations. */
722 for (i
= 0; i
< STAT_SKIP
; ++i
) {
723 while (isspace(c
)) c
= stat_buf
[buf_offset
++];
724 while (!isspace(c
)) c
= stat_buf
[buf_offset
++];
726 while (isspace(c
)) c
= stat_buf
[buf_offset
++];
730 c
= stat_buf
[buf_offset
++];
733 if (result
< 0x10000000) ABORT("Absurd stack bottom value");
734 return (ptr_t
)result
;
737 #endif /* LINUX_STACKBOTTOM */
739 #ifdef FREEBSD_STACKBOTTOM
741 /* This uses an undocumented sysctl call, but at least one expert */
742 /* believes it will stay. */
745 #include <sys/types.h>
746 #include <sys/sysctl.h>
748 ptr_t
GC_freebsd_stack_base(void)
750 int nm
[2] = {CTL_KERN
, KERN_USRSTACK
};
752 size_t len
= sizeof(ptr_t
);
753 int r
= sysctl(nm
, 2, &base
, &len
, NULL
, 0);
755 if (r
) ABORT("Error getting stack base");
760 #endif /* FREEBSD_STACKBOTTOM */
762 #if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
763 && !defined(MSWINCE) && !defined(OS2)
765 ptr_t
GC_get_stack_base()
770 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
776 # ifdef STACK_GROWS_DOWN
777 result
= (ptr_t
)((((word
)(&dummy
))
778 + STACKBOTTOM_ALIGNMENT_M1
)
779 & ~STACKBOTTOM_ALIGNMENT_M1
);
781 result
= (ptr_t
)(((word
)(&dummy
))
782 & ~STACKBOTTOM_ALIGNMENT_M1
);
784 # endif /* HEURISTIC1 */
785 # ifdef LINUX_STACKBOTTOM
786 result
= GC_linux_stack_base();
788 # ifdef FREEBSD_STACKBOTTOM
789 result
= GC_freebsd_stack_base();
792 # ifdef STACK_GROWS_DOWN
793 result
= GC_find_limit((ptr_t
)(&dummy
), TRUE
);
794 # ifdef HEURISTIC2_LIMIT
795 if (result
> HEURISTIC2_LIMIT
796 && (ptr_t
)(&dummy
) < HEURISTIC2_LIMIT
) {
797 result
= HEURISTIC2_LIMIT
;
801 result
= GC_find_limit((ptr_t
)(&dummy
), FALSE
);
802 # ifdef HEURISTIC2_LIMIT
803 if (result
< HEURISTIC2_LIMIT
804 && (ptr_t
)(&dummy
) > HEURISTIC2_LIMIT
) {
805 result
= HEURISTIC2_LIMIT
;
810 # endif /* HEURISTIC2 */
811 # ifdef STACK_GROWS_DOWN
812 if (result
== 0) result
= (ptr_t
)(signed_word
)(-sizeof(ptr_t
));
815 # endif /* STACKBOTTOM */
817 # endif /* NOSYS ECOS */
819 # endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS */
822 * Register static data segment(s) as roots.
823 * If more data segments are added later then they need to be registered
824 * add that point (as we do with SunOS dynamic loading),
825 * or GC_mark_roots needs to check for them (as we do with PCR).
826 * Called with allocator lock held.
831 void GC_register_data_segments()
835 HMODULE module_handle
;
839 struct exe_hdr hdrdos
; /* MSDOS header. */
840 struct e32_exe hdr386
; /* Real header for my executable */
841 struct o32_obj seg
; /* Currrent segment */
845 if (DosGetInfoBlocks(&ptib
, &ppib
) != NO_ERROR
) {
846 GC_err_printf0("DosGetInfoBlocks failed\n");
847 ABORT("DosGetInfoBlocks failed\n");
849 module_handle
= ppib
-> pib_hmte
;
850 if (DosQueryModuleName(module_handle
, PBUFSIZ
, path
) != NO_ERROR
) {
851 GC_err_printf0("DosQueryModuleName failed\n");
852 ABORT("DosGetInfoBlocks failed\n");
854 myexefile
= fopen(path
, "rb");
855 if (myexefile
== 0) {
856 GC_err_puts("Couldn't open executable ");
857 GC_err_puts(path
); GC_err_puts("\n");
858 ABORT("Failed to open executable\n");
860 if (fread((char *)(&hdrdos
), 1, sizeof hdrdos
, myexefile
) < sizeof hdrdos
) {
861 GC_err_puts("Couldn't read MSDOS header from ");
862 GC_err_puts(path
); GC_err_puts("\n");
863 ABORT("Couldn't read MSDOS header");
865 if (E_MAGIC(hdrdos
) != EMAGIC
) {
866 GC_err_puts("Executable has wrong DOS magic number: ");
867 GC_err_puts(path
); GC_err_puts("\n");
868 ABORT("Bad DOS magic number");
870 if (fseek(myexefile
, E_LFANEW(hdrdos
), SEEK_SET
) != 0) {
871 GC_err_puts("Seek to new header failed in ");
872 GC_err_puts(path
); GC_err_puts("\n");
873 ABORT("Bad DOS magic number");
875 if (fread((char *)(&hdr386
), 1, sizeof hdr386
, myexefile
) < sizeof hdr386
) {
876 GC_err_puts("Couldn't read MSDOS header from ");
877 GC_err_puts(path
); GC_err_puts("\n");
878 ABORT("Couldn't read OS/2 header");
880 if (E32_MAGIC1(hdr386
) != E32MAGIC1
|| E32_MAGIC2(hdr386
) != E32MAGIC2
) {
881 GC_err_puts("Executable has wrong OS/2 magic number:");
882 GC_err_puts(path
); GC_err_puts("\n");
883 ABORT("Bad OS/2 magic number");
885 if ( E32_BORDER(hdr386
) != E32LEBO
|| E32_WORDER(hdr386
) != E32LEWO
) {
886 GC_err_puts("Executable %s has wrong byte order: ");
887 GC_err_puts(path
); GC_err_puts("\n");
888 ABORT("Bad byte order");
890 if ( E32_CPU(hdr386
) == E32CPU286
) {
891 GC_err_puts("GC can't handle 80286 executables: ");
892 GC_err_puts(path
); GC_err_puts("\n");
895 if (fseek(myexefile
, E_LFANEW(hdrdos
) + E32_OBJTAB(hdr386
),
897 GC_err_puts("Seek to object table failed: ");
898 GC_err_puts(path
); GC_err_puts("\n");
899 ABORT("Seek to object table failed");
901 for (nsegs
= E32_OBJCNT(hdr386
); nsegs
> 0; nsegs
--) {
903 if (fread((char *)(&seg
), 1, sizeof seg
, myexefile
) < sizeof seg
) {
904 GC_err_puts("Couldn't read obj table entry from ");
905 GC_err_puts(path
); GC_err_puts("\n");
906 ABORT("Couldn't read obj table entry");
908 flags
= O32_FLAGS(seg
);
909 if (!(flags
& OBJWRITE
)) continue;
910 if (!(flags
& OBJREAD
)) continue;
911 if (flags
& OBJINVALID
) {
912 GC_err_printf0("Object with invalid pages?\n");
915 GC_add_roots_inner(O32_BASE(seg
), O32_BASE(seg
)+O32_SIZE(seg
), FALSE
);
921 # if defined(MSWIN32) || defined(MSWINCE)
924 /* Unfortunately, we have to handle win32s very differently from NT, */
925 /* Since VirtualQuery has very different semantics. In particular, */
926 /* under win32s a VirtualQuery call on an unmapped page returns an */
927 /* invalid result. Under NT, GC_register_data_segments is a noop and */
928 /* all real work is done by GC_register_dynamic_libraries. Under */
929 /* win32s, we cannot find the data segments associated with dll's. */
930 /* We register the main data segment here. */
932 GC_bool GC_no_win32_dlls
= TRUE
; /* GCC can't do SEH, so we can't use VirtualQuery */
934 GC_bool GC_no_win32_dlls
= FALSE
;
939 /* if we're running under win32s, assume that no DLLs will be loaded */
940 DWORD v
= GetVersion();
941 GC_no_win32_dlls
|= ((v
& 0x80000000) && (v
& 0xff) <= 3);
944 /* Return the smallest address a such that VirtualQuery */
945 /* returns correct results for all addresses between a and start. */
946 /* Assumes VirtualQuery returns correct information for start. */
947 ptr_t
GC_least_described_address(ptr_t start
)
949 MEMORY_BASIC_INFORMATION buf
;
955 limit
= GC_sysinfo
.lpMinimumApplicationAddress
;
956 p
= (ptr_t
)((word
)start
& ~(GC_page_size
- 1));
958 q
= (LPVOID
)(p
- GC_page_size
);
959 if ((ptr_t
)q
> (ptr_t
)p
/* underflow */ || q
< limit
) break;
960 result
= VirtualQuery(q
, &buf
, sizeof(buf
));
961 if (result
!= sizeof(buf
) || buf
.AllocationBase
== 0) break;
962 p
= (ptr_t
)(buf
.AllocationBase
);
968 /* Is p the start of either the malloc heap, or of one of our */
970 GC_bool
GC_is_heap_base (ptr_t p
)
975 # ifndef REDIRECT_MALLOC
976 static ptr_t malloc_heap_pointer
= 0;
978 if (0 == malloc_heap_pointer
) {
979 MEMORY_BASIC_INFORMATION buf
;
980 void *pTemp
= malloc( 1 );
981 register DWORD result
= VirtualQuery(pTemp
, &buf
, sizeof(buf
));
986 if (result
!= sizeof(buf
)) {
987 ABORT("Weird VirtualQuery result");
989 malloc_heap_pointer
= (ptr_t
)(buf
.AllocationBase
);
991 if (p
== malloc_heap_pointer
) return(TRUE
);
993 for (i
= 0; i
< GC_n_heap_bases
; i
++) {
994 if (GC_heap_bases
[i
] == p
) return(TRUE
);
1000 void GC_register_root_section(ptr_t static_root
)
1002 MEMORY_BASIC_INFORMATION buf
;
1007 char * limit
, * new_limit
;
1009 if (!GC_no_win32_dlls
) return;
1010 p
= base
= limit
= GC_least_described_address(static_root
);
1011 while (p
< GC_sysinfo
.lpMaximumApplicationAddress
) {
1012 result
= VirtualQuery(p
, &buf
, sizeof(buf
));
1013 if (result
!= sizeof(buf
) || buf
.AllocationBase
== 0
1014 || GC_is_heap_base(buf
.AllocationBase
)) break;
1015 new_limit
= (char *)p
+ buf
.RegionSize
;
1016 protect
= buf
.Protect
;
1017 if (buf
.State
== MEM_COMMIT
1018 && is_writable(protect
)) {
1019 if ((char *)p
== limit
) {
1022 if (base
!= limit
) GC_add_roots_inner(base
, limit
, FALSE
);
1027 if (p
> (LPVOID
)new_limit
/* overflow */) break;
1028 p
= (LPVOID
)new_limit
;
1030 if (base
!= limit
) GC_add_roots_inner(base
, limit
, FALSE
);
1034 void GC_register_data_segments()
1038 GC_register_root_section((ptr_t
)(&dummy
));
1042 # else /* !OS2 && !Windows */
1044 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1045 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1046 char * GC_SysVGetDataStart(max_page_size
, etext_addr
)
1050 word text_end
= ((word
)(etext_addr
) + sizeof(word
) - 1)
1051 & ~(sizeof(word
) - 1);
1052 /* etext rounded to word boundary */
1053 word next_page
= ((text_end
+ (word
)max_page_size
- 1)
1054 & ~((word
)max_page_size
- 1));
1055 word page_offset
= (text_end
& ((word
)max_page_size
- 1));
1056 VOLATILE
char * result
= (char *)(next_page
+ page_offset
);
1057 /* Note that this isnt equivalent to just adding */
1058 /* max_page_size to &etext if &etext is at a page boundary */
1060 GC_setup_temporary_fault_handler();
1061 if (setjmp(GC_jmp_buf
) == 0) {
1062 /* Try writing to the address. */
1064 GC_reset_fault_handler();
1066 GC_reset_fault_handler();
1067 /* We got here via a longjmp. The address is not readable. */
1068 /* This is known to happen under Solaris 2.4 + gcc, which place */
1069 /* string constants in the text segment, but after etext. */
1070 /* Use plan B. Note that we now know there is a gap between */
1071 /* text and data segments, so plan A bought us something. */
1072 result
= (char *)GC_find_limit((ptr_t
)(DATAEND
) - MIN_PAGE_SIZE
, FALSE
);
1074 return((char *)result
);
1081 # define GC_AMIGA_DS
1082 # include "AmigaOS.c"
1085 #else /* !OS2 && !Windows && !AMIGA */
1087 void GC_register_data_segments()
1089 # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1091 # if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
1092 /* As of Solaris 2.3, the Solaris threads implementation */
1093 /* allocates the data structure for the initial thread with */
1094 /* sbrk at process startup. It needs to be scanned, so that */
1095 /* we don't lose some malloc allocated data structures */
1096 /* hanging from it. We're on thin ice here ... */
1097 extern caddr_t
sbrk();
1099 GC_add_roots_inner(DATASTART
, (char *)sbrk(0), FALSE
);
1101 GC_add_roots_inner(DATASTART
, (char *)(DATAEND
), FALSE
);
1102 # if defined(DATASTART2)
1103 GC_add_roots_inner(DATASTART2
, (char *)(DATAEND2
), FALSE
);
1107 # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1108 GC_add_roots_inner(DATASTART
, (char *) get_end(), FALSE
);
1112 # if defined(THINK_C)
1113 extern void* GC_MacGetDataStart(void);
1114 /* globals begin above stack and end at a5. */
1115 GC_add_roots_inner((ptr_t
)GC_MacGetDataStart(),
1116 (ptr_t
)LMGetCurrentA5(), FALSE
);
1118 # if defined(__MWERKS__)
1120 extern void* GC_MacGetDataStart(void);
1121 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1122 # if __option(far_data)
1123 extern void* GC_MacGetDataEnd(void);
1125 /* globals begin above stack and end at a5. */
1126 GC_add_roots_inner((ptr_t
)GC_MacGetDataStart(),
1127 (ptr_t
)LMGetCurrentA5(), FALSE
);
1128 /* MATTHEW: Handle Far Globals */
1129 # if __option(far_data)
1130 /* Far globals follow he QD globals: */
1131 GC_add_roots_inner((ptr_t
)LMGetCurrentA5(),
1132 (ptr_t
)GC_MacGetDataEnd(), FALSE
);
1135 extern char __data_start__
[], __data_end__
[];
1136 GC_add_roots_inner((ptr_t
)&__data_start__
,
1137 (ptr_t
)&__data_end__
, FALSE
);
1138 # endif /* __POWERPC__ */
1139 # endif /* __MWERKS__ */
1140 # endif /* !THINK_C */
1144 /* Dynamic libraries are added at every collection, since they may */
1148 # endif /* ! AMIGA */
1149 # endif /* ! MSWIN32 && ! MSWINCE*/
1153 * Auxiliary routines for obtaining memory from OS.
1156 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1157 && !defined(MSWIN32) && !defined(MSWINCE) \
1158 && !defined(MACOS) && !defined(DOS4GW)
1161 extern caddr_t
sbrk();
1164 # define SBRK_ARG_T ptrdiff_t
1166 # define SBRK_ARG_T int
1171 /* The compiler seems to generate speculative reads one past the end of */
1172 /* an allocated object. Hence we need to make sure that the page */
1173 /* following the last heap page is also mapped. */
1174 ptr_t
GC_unix_get_mem(bytes
)
1177 caddr_t cur_brk
= (caddr_t
)sbrk(0);
1179 SBRK_ARG_T lsbs
= (word
)cur_brk
& (GC_page_size
-1);
1180 static caddr_t my_brk_val
= 0;
1182 if ((SBRK_ARG_T
)bytes
< 0) return(0); /* too big */
1184 if((caddr_t
)(sbrk(GC_page_size
- lsbs
)) == (caddr_t
)(-1)) return(0);
1186 if (cur_brk
== my_brk_val
) {
1187 /* Use the extra block we allocated last time. */
1188 result
= (ptr_t
)sbrk((SBRK_ARG_T
)bytes
);
1189 if (result
== (caddr_t
)(-1)) return(0);
1190 result
-= GC_page_size
;
1192 result
= (ptr_t
)sbrk(GC_page_size
+ (SBRK_ARG_T
)bytes
);
1193 if (result
== (caddr_t
)(-1)) return(0);
1195 my_brk_val
= result
+ bytes
+ GC_page_size
; /* Always page aligned */
1196 return((ptr_t
)result
);
1199 #else /* Not RS6000 */
1201 #if defined(USE_MMAP)
1202 /* Tested only under Linux, IRIX5 and Solaris 2 */
1204 #ifdef USE_MMAP_FIXED
1205 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1206 /* Seems to yield better performance on Solaris 2, but can */
1207 /* be unreliable if something is already mapped at the address. */
1209 # define GC_MMAP_FLAGS MAP_PRIVATE
1213 # define HEAP_START 0
1216 ptr_t
GC_unix_get_mem(bytes
)
1219 static GC_bool initialized
= FALSE
;
1222 static ptr_t last_addr
= HEAP_START
;
1225 fd
= open("/dev/zero", O_RDONLY
);
1228 if (bytes
& (GC_page_size
-1)) ABORT("Bad GET_MEM arg");
1229 result
= mmap(last_addr
, bytes
, PROT_READ
| PROT_WRITE
| OPT_PROT_EXEC
,
1230 GC_MMAP_FLAGS
, fd
, 0/* offset */);
1231 if (result
== MAP_FAILED
) return(0);
1232 last_addr
= (ptr_t
)result
+ bytes
+ GC_page_size
- 1;
1233 last_addr
= (ptr_t
)((word
)last_addr
& ~(GC_page_size
- 1));
1234 # if !defined(LINUX)
1235 if (last_addr
== 0) {
1236 /* Oops. We got the end of the address space. This isn't */
1237 /* usable by arbitrary C code, since one-past-end pointers */
1238 /* don't work, so we discard it and try again. */
1239 munmap(result
, (size_t)(-GC_page_size
) - (size_t)result
);
1240 /* Leave last page mapped, so we can't repeat. */
1241 return GC_unix_get_mem(bytes
);
1244 GC_ASSERT(last_addr
!= 0);
1246 return((ptr_t
)result
);
1249 #else /* Not RS6000, not USE_MMAP */
1250 ptr_t
GC_unix_get_mem(bytes
)
1255 /* Bare sbrk isn't thread safe. Play by malloc rules. */
1256 /* The equivalent may be needed on other systems as well. */
1260 ptr_t cur_brk
= (ptr_t
)sbrk(0);
1261 SBRK_ARG_T lsbs
= (word
)cur_brk
& (GC_page_size
-1);
1263 if ((SBRK_ARG_T
)bytes
< 0) return(0); /* too big */
1265 if((ptr_t
)sbrk(GC_page_size
- lsbs
) == (ptr_t
)(-1)) return(0);
1267 result
= (ptr_t
)sbrk((SBRK_ARG_T
)bytes
);
1268 if (result
== (ptr_t
)(-1)) result
= 0;
1276 #endif /* Not USE_MMAP */
1277 #endif /* Not RS6000 */
1283 void * os2_alloc(size_t bytes
)
1287 if (DosAllocMem(&result
, bytes
, PAG_EXECUTE
| PAG_READ
|
1288 PAG_WRITE
| PAG_COMMIT
)
1292 if (result
== 0) return(os2_alloc(bytes
));
1299 # if defined(MSWIN32) || defined(MSWINCE)
1300 SYSTEM_INFO GC_sysinfo
;
1305 # ifdef USE_GLOBAL_ALLOC
1306 # define GLOBAL_ALLOC_TEST 1
1308 # define GLOBAL_ALLOC_TEST GC_no_win32_dlls
1311 word GC_n_heap_bases
= 0;
1313 ptr_t
GC_win32_get_mem(bytes
)
1318 if (GLOBAL_ALLOC_TEST
) {
1319 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1320 /* There are also unconfirmed rumors of other */
1321 /* problems, so we dodge the issue. */
1322 result
= (ptr_t
) GlobalAlloc(0, bytes
+ HBLKSIZE
);
1323 result
= (ptr_t
)(((word
)result
+ HBLKSIZE
) & ~(HBLKSIZE
-1));
1325 result
= (ptr_t
) VirtualAlloc(NULL
, bytes
,
1326 MEM_COMMIT
| MEM_RESERVE
,
1327 PAGE_EXECUTE_READWRITE
);
1329 if (HBLKDISPL(result
) != 0) ABORT("Bad VirtualAlloc result");
1330 /* If I read the documentation correctly, this can */
1331 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1332 if (GC_n_heap_bases
>= MAX_HEAP_SECTS
) ABORT("Too many heap sections");
1333 GC_heap_bases
[GC_n_heap_bases
++] = result
;
1337 void GC_win32_free_heap ()
1339 if (GC_no_win32_dlls
) {
1340 while (GC_n_heap_bases
> 0) {
1341 GlobalFree (GC_heap_bases
[--GC_n_heap_bases
]);
1342 GC_heap_bases
[GC_n_heap_bases
] = 0;
1349 # define GC_AMIGA_AM
1350 # include "AmigaOS.c"
1356 word GC_n_heap_bases
= 0;
1358 ptr_t
GC_wince_get_mem(bytes
)
1364 /* Round up allocation size to multiple of page size */
1365 bytes
= (bytes
+ GC_page_size
-1) & ~(GC_page_size
-1);
1367 /* Try to find reserved, uncommitted pages */
1368 for (i
= 0; i
< GC_n_heap_bases
; i
++) {
1369 if (((word
)(-(signed_word
)GC_heap_lengths
[i
])
1370 & (GC_sysinfo
.dwAllocationGranularity
-1))
1372 result
= GC_heap_bases
[i
] + GC_heap_lengths
[i
];
1377 if (i
== GC_n_heap_bases
) {
1378 /* Reserve more pages */
1379 word res_bytes
= (bytes
+ GC_sysinfo
.dwAllocationGranularity
-1)
1380 & ~(GC_sysinfo
.dwAllocationGranularity
-1);
1381 result
= (ptr_t
) VirtualAlloc(NULL
, res_bytes
,
1382 MEM_RESERVE
| MEM_TOP_DOWN
,
1383 PAGE_EXECUTE_READWRITE
);
1384 if (HBLKDISPL(result
) != 0) ABORT("Bad VirtualAlloc result");
1385 /* If I read the documentation correctly, this can */
1386 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1387 if (GC_n_heap_bases
>= MAX_HEAP_SECTS
) ABORT("Too many heap sections");
1388 GC_heap_bases
[GC_n_heap_bases
] = result
;
1389 GC_heap_lengths
[GC_n_heap_bases
] = 0;
1394 result
= (ptr_t
) VirtualAlloc(result
, bytes
,
1396 PAGE_EXECUTE_READWRITE
);
1397 if (result
!= NULL
) {
1398 if (HBLKDISPL(result
) != 0) ABORT("Bad VirtualAlloc result");
1399 GC_heap_lengths
[i
] += bytes
;
1408 /* For now, this only works on Win32/WinCE and some Unix-like */
1409 /* systems. If you have something else, don't define */
1411 /* We assume ANSI C to support this feature. */
1413 #if !defined(MSWIN32) && !defined(MSWINCE)
1416 #include <sys/mman.h>
1417 #include <sys/stat.h>
1418 #include <sys/types.h>
1422 /* Compute a page aligned starting address for the unmap */
1423 /* operation on a block of size bytes starting at start. */
1424 /* Return 0 if the block is too small to make this feasible. */
1425 ptr_t
GC_unmap_start(ptr_t start
, word bytes
)
1427 ptr_t result
= start
;
1428 /* Round start to next page boundary. */
1429 result
+= GC_page_size
- 1;
1430 result
= (ptr_t
)((word
)result
& ~(GC_page_size
- 1));
1431 if (result
+ GC_page_size
> start
+ bytes
) return 0;
1435 /* Compute end address for an unmap operation on the indicated */
1437 ptr_t
GC_unmap_end(ptr_t start
, word bytes
)
1439 ptr_t end_addr
= start
+ bytes
;
1440 end_addr
= (ptr_t
)((word
)end_addr
& ~(GC_page_size
- 1));
1444 /* Under Win32/WinCE we commit (map) and decommit (unmap) */
1445 /* memory using VirtualAlloc and VirtualFree. These functions */
1446 /* work on individual allocations of virtual memory, made */
1447 /* previously using VirtualAlloc with the MEM_RESERVE flag. */
1448 /* The ranges we need to (de)commit may span several of these */
1449 /* allocations; therefore we use VirtualQuery to check */
1450 /* allocation lengths, and split up the range as necessary. */
1452 /* We assume that GC_remap is called on exactly the same range */
1453 /* as a previous call to GC_unmap. It is safe to consistently */
1454 /* round the endpoints in both places. */
1455 void GC_unmap(ptr_t start
, word bytes
)
1457 ptr_t start_addr
= GC_unmap_start(start
, bytes
);
1458 ptr_t end_addr
= GC_unmap_end(start
, bytes
);
1459 word len
= end_addr
- start_addr
;
1460 if (0 == start_addr
) return;
1461 # if defined(MSWIN32) || defined(MSWINCE)
1463 MEMORY_BASIC_INFORMATION mem_info
;
1465 if (VirtualQuery(start_addr
, &mem_info
, sizeof(mem_info
))
1466 != sizeof(mem_info
))
1467 ABORT("Weird VirtualQuery result");
1468 free_len
= (len
< mem_info
.RegionSize
) ? len
: mem_info
.RegionSize
;
1469 if (!VirtualFree(start_addr
, free_len
, MEM_DECOMMIT
))
1470 ABORT("VirtualFree failed");
1471 GC_unmapped_bytes
+= free_len
;
1472 start_addr
+= free_len
;
1476 if (munmap(start_addr
, len
) != 0) ABORT("munmap failed");
1477 GC_unmapped_bytes
+= len
;
1482 void GC_remap(ptr_t start
, word bytes
)
1484 static int zero_descr
= -1;
1485 ptr_t start_addr
= GC_unmap_start(start
, bytes
);
1486 ptr_t end_addr
= GC_unmap_end(start
, bytes
);
1487 word len
= end_addr
- start_addr
;
1490 # if defined(MSWIN32) || defined(MSWINCE)
1491 if (0 == start_addr
) return;
1493 MEMORY_BASIC_INFORMATION mem_info
;
1495 if (VirtualQuery(start_addr
, &mem_info
, sizeof(mem_info
))
1496 != sizeof(mem_info
))
1497 ABORT("Weird VirtualQuery result");
1498 alloc_len
= (len
< mem_info
.RegionSize
) ? len
: mem_info
.RegionSize
;
1499 result
= VirtualAlloc(start_addr
, alloc_len
,
1501 PAGE_EXECUTE_READWRITE
);
1502 if (result
!= start_addr
) {
1503 ABORT("VirtualAlloc remapping failed");
1505 GC_unmapped_bytes
-= alloc_len
;
1506 start_addr
+= alloc_len
;
1510 if (-1 == zero_descr
) zero_descr
= open("/dev/zero", O_RDWR
);
1511 if (0 == start_addr
) return;
1512 result
= mmap(start_addr
, len
, PROT_READ
| PROT_WRITE
| OPT_PROT_EXEC
,
1513 MAP_FIXED
| MAP_PRIVATE
, zero_descr
, 0);
1514 if (result
!= start_addr
) {
1515 ABORT("mmap remapping failed");
1517 GC_unmapped_bytes
-= len
;
1521 /* Two adjacent blocks have already been unmapped and are about to */
1522 /* be merged. Unmap the whole block. This typically requires */
1523 /* that we unmap a small section in the middle that was not previously */
1524 /* unmapped due to alignment constraints. */
1525 void GC_unmap_gap(ptr_t start1
, word bytes1
, ptr_t start2
, word bytes2
)
1527 ptr_t start1_addr
= GC_unmap_start(start1
, bytes1
);
1528 ptr_t end1_addr
= GC_unmap_end(start1
, bytes1
);
1529 ptr_t start2_addr
= GC_unmap_start(start2
, bytes2
);
1530 ptr_t end2_addr
= GC_unmap_end(start2
, bytes2
);
1531 ptr_t start_addr
= end1_addr
;
1532 ptr_t end_addr
= start2_addr
;
1534 GC_ASSERT(start1
+ bytes1
== start2
);
1535 if (0 == start1_addr
) start_addr
= GC_unmap_start(start1
, bytes1
+ bytes2
);
1536 if (0 == start2_addr
) end_addr
= GC_unmap_end(start1
, bytes1
+ bytes2
);
1537 if (0 == start_addr
) return;
1538 len
= end_addr
- start_addr
;
1539 # if defined(MSWIN32) || defined(MSWINCE)
1541 MEMORY_BASIC_INFORMATION mem_info
;
1543 if (VirtualQuery(start_addr
, &mem_info
, sizeof(mem_info
))
1544 != sizeof(mem_info
))
1545 ABORT("Weird VirtualQuery result");
1546 free_len
= (len
< mem_info
.RegionSize
) ? len
: mem_info
.RegionSize
;
1547 if (!VirtualFree(start_addr
, free_len
, MEM_DECOMMIT
))
1548 ABORT("VirtualFree failed");
1549 GC_unmapped_bytes
+= free_len
;
1550 start_addr
+= free_len
;
1554 if (len
!= 0 && munmap(start_addr
, len
) != 0) ABORT("munmap failed");
1555 GC_unmapped_bytes
+= len
;
1559 #endif /* USE_MUNMAP */
1561 /* Routine for pushing any additional roots. In THREADS */
1562 /* environment, this is also responsible for marking from */
1563 /* thread stacks. */
1565 void (*GC_push_other_roots
)() = 0;
1569 PCR_ERes
GC_push_thread_stack(PCR_Th_T
*t
, PCR_Any dummy
)
1571 struct PCR_ThCtl_TInfoRep info
;
1574 info
.ti_stkLow
= info
.ti_stkHi
= 0;
1575 result
= PCR_ThCtl_GetInfo(t
, &info
);
1576 GC_push_all_stack((ptr_t
)(info
.ti_stkLow
), (ptr_t
)(info
.ti_stkHi
));
1580 /* Push the contents of an old object. We treat this as stack */
1581 /* data only because that makes it robust against mark stack */
1583 PCR_ERes
GC_push_old_obj(void *p
, size_t size
, PCR_Any data
)
1585 GC_push_all_stack((ptr_t
)p
, (ptr_t
)p
+ size
);
1586 return(PCR_ERes_okay
);
1590 void GC_default_push_other_roots
GC_PROTO((void))
1592 /* Traverse data allocated by previous memory managers. */
1594 extern struct PCR_MM_ProcsRep
* GC_old_allocator
;
1596 if ((*(GC_old_allocator
->mmp_enumerate
))(PCR_Bool_false
,
1599 ABORT("Old object enumeration failed");
1602 /* Traverse all thread stacks. */
1604 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack
,0))
1605 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1606 ABORT("Thread stack marking failed\n");
1614 # ifdef ALL_INTERIOR_POINTERS
1618 void GC_push_thread_structures
GC_PROTO((void))
1620 /* Not our responsibility. */
1623 extern void ThreadF__ProcessStacks();
1625 void GC_push_thread_stack(start
, stop
)
1628 GC_push_all_stack((ptr_t
)start
, (ptr_t
)stop
+ sizeof(word
));
1631 /* Push routine with M3 specific calling convention. */
1632 GC_m3_push_root(dummy1
, p
, dummy2
, dummy3
)
1634 ptr_t dummy1
, dummy2
;
1639 GC_PUSH_ONE_STACK(q
, p
);
1642 /* M3 set equivalent to RTHeap.TracedRefTypes */
1643 typedef struct { int elts
[1]; } RefTypeSet
;
1644 RefTypeSet GC_TracedRefTypes
= {{0x1}};
1646 void GC_default_push_other_roots
GC_PROTO((void))
1648 /* Use the M3 provided routine for finding static roots. */
1649 /* This is a bit dubious, since it presumes no C roots. */
1650 /* We handle the collector roots explicitly in GC_push_roots */
1651 RTMain__GlobalMapProc(GC_m3_push_root
, 0, GC_TracedRefTypes
);
1652 if (GC_words_allocd
> 0) {
1653 ThreadF__ProcessStacks(GC_push_thread_stack
);
1655 /* Otherwise this isn't absolutely necessary, and we have */
1656 /* startup ordering problems. */
1659 # endif /* SRC_M3 */
1661 # if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
1662 defined(GC_WIN32_THREADS)
1664 extern void GC_push_all_stacks();
1666 void GC_default_push_other_roots
GC_PROTO((void))
1668 GC_push_all_stacks();
1671 # endif /* GC_SOLARIS_THREADS || GC_PTHREADS */
1673 void (*GC_push_other_roots
) GC_PROTO((void)) = GC_default_push_other_roots
;
1675 #endif /* THREADS */
1678 * Routines for accessing dirty bits on virtual pages.
1679 * We plan to eventually implement four strategies for doing so:
1680 * DEFAULT_VDB: A simple dummy implementation that treats every page
1681 * as possibly dirty. This makes incremental collection
1682 * useless, but the implementation is still correct.
1683 * PCR_VDB: Use PPCRs virtual dirty bit facility.
1684 * PROC_VDB: Use the /proc facility for reading dirty bits. Only
1685 * works under some SVR4 variants. Even then, it may be
1686 * too slow to be entirely satisfactory. Requires reading
1687 * dirty bits for entire address space. Implementations tend
1688 * to assume that the client is a (slow) debugger.
1689 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
1690 * dirtied pages. The implementation (and implementability)
1691 * is highly system dependent. This usually fails when system
1692 * calls write to a protected page. We prevent the read system
1693 * call from doing so. It is the clients responsibility to
1694 * make sure that other system calls are similarly protected
1695 * or write only to the stack.
1698 GC_bool GC_dirty_maintained
= FALSE
;
1702 /* All of the following assume the allocation lock is held, and */
1703 /* signals are disabled. */
1705 /* The client asserts that unallocated pages in the heap are never */
1708 /* Initialize virtual dirty bit implementation. */
1709 void GC_dirty_init()
1711 GC_dirty_maintained
= TRUE
;
/* Retrieve system dirty bits for heap to a local buffer.       */
/* Restore the system's notion of which pages are dirty.        */
/* The default implementation has no dirty-bit source: no-op.   */
void GC_read_dirty()
{}
1719 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1720 /* If the actual page size is different, this returns TRUE if any */
1721 /* of the pages overlapping h are dirty. This routine may err on the */
1722 /* side of labelling pages as dirty (and this implementation does). */
1724 GC_bool
GC_page_was_dirty(h
)
1731 * The following two routines are typically less crucial. They matter
1732 * most with large dynamic libraries, or if we can't accurately identify
1733 * stacks, e.g. under Solaris 2.X. Otherwise the following default
1734 * versions are adequate.
1737 /* Could any valid GC heap pointer ever have been written to this page? */
1739 GC_bool
GC_page_was_ever_dirty(h
)
1745 /* Reset the n pages starting at h to "was never dirty" status. */
1746 void GC_is_fresh(h
, n
)
1753 /* I) hints that [h, h+nblocks) is about to be written. */
1754 /* II) guarantees that protection is removed. */
1755 /* (I) may speed up some dirty bit implementations. */
1756 /* (II) may be essential if we need to ensure that */
1757 /* pointer-free system call buffers in the heap are */
1758 /* not protected. */
1760 void GC_remove_protection(h
, nblocks
, is_ptrfree
)
1767 # endif /* DEFAULT_VDB */
1770 # ifdef MPROTECT_VDB
1773 * See DEFAULT_VDB for interface descriptions.
1777 * This implementation maintains dirty bits itself by catching write
1778 * faults and keeping track of them. We assume nobody else catches
1779 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
1780 * except as a result of a read system call. This means clients must
1781 * either ensure that system calls do not touch the heap, or must
1782 * provide their own wrappers analogous to the one for read.
1783 * We assume the page size is a multiple of HBLKSIZE.
1784 * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
1785 * tried to use portable code where easily possible. It is known
1786 * not to work under a number of other systems.
1789 # if !defined(MSWIN32) && !defined(MSWINCE)
1791 # include <sys/mman.h>
1792 # include <signal.h>
1793 # include <sys/syscall.h>
1795 # define PROTECT(addr, len) \
1796 if (mprotect((caddr_t)(addr), (size_t)(len), \
1797 PROT_READ | OPT_PROT_EXEC) < 0) { \
1798 ABORT("mprotect failed"); \
1800 # define UNPROTECT(addr, len) \
1801 if (mprotect((caddr_t)(addr), (size_t)(len), \
1802 PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1803 ABORT("un-mprotect failed"); \
1809 # include <signal.h>
1812 static DWORD protect_junk
;
1813 # define PROTECT(addr, len) \
1814 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1816 DWORD last_error = GetLastError(); \
1817 GC_printf1("Last error code: %lx\n", last_error); \
1818 ABORT("VirtualProtect failed"); \
1820 # define UNPROTECT(addr, len) \
1821 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1823 ABORT("un-VirtualProtect failed"); \
1828 #if defined(SUNOS4) || defined(FREEBSD)
1829 typedef void (* SIG_PF
)();
1831 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
1832 || defined(MACOSX) || defined(HURD)
1834 typedef void (* SIG_PF
)(int);
1836 typedef void (* SIG_PF
)();
1839 #if defined(MSWIN32)
1840 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF
;
1842 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1844 #if defined(MSWINCE)
1845 typedef LONG (WINAPI
*SIG_PF
)(struct _EXCEPTION_POINTERS
*);
1847 # define SIG_DFL (SIG_PF) (-1)
1850 #if defined(IRIX5) || defined(OSF1) || defined(HURD)
1851 typedef void (* REAL_SIG_PF
)(int, int, struct sigcontext
*);
1853 #if defined(SUNOS5SIGS)
1855 # define SIGINFO __siginfo
1857 # define SIGINFO siginfo
1860 typedef void (* REAL_SIG_PF
)(int, struct SIGINFO
*, void *);
1862 typedef void (* REAL_SIG_PF
)();
1866 # if __GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2
1867 typedef struct sigcontext s_c
;
1868 # else /* glibc < 2.2 */
1869 # include <linux/version.h>
1870 # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA)
1871 typedef struct sigcontext s_c
;
1873 typedef struct sigcontext_struct s_c
;
1875 # endif /* glibc < 2.2 */
1876 # if defined(ALPHA) || defined(M68K)
1877 typedef void (* REAL_SIG_PF
)(int, int, s_c
*);
1879 # if defined(IA64) || defined(HP_PA)
1880 typedef void (* REAL_SIG_PF
)(int, siginfo_t
*, s_c
*);
1882 typedef void (* REAL_SIG_PF
)(int, s_c
);
1886 /* Retrieve fault address from sigcontext structure by decoding */
1888 char * get_fault_addr(s_c
*sc
) {
1892 instr
= *((unsigned *)(sc
->sc_pc
));
1893 faultaddr
= sc
->sc_regs
[(instr
>> 16) & 0x1f];
1894 faultaddr
+= (word
) (((int)instr
<< 16) >> 16);
1895 return (char *)faultaddr
;
1897 # endif /* !ALPHA */
1900 # if defined(MACOSX) /* Should also test for PowerPC? */
1901 typedef void (* REAL_SIG_PF
)(int, int, struct sigcontext
*);
1903 /* Decodes the machine instruction which was responsible for the sending of the
1904 SIGBUS signal. Sadly this is the only way to find the faulting address because
1905 the signal handler doesn't get it directly from the kernel (although it is
1906 available on the Mach level, but dropped by the BSD personality before it
1907 calls our signal handler...)
1908 This code should be able to deal correctly with all PPCs starting from the
1909 601 up to and including the G4s (including Velocity Engine). */
1910 #define EXTRACT_OP1(iw) (((iw) & 0xFC000000) >> 26)
1911 #define EXTRACT_OP2(iw) (((iw) & 0x000007FE) >> 1)
1912 #define EXTRACT_REGA(iw) (((iw) & 0x001F0000) >> 16)
1913 #define EXTRACT_REGB(iw) (((iw) & 0x03E00000) >> 21)
1914 #define EXTRACT_REGC(iw) (((iw) & 0x0000F800) >> 11)
1915 #define EXTRACT_DISP(iw) ((short *) &(iw))[1]
1917 static char *get_fault_addr(struct sigcontext
*scp
)
1919 unsigned int instr
= *((unsigned int *) scp
->sc_ir
);
1920 unsigned int * regs
= &((unsigned int *) scp
->sc_regs
)[2];
1922 unsigned int baseA
= 0, baseB
= 0;
1923 unsigned int addr
, alignmask
= 0xFFFFFFFF;
1925 #ifdef GC_DEBUG_DECODER
1926 GC_err_printf1("Instruction: 0x%lx\n", instr
);
1927 GC_err_printf1("Opcode 1: d\n", (int)EXTRACT_OP1(instr
));
1929 switch(EXTRACT_OP1(instr
)) {
1933 case 55: /* stfdu */
1935 case 53: /* stfsu */
1941 tmp
= EXTRACT_REGA(instr
);
1944 disp
= EXTRACT_DISP(instr
);
1947 #ifdef GC_DEBUG_DECODER
1948 GC_err_printf1("Opcode 2: %d\n", (int)EXTRACT_OP2(instr
));
1950 switch(EXTRACT_OP2(instr
)) {
1952 case 54: /* dcbst */
1953 case 1014: /* dcbz */
1954 case 247: /* stbux */
1955 case 215: /* stbx */
1956 case 759: /* stfdux */
1957 case 727: /* stfdx */
1958 case 983: /* stfiwx */
1959 case 695: /* stfsux */
1960 case 663: /* stfsx */
1961 case 918: /* sthbrx */
1962 case 439: /* sthux */
1963 case 407: /* sthx */
1964 case 661: /* stswx */
1965 case 662: /* stwbrx */
1966 case 150: /* stwcx. */
1967 case 183: /* stwux */
1968 case 151: /* stwx */
1969 case 135: /* stvebx */
1970 case 167: /* stvehx */
1971 case 199: /* stvewx */
1972 case 231: /* stvx */
1973 case 487: /* stvxl */
1974 tmp
= EXTRACT_REGA(instr
);
1977 baseB
= regs
[EXTRACT_REGC(instr
)];
1978 /* determine Altivec alignment mask */
1979 switch(EXTRACT_OP2(instr
)) {
1980 case 167: /* stvehx */
1981 alignmask
= 0xFFFFFFFE;
1983 case 199: /* stvewx */
1984 alignmask
= 0xFFFFFFFC;
1986 case 231: /* stvx */
1987 alignmask
= 0xFFFFFFF0;
1989 case 487: /* stvxl */
1990 alignmask
= 0xFFFFFFF0;
1994 case 725: /* stswi */
1995 tmp
= EXTRACT_REGA(instr
);
1999 default: /* ignore instruction */
2000 #ifdef GC_DEBUG_DECODER
2001 GC_err_printf("Ignored by inner handler\n");
2007 default: /* ignore instruction */
2008 #ifdef GC_DEBUG_DECODER
2009 GC_err_printf("Ignored by main handler\n");
2015 addr
= (baseA
+ baseB
) + disp
;
2017 #ifdef GC_DEBUG_DECODER
2018 GC_err_printf1("BaseA: %d\n", baseA
);
2019 GC_err_printf1("BaseB: %d\n", baseB
);
2020 GC_err_printf1("Disp: %d\n", disp
);
2021 GC_err_printf1("Address: %d\n", addr
);
2023 return (char *)addr
;
2027 SIG_PF GC_old_bus_handler
;
2028 SIG_PF GC_old_segv_handler
; /* Also old MSWIN32 ACCESS_VIOLATION filter */
2031 /* We need to lock around the bitmap update in the write fault handler */
2032 /* in order to avoid the risk of losing a bit. We do this with a */
2033 /* test-and-set spin lock if we know how to do that. Otherwise we */
2034 /* check whether we are already in the handler and use the dumb but */
2035 /* safe fallback algorithm of setting all bits in the word. */
2036 /* Contention should be very rare, so we do the minimum to handle it */
2038 #ifdef GC_TEST_AND_SET_DEFINED
2039 static VOLATILE
unsigned int fault_handler_lock
= 0;
2040 void async_set_pht_entry_from_index(VOLATILE page_hash_table db
, int index
) {
2041 while (GC_test_and_set(&fault_handler_lock
)) {}
2042 /* Could also revert to set_pht_entry_from_index_safe if initial */
2043 /* GC_test_and_set fails. */
2044 set_pht_entry_from_index(db
, index
);
2045 GC_clear(&fault_handler_lock
);
2047 #else /* !GC_TEST_AND_SET_DEFINED */
2048 /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
2049 /* just before we notice the conflict and correct it. We may end up */
2050 /* looking at it while it's wrong. But this requires contention */
2051 /* exactly when a GC is triggered, which seems far less likely to */
2052 /* fail than the old code, which had no reported failures. Thus we */
2053 /* leave it this way while we think of something better, or support */
2054 /* GC_test_and_set on the remaining platforms. */
2055 static VOLATILE word currently_updating
= 0;
2056 void async_set_pht_entry_from_index(VOLATILE page_hash_table db
, int index
) {
2057 unsigned int update_dummy
;
2058 currently_updating
= (word
)(&update_dummy
);
2059 set_pht_entry_from_index(db
, index
);
2060 /* If we get contention in the 10 or so instruction window here, */
2061 /* and we get stopped by a GC between the two updates, we lose! */
2062 if (currently_updating
!= (word
)(&update_dummy
)) {
2063 set_pht_entry_from_index_safe(db
, index
);
2064 /* We claim that if two threads concurrently try to update the */
2065 /* dirty bit vector, the first one to execute UPDATE_START */
2066 /* will see it changed when UPDATE_END is executed. (Note that */
2067 /* &update_dummy must differ in two distinct threads.) It */
2068 /* will then execute set_pht_entry_from_index_safe, thus */
2069 /* returning us to a safe state, though not soon enough. */
2072 #endif /* !GC_TEST_AND_SET_DEFINED */
2073 #else /* !THREADS */
2074 # define async_set_pht_entry_from_index(db, index) \
2075 set_pht_entry_from_index(db, index)
2076 #endif /* !THREADS */
2079 # if defined (SUNOS4) || defined(FREEBSD)
2080 void GC_write_fault_handler(sig
, code
, scp
, addr
)
2082 struct sigcontext
*scp
;
2085 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2086 # define CODE_OK (FC_CODE(code) == FC_PROT \
2087 || (FC_CODE(code) == FC_OBJERR \
2088 && FC_ERRNO(code) == FC_PROT))
2091 # define SIG_OK (sig == SIGBUS)
2092 # define CODE_OK (code == BUS_PAGE_FAULT)
2095 # if defined(IRIX5) || defined(OSF1) || defined(HURD)
2097 void GC_write_fault_handler(int sig
, int code
, struct sigcontext
*scp
)
2099 # define SIG_OK (sig == SIGSEGV)
2100 # define CODE_OK (code == 2 /* experimentally determined */)
2103 # define SIG_OK (sig == SIGSEGV)
2104 # define CODE_OK (code == EACCES)
2107 # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
2108 # define CODE_OK TRUE
2112 # if defined(ALPHA) || defined(M68K)
2113 void GC_write_fault_handler(int sig
, int code
, s_c
* sc
)
2115 # if defined(IA64) || defined(HP_PA)
2116 void GC_write_fault_handler(int sig
, siginfo_t
* si
, s_c
* scp
)
2118 void GC_write_fault_handler(int sig
, s_c sc
)
2121 # define SIG_OK (sig == SIGSEGV)
2122 # define CODE_OK TRUE
2123 /* Empirically c.trapno == 14, on IA32, but is that useful? */
2124 /* Should probably consider alignment issues on other */
2125 /* architectures. */
2127 # if defined(SUNOS5SIGS)
2129 void GC_write_fault_handler(int sig
, struct SIGINFO
*scp
, void * context
)
2131 void GC_write_fault_handler(sig
, scp
, context
)
2133 struct SIGINFO
*scp
;
2137 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2138 # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
2139 || (scp -> si_code == BUS_ADRERR) \
2140 || (scp -> si_code == BUS_UNKNOWN) \
2141 || (scp -> si_code == SEGV_UNKNOWN) \
2142 || (scp -> si_code == BUS_OBJERR)
2144 # define SIG_OK (sig == SIGSEGV)
2145 # define CODE_OK (scp -> si_code == SEGV_ACCERR)
2149 # if defined(MACOSX)
2150 void GC_write_fault_handler(int sig
, int code
, struct sigcontext
*scp
)
2151 # define SIG_OK (sig == SIGBUS)
2152 # define CODE_OK (code == 0 /* experimentally determined */)
2155 # if defined(MSWIN32) || defined(MSWINCE)
2156 LONG WINAPI
GC_write_fault_handler(struct _EXCEPTION_POINTERS
*exc_info
)
2157 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
2158 STATUS_ACCESS_VIOLATION)
2159 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
2163 register unsigned i
;
2165 char *addr
= (char *) code
;
2168 char * addr
= (char *) (size_t) (scp
-> sc_badvaddr
);
2170 # if defined(OSF1) && defined(ALPHA)
2171 char * addr
= (char *) (scp
-> sc_traparg_a0
);
2174 char * addr
= (char *) (scp
-> si_addr
);
2177 # if defined(I386) || defined (X86_64)
2178 char * addr
= (char *) (sc
.cr2
);
2183 struct sigcontext
*scp
= (struct sigcontext
*)(sc
);
2185 int format
= (scp
->sc_formatvec
>> 12) & 0xf;
2186 unsigned long *framedata
= (unsigned long *)(scp
+ 1);
2189 if (format
== 0xa || format
== 0xb) {
2192 } else if (format
== 7) {
2195 if (framedata
[1] & 0x08000000) {
2196 /* correct addr on misaligned access */
2197 ea
= (ea
+4095)&(~4095);
2199 } else if (format
== 4) {
2202 if (framedata
[1] & 0x08000000) {
2203 /* correct addr on misaligned access */
2204 ea
= (ea
+4095)&(~4095);
2210 char * addr
= get_fault_addr(sc
);
2212 # if defined(IA64) || defined(HP_PA)
2213 char * addr
= si
-> si_addr
;
2214 /* I believe this is claimed to work on all platforms for */
2215 /* Linux 2.3.47 and later. Hopefully we don't have to */
2216 /* worry about earlier kernels on IA64. */
2218 # if defined(POWERPC)
2219 char * addr
= (char *) (sc
.regs
->dar
);
2221 --> architecture
not supported
2228 # if defined(MACOSX)
2229 char * addr
= get_fault_addr(scp
);
2231 # if defined(MSWIN32) || defined(MSWINCE)
2232 char * addr
= (char *) (exc_info
-> ExceptionRecord
2233 -> ExceptionInformation
[1]);
2234 # define sig SIGSEGV
2237 if (SIG_OK
&& CODE_OK
) {
2238 register struct hblk
* h
=
2239 (struct hblk
*)((word
)addr
& ~(GC_page_size
-1));
2240 GC_bool in_allocd_block
;
2243 /* Address is only within the correct physical page. */
2244 in_allocd_block
= FALSE
;
2245 for (i
= 0; i
< divHBLKSZ(GC_page_size
); i
++) {
2246 if (HDR(h
+i
) != 0) {
2247 in_allocd_block
= TRUE
;
2251 in_allocd_block
= (HDR(addr
) != 0);
2253 if (!in_allocd_block
) {
2254 /* Heap blocks now begin and end on page boundaries */
2257 if (sig
== SIGSEGV
) {
2258 old_handler
= GC_old_segv_handler
;
2260 old_handler
= GC_old_bus_handler
;
2262 if (old_handler
== SIG_DFL
) {
2263 # if !defined(MSWIN32) && !defined(MSWINCE)
2264 GC_err_printf1("Segfault at 0x%lx\n", addr
);
2265 ABORT("Unexpected bus error or segmentation fault");
2267 return(EXCEPTION_CONTINUE_SEARCH
);
2270 # if defined (SUNOS4) || defined(FREEBSD)
2271 (*old_handler
) (sig
, code
, scp
, addr
);
2274 # if defined (SUNOS5SIGS)
2275 (*(REAL_SIG_PF
)old_handler
) (sig
, scp
, context
);
2278 # if defined (LINUX)
2279 # if defined(ALPHA) || defined(M68K)
2280 (*(REAL_SIG_PF
)old_handler
) (sig
, code
, sc
);
2282 # if defined(IA64) || defined(HP_PA)
2283 (*(REAL_SIG_PF
)old_handler
) (sig
, si
, scp
);
2285 (*(REAL_SIG_PF
)old_handler
) (sig
, sc
);
2290 # if defined (IRIX5) || defined(OSF1) || defined(HURD)
2291 (*(REAL_SIG_PF
)old_handler
) (sig
, code
, scp
);
2295 (*(REAL_SIG_PF
)old_handler
) (sig
, code
, scp
);
2298 return((*old_handler
)(exc_info
));
2302 UNPROTECT(h
, GC_page_size
);
2303 /* We need to make sure that no collection occurs between */
2304 /* the UNPROTECT and the setting of the dirty bit. Otherwise */
2305 /* a write by a third thread might go unnoticed. Reversing */
2306 /* the order is just as bad, since we would end up unprotecting */
2307 /* a page in a GC cycle during which it's not marked. */
2308 /* Currently we do this by disabling the thread stopping */
2309 /* signals while this handler is running. An alternative might */
2310 /* be to record the fact that we're about to unprotect, or */
2311 /* have just unprotected a page in the GC's thread structure, */
2312 /* and then to have the thread stopping code set the dirty */
2313 /* flag, if necessary. */
2314 for (i
= 0; i
< divHBLKSZ(GC_page_size
); i
++) {
2315 register int index
= PHT_HASH(h
+i
);
2317 async_set_pht_entry_from_index(GC_dirty_pages
, index
);
2320 /* These reset the signal handler each time by default. */
2321 signal(SIGSEGV
, (SIG_PF
) GC_write_fault_handler
);
2323 /* The write may not take place before dirty bits are read. */
2324 /* But then we'll fault again ... */
2325 # if defined(MSWIN32) || defined(MSWINCE)
2326 return(EXCEPTION_CONTINUE_EXECUTION
);
2331 #if defined(MSWIN32) || defined(MSWINCE)
2332 return EXCEPTION_CONTINUE_SEARCH
;
2334 GC_err_printf1("Segfault at 0x%lx\n", addr
);
2335 ABORT("Unexpected bus error or segmentation fault");
2340 * We hold the allocation lock. We expect block h to be written
2341 * shortly. Ensure that all pages cvontaining any part of the n hblks
2342 * starting at h are no longer protected. If is_ptrfree is false,
2343 * also ensure that they will subsequently appear to be dirty.
2345 void GC_remove_protection(h
, nblocks
, is_ptrfree
)
2350 struct hblk
* h_trunc
; /* Truncated to page boundary */
2351 struct hblk
* h_end
; /* Page boundary following block end */
2352 struct hblk
* current
;
2353 GC_bool found_clean
;
2355 if (!GC_dirty_maintained
) return;
2356 h_trunc
= (struct hblk
*)((word
)h
& ~(GC_page_size
-1));
2357 h_end
= (struct hblk
*)(((word
)(h
+ nblocks
) + GC_page_size
-1)
2358 & ~(GC_page_size
-1));
2359 found_clean
= FALSE
;
2360 for (current
= h_trunc
; current
< h_end
; ++current
) {
2361 int index
= PHT_HASH(current
);
2363 if (!is_ptrfree
|| current
< h
|| current
>= h
+ nblocks
) {
2364 async_set_pht_entry_from_index(GC_dirty_pages
, index
);
2367 UNPROTECT(h_trunc
, (ptr_t
)h_end
- (ptr_t
)h_trunc
);
2370 void GC_dirty_init()
2372 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
2373 defined(OSF1) || defined(HURD)
2374 struct sigaction act
, oldact
;
2375 /* We should probably specify SA_SIGINFO for Linux, and handle */
2376 /* the different architectures more uniformly. */
2377 # if defined(IRIX5) || defined(LINUX) || defined(OSF1) || defined(HURD)
2378 act
.sa_flags
= SA_RESTART
;
2379 act
.sa_handler
= (SIG_PF
)GC_write_fault_handler
;
2381 act
.sa_flags
= SA_RESTART
| SA_SIGINFO
;
2382 act
.sa_sigaction
= GC_write_fault_handler
;
2384 (void)sigemptyset(&act
.sa_mask
);
2386 /* Arrange to postpone SIG_SUSPEND while we're in a write fault */
2387 /* handler. This effectively makes the handler atomic w.r.t. */
2388 /* stopping the world for GC. */
2389 (void)sigaddset(&act
.sa_mask
, SIG_SUSPEND
);
2390 # endif /* SIG_SUSPEND */
2392 # if defined(MACOSX)
2393 struct sigaction act
, oldact
;
2395 act
.sa_flags
= SA_RESTART
;
2396 act
.sa_handler
= GC_write_fault_handler
;
2397 sigemptyset(&act
.sa_mask
);
2400 GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
2402 GC_dirty_maintained
= TRUE
;
2403 if (GC_page_size
% HBLKSIZE
!= 0) {
2404 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2405 ABORT("Page size not multiple of HBLKSIZE");
2407 # if defined(SUNOS4) || defined(FREEBSD)
2408 GC_old_bus_handler
= signal(SIGBUS
, GC_write_fault_handler
);
2409 if (GC_old_bus_handler
== SIG_IGN
) {
2410 GC_err_printf0("Previously ignored bus error!?");
2411 GC_old_bus_handler
= SIG_DFL
;
2413 if (GC_old_bus_handler
!= SIG_DFL
) {
2415 GC_err_printf0("Replaced other SIGBUS handler\n");
2419 # if defined(SUNOS4)
2420 GC_old_segv_handler
= signal(SIGSEGV
, (SIG_PF
)GC_write_fault_handler
);
2421 if (GC_old_segv_handler
== SIG_IGN
) {
2422 GC_err_printf0("Previously ignored segmentation violation!?");
2423 GC_old_segv_handler
= SIG_DFL
;
2425 if (GC_old_segv_handler
!= SIG_DFL
) {
2427 GC_err_printf0("Replaced other SIGSEGV handler\n");
2431 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) \
2432 || defined(OSF1) || defined(HURD)
2433 /* SUNOS5SIGS includes HPUX */
2434 # if defined(GC_IRIX_THREADS)
2435 sigaction(SIGSEGV
, 0, &oldact
);
2436 sigaction(SIGSEGV
, &act
, 0);
2438 sigaction(SIGSEGV
, &act
, &oldact
);
2440 # if defined(_sigargs) || defined(HURD)
2441 /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2443 GC_old_segv_handler
= oldact
.sa_handler
;
2444 # else /* Irix 6.x or SUNOS5SIGS or LINUX */
2445 if (oldact
.sa_flags
& SA_SIGINFO
) {
2446 GC_old_segv_handler
= (SIG_PF
)(oldact
.sa_sigaction
);
2448 GC_old_segv_handler
= oldact
.sa_handler
;
2451 if (GC_old_segv_handler
== SIG_IGN
) {
2452 GC_err_printf0("Previously ignored segmentation violation!?");
2453 GC_old_segv_handler
= SIG_DFL
;
2455 if (GC_old_segv_handler
!= SIG_DFL
) {
2457 GC_err_printf0("Replaced other SIGSEGV handler\n");
2461 # if defined(MACOSX) || defined(HPUX) || defined(LINUX) || defined(HURD)
2462 sigaction(SIGBUS
, &act
, &oldact
);
2463 GC_old_bus_handler
= oldact
.sa_handler
;
2464 if (GC_old_bus_handler
== SIG_IGN
) {
2465 GC_err_printf0("Previously ignored bus error!?");
2466 GC_old_bus_handler
= SIG_DFL
;
2468 if (GC_old_bus_handler
!= SIG_DFL
) {
2470 GC_err_printf0("Replaced other SIGBUS handler\n");
2473 # endif /* MACOS || HPUX || LINUX */
2474 # if defined(MSWIN32)
2475 GC_old_segv_handler
= SetUnhandledExceptionFilter(GC_write_fault_handler
);
2476 if (GC_old_segv_handler
!= NULL
) {
2478 GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2481 GC_old_segv_handler
= SIG_DFL
;
2486 int GC_incremental_protection_needs()
2488 if (GC_page_size
== HBLKSIZE
) {
2489 return GC_PROTECTS_POINTER_HEAP
;
2491 return GC_PROTECTS_POINTER_HEAP
| GC_PROTECTS_PTRFREE_HEAP
;
2495 #define HAVE_INCREMENTAL_PROTECTION_NEEDS
2497 #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
2499 #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
2500 void GC_protect_heap()
2504 struct hblk
* current
;
2505 struct hblk
* current_start
; /* Start of block to be protected. */
2506 struct hblk
* limit
;
2508 GC_bool protect_all
=
2509 (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP
));
2510 for (i
= 0; i
< GC_n_heap_sects
; i
++) {
2511 start
= GC_heap_sects
[i
].hs_start
;
2512 len
= GC_heap_sects
[i
].hs_bytes
;
2514 PROTECT(start
, len
);
2516 GC_ASSERT(PAGE_ALIGNED(len
))
2517 GC_ASSERT(PAGE_ALIGNED(start
))
2518 current_start
= current
= (struct hblk
*)start
;
2519 limit
= (struct hblk
*)(start
+ len
);
2520 while (current
< limit
) {
2525 GC_ASSERT(PAGE_ALIGNED(current
));
2526 GET_HDR(current
, hhdr
);
2527 if (IS_FORWARDING_ADDR_OR_NIL(hhdr
)) {
2528 /* This can happen only if we're at the beginning of a */
2529 /* heap segment, and a block spans heap segments. */
2530 /* We will handle that block as part of the preceding */
2532 GC_ASSERT(current_start
== current
);
2533 current_start
= ++current
;
2536 if (HBLK_IS_FREE(hhdr
)) {
2537 GC_ASSERT(PAGE_ALIGNED(hhdr
-> hb_sz
));
2538 nhblks
= divHBLKSZ(hhdr
-> hb_sz
);
2539 is_ptrfree
= TRUE
; /* dirty on alloc */
2541 nhblks
= OBJ_SZ_TO_BLOCKS(hhdr
-> hb_sz
);
2542 is_ptrfree
= IS_PTRFREE(hhdr
);
2545 if (current_start
< current
) {
2546 PROTECT(current_start
, (ptr_t
)current
- (ptr_t
)current_start
);
2548 current_start
= (current
+= nhblks
);
2553 if (current_start
< current
) {
2554 PROTECT(current_start
, (ptr_t
)current
- (ptr_t
)current_start
);
2560 /* We assume that either the world is stopped or its OK to lose dirty */
2561 /* bits while this is happenning (as in GC_enable_incremental). */
2562 void GC_read_dirty()
2564 BCOPY((word
*)GC_dirty_pages
, GC_grungy_pages
,
2565 (sizeof GC_dirty_pages
));
2566 BZERO((word
*)GC_dirty_pages
, (sizeof GC_dirty_pages
));
2570 GC_bool
GC_page_was_dirty(h
)
2573 register word index
= PHT_HASH(h
);
2575 return(HDR(h
) == 0 || get_pht_entry_from_index(GC_grungy_pages
, index
));
2579 * Acquiring the allocation lock here is dangerous, since this
2580 * can be called from within GC_call_with_alloc_lock, and the cord
2581 * package does so. On systems that allow nested lock acquisition, this
2583 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2586 static GC_bool syscall_acquired_lock
= FALSE
; /* Protected by GC lock. */
2588 void GC_begin_syscall()
2590 if (!I_HOLD_LOCK()) {
2592 syscall_acquired_lock
= TRUE
;
2596 void GC_end_syscall()
2598 if (syscall_acquired_lock
) {
2599 syscall_acquired_lock
= FALSE
;
2604 void GC_unprotect_range(addr
, len
)
2608 struct hblk
* start_block
;
2609 struct hblk
* end_block
;
2610 register struct hblk
*h
;
2613 if (!GC_dirty_maintained
) return;
2614 obj_start
= GC_base(addr
);
2615 if (obj_start
== 0) return;
2616 if (GC_base(addr
+ len
- 1) != obj_start
) {
2617 ABORT("GC_unprotect_range(range bigger than object)");
2619 start_block
= (struct hblk
*)((word
)addr
& ~(GC_page_size
- 1));
2620 end_block
= (struct hblk
*)((word
)(addr
+ len
- 1) & ~(GC_page_size
- 1));
2621 end_block
+= GC_page_size
/HBLKSIZE
- 1;
2622 for (h
= start_block
; h
<= end_block
; h
++) {
2623 register word index
= PHT_HASH(h
);
2625 async_set_pht_entry_from_index(GC_dirty_pages
, index
);
2627 UNPROTECT(start_block
,
2628 ((ptr_t
)end_block
- (ptr_t
)start_block
) + HBLKSIZE
);
2631 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(THREADS) \
2632 && !defined(GC_USE_LD_WRAP)
2633 /* Replacement for UNIX system call. */
2634 /* Other calls that write to the heap should be handled similarly. */
2635 /* Note that this doesn't work well for blocking reads: It will hold */
2636 /* the allocation lock for the entire duration of the call. Multithreaded */
2637 /* clients should really ensure that it won't block, either by setting */
2638 /* the descriptor nonblocking, or by calling select or poll first, to */
2639 /* make sure that input is available. */
2640 # if defined(__STDC__) && !defined(SUNOS4)
2641 # include <unistd.h>
2642 # include <sys/uio.h>
2643 ssize_t
read(int fd
, void *buf
, size_t nbyte
)
2646 int read(fd
, buf
, nbyte
)
2648 int GC_read(fd
, buf
, nbyte
)
2658 GC_unprotect_range(buf
, (word
)nbyte
);
2659 # if defined(IRIX5) || defined(GC_LINUX_THREADS)
2660 /* Indirect system call may not always be easily available. */
2661 /* We could call _read, but that would interfere with the */
2662 /* libpthread interception of read. */
2663 /* On Linux, we have to be careful with the linuxthreads */
2664 /* read interception. */
2669 iov
.iov_len
= nbyte
;
2670 result
= readv(fd
, &iov
, 1);
2674 result
= __read(fd
, buf
, nbyte
);
2676 /* The two zero args at the end of this list are because one
2677 IA-64 syscall() implementation actually requires six args
2678 to be passed, even though they aren't always used. */
2679 result
= syscall(SYS_read
, fd
, buf
, nbyte
, 0, 0);
2685 #endif /* !MSWIN32 && !MSWINCE && !GC_LINUX_THREADS */
2687 #if defined(GC_USE_LD_WRAP) && !defined(THREADS)
2688 /* We use the GNU ld call wrapping facility. */
2689 /* This requires that the linker be invoked with "--wrap read". */
2690 /* This can be done by passing -Wl,"--wrap read" to gcc. */
2691 /* I'm not sure that this actually wraps whatever version of read */
2692 /* is called by stdio. That code also mentions __read. */
2693 # include <unistd.h>
2694 ssize_t
__wrap_read(int fd
, void *buf
, size_t nbyte
)
2699 GC_unprotect_range(buf
, (word
)nbyte
);
2700 result
= __real_read(fd
, buf
, nbyte
);
2705 /* We should probably also do this for __read, or whatever stdio */
2706 /* actually calls. */
2710 GC_bool
GC_page_was_ever_dirty(h
)
2716 /* Reset the n pages starting at h to "was never dirty" status. */
2718 void GC_is_fresh(h
, n
)
2724 # else /* !MPROTECT_VDB */
2726 # ifdef GC_USE_LD_WRAP
/* Pass-through wrapper used when MPROTECT_VDB is disabled but the     */
/* link line still carries "--wrap read": simply forward to the real   */
/* read() with no dirty-bit bookkeeping.                               */
ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
{
    return __real_read(fd, buf, nbyte);
}
2731 # endif /* MPROTECT_VDB */
2736 * See DEFAULT_VDB for interface descriptions.
2740 * This implementation assumes a Solaris 2.X like /proc pseudo-file-system
2741 * from which we can read page modified bits. This facility is far from
2742 * optimal (e.g. we would like to get the info for only some of the
2743 * address space), but it avoids intercepting system calls.
2747 #include <sys/types.h>
2748 #include <sys/signal.h>
2749 #include <sys/fault.h>
2750 #include <sys/syscall.h>
2751 #include <sys/procfs.h>
2752 #include <sys/stat.h>
2754 #define INITIAL_BUF_SZ 4096
2755 word GC_proc_buf_size
= INITIAL_BUF_SZ
;
2758 #ifdef GC_SOLARIS_THREADS
2759 /* We don't have exact sp values for threads. So we count on */
2760 /* occasionally declaring stack pages to be fresh. Thus we */
2761 /* need a real implementation of GC_is_fresh. We can't clear */
2762 /* entries in GC_written_pages, since that would declare all */
2763 /* pages with the given hash address to be fresh. */
2764 # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
2765 struct hblk
** GC_fresh_pages
; /* A direct mapped cache. */
2766 /* Collisions are dropped. */
2768 # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
2769 # define ADD_FRESH_PAGE(h) \
2770 GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
2771 # define PAGE_IS_FRESH(h) \
2772 (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
2775 /* Add all pages in pht2 to pht1 */
2776 void GC_or_pages(pht1
, pht2
)
2777 page_hash_table pht1
, pht2
;
2781 for (i
= 0; i
< PHT_SIZE
; i
++) pht1
[i
] |= pht2
[i
];
2786 void GC_dirty_init()
2791 GC_dirty_maintained
= TRUE
;
2792 if (GC_words_allocd
!= 0 || GC_words_allocd_before_gc
!= 0) {
2795 for (i
= 0; i
< PHT_SIZE
; i
++) GC_written_pages
[i
] = (word
)(-1);
2797 GC_printf1("Allocated words:%lu:all pages may have been written\n",
2799 (GC_words_allocd
+ GC_words_allocd_before_gc
));
2802 sprintf(buf
, "/proc/%d", getpid());
2803 fd
= open(buf
, O_RDONLY
);
2805 ABORT("/proc open failed");
2807 GC_proc_fd
= syscall(SYS_ioctl
, fd
, PIOCOPENPD
, 0);
2809 if (GC_proc_fd
< 0) {
2810 ABORT("/proc ioctl failed");
2812 GC_proc_buf
= GC_scratch_alloc(GC_proc_buf_size
);
2813 # ifdef GC_SOLARIS_THREADS
2814 GC_fresh_pages
= (struct hblk
**)
2815 GC_scratch_alloc(MAX_FRESH_PAGES
* sizeof (struct hblk
*));
2816 if (GC_fresh_pages
== 0) {
2817 GC_err_printf0("No space for fresh pages\n");
2820 BZERO(GC_fresh_pages
, MAX_FRESH_PAGES
* sizeof (struct hblk
*));
2824 /* Ignore write hints. They don't help us here. */
2826 void GC_remove_protection(h
, nblocks
, is_ptrfree
)
2833 #ifdef GC_SOLARIS_THREADS
2834 # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
2836 # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
2839 void GC_read_dirty()
2841 unsigned long ps
, np
;
2844 struct prasmap
* map
;
2846 ptr_t current_addr
, limit
;
2850 BZERO(GC_grungy_pages
, (sizeof GC_grungy_pages
));
2853 if (READ(GC_proc_fd
, bufp
, GC_proc_buf_size
) <= 0) {
2855 GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
2859 /* Retry with larger buffer. */
2860 word new_size
= 2 * GC_proc_buf_size
;
2861 char * new_buf
= GC_scratch_alloc(new_size
);
2864 GC_proc_buf
= bufp
= new_buf
;
2865 GC_proc_buf_size
= new_size
;
2867 if (syscall(SYS_read
, GC_proc_fd
, bufp
, GC_proc_buf_size
) <= 0) {
2868 WARN("Insufficient space for /proc read\n", 0);
2870 memset(GC_grungy_pages
, 0xff, sizeof (page_hash_table
));
2871 memset(GC_written_pages
, 0xff, sizeof(page_hash_table
));
2872 # ifdef GC_SOLARIS_THREADS
2873 BZERO(GC_fresh_pages
,
2874 MAX_FRESH_PAGES
* sizeof (struct hblk
*));
2880 /* Copy dirty bits into GC_grungy_pages */
2881 nmaps
= ((struct prpageheader
*)bufp
) -> pr_nmap
;
2882 /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
2883 nmaps, PG_REFERENCED, PG_MODIFIED); */
2884 bufp
= bufp
+ sizeof(struct prpageheader
);
2885 for (i
= 0; i
< nmaps
; i
++) {
2886 map
= (struct prasmap
*)bufp
;
2887 vaddr
= (ptr_t
)(map
-> pr_vaddr
);
2888 ps
= map
-> pr_pagesize
;
2889 np
= map
-> pr_npage
;
2890 /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
2891 limit
= vaddr
+ ps
* np
;
2892 bufp
+= sizeof (struct prasmap
);
2893 for (current_addr
= vaddr
;
2894 current_addr
< limit
; current_addr
+= ps
){
2895 if ((*bufp
++) & PG_MODIFIED
) {
2896 register struct hblk
* h
= (struct hblk
*) current_addr
;
2898 while ((ptr_t
)h
< current_addr
+ ps
) {
2899 register word index
= PHT_HASH(h
);
2901 set_pht_entry_from_index(GC_grungy_pages
, index
);
2902 # ifdef GC_SOLARIS_THREADS
2904 register int slot
= FRESH_PAGE_SLOT(h
);
2906 if (GC_fresh_pages
[slot
] == h
) {
2907 GC_fresh_pages
[slot
] = 0;
2915 bufp
+= sizeof(long) - 1;
2916 bufp
= (char *)((unsigned long)bufp
& ~(sizeof(long)-1));
2918 /* Update GC_written_pages. */
2919 GC_or_pages(GC_written_pages
, GC_grungy_pages
);
2920 # ifdef GC_SOLARIS_THREADS
2921 /* Make sure that old stacks are considered completely clean */
2922 /* unless written again. */
2923 GC_old_stacks_are_fresh();
2929 GC_bool
GC_page_was_dirty(h
)
2932 register word index
= PHT_HASH(h
);
2933 register GC_bool result
;
2935 result
= get_pht_entry_from_index(GC_grungy_pages
, index
);
2936 # ifdef GC_SOLARIS_THREADS
2937 if (result
&& PAGE_IS_FRESH(h
)) result
= FALSE
;
2938 /* This happens only if page was declared fresh since */
2939 /* the read_dirty call, e.g. because it's in an unused */
2940 /* thread stack. It's OK to treat it as clean, in */
2941 /* that case. And it's consistent with */
2942 /* GC_page_was_ever_dirty. */
2947 GC_bool
GC_page_was_ever_dirty(h
)
2950 register word index
= PHT_HASH(h
);
2951 register GC_bool result
;
2953 result
= get_pht_entry_from_index(GC_written_pages
, index
);
2954 # ifdef GC_SOLARIS_THREADS
2955 if (result
&& PAGE_IS_FRESH(h
)) result
= FALSE
;
2960 /* Caller holds allocation lock. */
2961 void GC_is_fresh(h
, n
)
2966 register word index
;
2968 # ifdef GC_SOLARIS_THREADS
2971 if (GC_fresh_pages
!= 0) {
2972 for (i
= 0; i
< n
; i
++) {
2973 ADD_FRESH_PAGE(h
+ i
);
2979 # endif /* PROC_VDB */
2984 # include "vd/PCR_VD.h"
2986 # define NPAGES (32*1024) /* 128 MB */
2988 PCR_VD_DB GC_grungy_bits
[NPAGES
];
2990 ptr_t GC_vd_base
; /* Address corresponding to GC_grungy_bits[0] */
2991 /* HBLKSIZE aligned. */
2993 void GC_dirty_init()
2995 GC_dirty_maintained
= TRUE
;
2996 /* For the time being, we assume the heap generally grows up */
2997 GC_vd_base
= GC_heap_sects
[0].hs_start
;
2998 if (GC_vd_base
== 0) {
2999 ABORT("Bad initial heap segment");
3001 if (PCR_VD_Start(HBLKSIZE
, GC_vd_base
, NPAGES
*HBLKSIZE
)
3003 ABORT("dirty bit initialization failed");
3007 void GC_read_dirty()
3009 /* lazily enable dirty bits on newly added heap sects */
3011 static int onhs
= 0;
3012 int nhs
= GC_n_heap_sects
;
3013 for( ; onhs
< nhs
; onhs
++ ) {
3014 PCR_VD_WriteProtectEnable(
3015 GC_heap_sects
[onhs
].hs_start
,
3016 GC_heap_sects
[onhs
].hs_bytes
);
3021 if (PCR_VD_Clear(GC_vd_base
, NPAGES
*HBLKSIZE
, GC_grungy_bits
)
3023 ABORT("dirty bit read failed");
3027 GC_bool
GC_page_was_dirty(h
)
3030 if((ptr_t
)h
< GC_vd_base
|| (ptr_t
)h
>= GC_vd_base
+ NPAGES
*HBLKSIZE
) {
3033 return(GC_grungy_bits
[h
- (struct hblk
*)GC_vd_base
] & PCR_VD_DB_dirtyBit
);
3037 void GC_remove_protection(h
, nblocks
, is_ptrfree
)
3042 PCR_VD_WriteProtectDisable(h
, nblocks
*HBLKSIZE
);
3043 PCR_VD_WriteProtectEnable(h
, nblocks
*HBLKSIZE
);
3046 # endif /* PCR_VDB */
3048 # ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
3049 int GC_incremental_protection_needs()
3051 return GC_PROTECTS_NONE
;
3053 # endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
3056 * Call stack save code for debugging.
3057 * Should probably be in mach_dep.c, but that requires reorganization.
3060 /* I suspect the following works for most X86 *nix variants, so */
3061 /* long as the frame pointer is explicitly stored. In the case of gcc, */
3062 /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
3063 #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
3064 # include <features.h>
3067 struct frame
*fr_savfp
;
3069 long fr_arg
[NARGS
]; /* All the arguments go here. */
3075 # include <features.h>
3080 struct frame
*fr_savfp
;
3089 # if defined(SUNOS4)
3090 # include <machine/frame.h>
3092 # if defined (DRSNX)
3093 # include <sys/sparc/frame.h>
3095 # if defined(OPENBSD) || defined(NETBSD)
3098 # include <sys/frame.h>
3104 --> We only know how to get the first 6 arguments
3108 #ifdef SAVE_CALL_CHAIN
3109 /* Fill in the pc and argument information for up to NFRAMES of my */
3110 /* callers. Ignore my frame and my callers frame. */
3113 # include <features.h>
3114 # if __GLIBC__ == 2 && __GLIBC_MINOR__ >= 1 || __GLIBC__ > 2
3115 # define HAVE_BUILTIN_BACKTRACE
3119 #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
3120 && defined(HAVE_BUILTIN_BACKTRACE)
3122 #include <execinfo.h>
3124 void GC_save_callers (info
)
3125 struct callinfo info
[NFRAMES
];
3127 void * tmp_info
[NFRAMES
+ 1];
3129 # define IGNORE_FRAMES 1
3131 /* We retrieve NFRAMES+1 pc values, but discard the first, since it */
3132 /* points to our own frame. */
3133 GC_ASSERT(sizeof(struct callinfo
) == sizeof(void *));
3134 npcs
= backtrace((void **)tmp_info
, NFRAMES
+ IGNORE_FRAMES
);
3135 BCOPY(tmp_info
+IGNORE_FRAMES
, info
, (npcs
- IGNORE_FRAMES
) * sizeof(void *));
3136 for (i
= npcs
- IGNORE_FRAMES
; i
< NFRAMES
; ++i
) info
[i
].ci_pc
= 0;
3139 #else /* No builtin backtrace; do it ourselves */
3141 #if (defined(OPENBSD) || defined(NETBSD)) && defined(SPARC)
3142 # define FR_SAVFP fr_fp
3143 # define FR_SAVPC fr_pc
3145 # define FR_SAVFP fr_savfp
3146 # define FR_SAVPC fr_savpc
3149 #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
3155 void GC_save_callers (info
)
3156 struct callinfo info
[NFRAMES
];
3158 struct frame
*frame
;
3162 /* We assume this is turned on only with gcc as the compiler. */
3163 asm("movl %%ebp,%0" : "=r"(frame
));
3166 word
GC_save_regs_in_stack();
3168 frame
= (struct frame
*) GC_save_regs_in_stack ();
3169 fp
= (struct frame
*)((long) frame
-> FR_SAVFP
+ BIAS
);
3172 for (; (!(fp HOTTER_THAN frame
) && !(GC_stackbottom
HOTTER_THAN (ptr_t
)fp
)
3173 && (nframes
< NFRAMES
));
3174 fp
= (struct frame
*)((long) fp
-> FR_SAVFP
+ BIAS
), nframes
++) {
3177 info
[nframes
].ci_pc
= fp
->FR_SAVPC
;
3179 for (i
= 0; i
< NARGS
; i
++) {
3180 info
[nframes
].ci_arg
[i
] = ~(fp
->fr_arg
[i
]);
3182 # endif /* NARGS > 0 */
3184 if (nframes
< NFRAMES
) info
[nframes
].ci_pc
= 0;
3187 #endif /* No builtin backtrace */
3189 #endif /* SAVE_CALL_CHAIN */
3191 #if defined(LINUX) && defined(__ELF__) && \
3192 (!defined(SMALL_CONFIG) || defined(USE_PROC_FOR_LIBRARIES))
3193 #ifdef GC_USE_LD_WRAP
3194 # define READ __real_read
/* Repeatedly perform a read call until the buffer is filled or        */
/* we encounter EOF.  Returns the number of bytes read, or a negative  */
/* value on the first read error.                                      */
ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
    ssize_t num_read = 0;
    ssize_t result;

    /* Cast avoids a signed/unsigned comparison; num_read is always    */
    /* non-negative here.                                              */
    while (num_read < (ssize_t)count) {
        result = READ(fd, buf + num_read, count - num_read);
        if (result < 0) return result;
        if (result == 0) break;         /* EOF */
        num_read += result;
    }
    return num_read;
}
3215 #endif /* LINUX && ... */
3218 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
/* Dump /proc/self/maps to GC_stderr, to enable looking up names for
   addresses in FIND_LEAK output. */
void GC_print_address_map()
{
    int f;
    ssize_t result;
    char maps_temp[32768];

    GC_err_printf0("---------- Begin address map ----------\n");
    f = open("/proc/self/maps", O_RDONLY);
    if (-1 == f) ABORT("Couldn't open /proc/self/maps");
    do {
        result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
        if (result <= 0) ABORT("Couldn't read /proc/self/maps");
        GC_err_write(maps_temp, result);
        /* A full buffer means there may be more data to read.  */
    } while (result == sizeof(maps_temp));
    close(f);   /* Release the descriptor (was leaked previously). */
    GC_err_printf0("---------- End address map ----------\n");
}