boehm-gc/os_dep.c
1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
17 # include "private/gc_priv.h"
19 # if defined(LINUX) && !defined(POWERPC)
20 # include <linux/version.h>
21 # if (LINUX_VERSION_CODE <= 0x10400)
22 /* Ugly hack to get struct sigcontext_struct definition. Required */
23 /* for some early 1.3.X releases. Will hopefully go away soon. */
   24  /* In some later Linux releases, asm/sigcontext.h may have to       */
25 /* be included instead. */
26 # define __KERNEL__
27 # include <asm/signal.h>
28 # undef __KERNEL__
29 # else
30 /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31 /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
32 /* prototypes, so we have to include the top-level sigcontext.h to */
33 /* make sure the former gets defined to be the latter if appropriate. */
34 # include <features.h>
35 # if 2 <= __GLIBC__
36 # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
37 /* glibc 2.1 no longer has sigcontext.h. But signal.h */
38 /* has the right declaration for glibc 2.1. */
39 # include <sigcontext.h>
40 # endif /* 0 == __GLIBC_MINOR__ */
41 # else /* not 2 <= __GLIBC__ */
42 /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
43 /* one. Check LINUX_VERSION_CODE to see which we should reference. */
44 # include <asm/sigcontext.h>
45 # endif /* 2 <= __GLIBC__ */
46 # endif
47 # endif
48 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
49 && !defined(MSWINCE)
50 # include <sys/types.h>
51 # if !defined(MSWIN32) && !defined(SUNOS4)
52 # include <unistd.h>
53 # endif
54 # endif
56 # include <stdio.h>
57 # if defined(MSWINCE)
58 # define SIGSEGV 0 /* value is irrelevant */
59 # else
60 # include <signal.h>
61 # endif
63 /* Blatantly OS dependent routines, except for those that are related */
64 /* to dynamic loading. */
66 # if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
67 # define NEED_FIND_LIMIT
68 # endif
70 # if defined(IRIX_THREADS) || defined(HPUX_THREADS)
71 # define NEED_FIND_LIMIT
72 # endif
74 # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
75 # define NEED_FIND_LIMIT
76 # endif
78 # if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
79 # define NEED_FIND_LIMIT
80 # endif
82 # if defined(LINUX) && \
83 (defined(POWERPC) || defined(SPARC) || defined(ALPHA) || defined(IA64) \
84 || defined(MIPS))
85 # define NEED_FIND_LIMIT
86 # endif
88 #ifdef NEED_FIND_LIMIT
89 # include <setjmp.h>
90 #endif
92 #ifdef FREEBSD
93 # include <machine/trap.h>
94 #endif
96 #ifdef AMIGA
97 # define GC_AMIGA_DEF
98 # include "AmigaOS.c"
99 # undef GC_AMIGA_DEF
100 #endif
102 #if defined(MSWIN32) || defined(MSWINCE)
103 # define WIN32_LEAN_AND_MEAN
104 # define NOSERVICE
105 # include <windows.h>
106 #endif
108 #ifdef MACOS
109 # include <Processes.h>
110 #endif
112 #ifdef IRIX5
113 # include <sys/uio.h>
114 # include <malloc.h> /* for locking */
115 #endif
116 #ifdef USE_MMAP
117 # include <sys/types.h>
118 # include <sys/mman.h>
119 # include <sys/stat.h>
120 #endif
122 #ifdef UNIX_LIKE
123 # include <fcntl.h>
124 #endif
126 #ifdef SUNOS5SIGS
127 # include <sys/siginfo.h>
128 # undef setjmp
129 # undef longjmp
130 # define setjmp(env) sigsetjmp(env, 1)
131 # define longjmp(env, val) siglongjmp(env, val)
132 # define jmp_buf sigjmp_buf
133 #endif
135 #ifdef DJGPP
136 /* Apparently necessary for djgpp 2.01. May cause problems with */
137 /* other versions. */
138 typedef long unsigned int caddr_t;
139 #endif
141 #ifdef PCR
142 # include "il/PCR_IL.h"
143 # include "th/PCR_ThCtl.h"
144 # include "mm/PCR_MM.h"
145 #endif
147 #if !defined(NO_EXECUTE_PERMISSION)
148 # define OPT_PROT_EXEC PROT_EXEC
149 #else
150 # define OPT_PROT_EXEC 0
151 #endif
153 #if defined(SEARCH_FOR_DATA_START)
154 /* The I386 case can be handled without a search. The Alpha case */
155 /* used to be handled differently as well, but the rules changed */
156 /* for recent Linux versions. This seems to be the easiest way to */
157 /* cover all versions. */
159 # ifdef LINUX
160 # pragma weak __data_start
161 extern int __data_start;
162 # pragma weak data_start
163 extern int data_start;
164 # endif /* LINUX */
165 extern int _end;
167 ptr_t GC_data_start;
169 void GC_init_linux_data_start()
171 extern ptr_t GC_find_limit();
173 # ifdef LINUX
174 /* Try the easy approaches first: */
175 if (&__data_start != 0) {
176 GC_data_start = (ptr_t)(&__data_start);
177 return;
179 if (&data_start != 0) {
180 GC_data_start = (ptr_t)(&data_start);
181 return;
183 # endif /* LINUX */
184 GC_data_start = GC_find_limit((ptr_t)(&_end), FALSE);
186 #endif
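/* Illustrative aside, kept under #if 0 so it never affects a build: the      */
/* SEARCH_FOR_DATA_START code above relies on the ELF/GCC rule that the       */
/* address of an undefined weak symbol evaluates to 0, which is how the       */
/* presence of __data_start/data_start is tested before falling back to       */
/* GC_find_limit.  Minimal standalone sketch of that pattern; the symbol      */
/* name below is hypothetical.                                                */
#if 0
#include <stdio.h>

#pragma weak optional_symbol
extern int optional_symbol;

int main(void)
{
    if (&optional_symbol != 0) {
        printf("optional_symbol defined at %p\n", (void *)&optional_symbol);
    } else {
        printf("optional_symbol absent; fall back to a search\n");
    }
    return 0;
}
#endif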
188 # ifdef ECOS
190 # ifndef ECOS_GC_MEMORY_SIZE
191 # define ECOS_GC_MEMORY_SIZE (448 * 1024)
192 # endif /* ECOS_GC_MEMORY_SIZE */
194 // setjmp() function, as described in ANSI para 7.6.1.1
195 #define setjmp( __env__ ) hal_setjmp( __env__ )
197 // FIXME: This is a simple way of allocating memory which is
198 // compatible with ECOS early releases. Later releases use a more
199 // sophisticated means of allocating memory than this simple static
200 // allocator, but this method is at least bound to work.
201 static char memory[ECOS_GC_MEMORY_SIZE];
202 static char *brk = memory;
204 static void *tiny_sbrk(ptrdiff_t increment)
206 void *p = brk;
208 brk += increment;
210 if (brk > memory + sizeof memory)
212 brk -= increment;
213 return NULL;
216 return p;
218 #define sbrk tiny_sbrk
219 # endif /* ECOS */
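/* Illustrative aside, kept under #if 0: a standalone version of the bump-    */
/* allocator idea behind tiny_sbrk above.  The names here are made up for     */
/* the example; the real collector simply #defines sbrk to tiny_sbrk.         */
#if 0
#include <stddef.h>
#include <stdio.h>

static char arena[4096];
static char *arena_brk = arena;

static void *arena_sbrk(ptrdiff_t increment)
{
    void *p = arena_brk;
    if (arena_brk + increment > arena + sizeof arena) return NULL;
    arena_brk += increment;            /* bump the break pointer */
    return p;
}

int main(void)
{
    void *a = arena_sbrk(1024);        /* succeeds */
    void *b = arena_sbrk(8192);        /* exceeds the arena: returns NULL */
    printf("a = %p, b = %p\n", a, b);
    return 0;
}
#endif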
221 #if defined(NETBSD) && defined(__ELF__)
222 ptr_t GC_data_start;
224 void GC_init_netbsd_elf()
226 extern ptr_t GC_find_limit();
227 extern char **environ;
228 /* This may need to be environ, without the underscore, for */
229 /* some versions. */
230 GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
232 #endif
234 # ifdef OS2
236 # include <stddef.h>
238 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
240 struct exe_hdr {
241 unsigned short magic_number;
242 unsigned short padding[29];
243 long new_exe_offset;
246 #define E_MAGIC(x) (x).magic_number
247 #define EMAGIC 0x5A4D
248 #define E_LFANEW(x) (x).new_exe_offset
250 struct e32_exe {
251 unsigned char magic_number[2];
252 unsigned char byte_order;
253 unsigned char word_order;
254 unsigned long exe_format_level;
255 unsigned short cpu;
256 unsigned short os;
257 unsigned long padding1[13];
258 unsigned long object_table_offset;
259 unsigned long object_count;
260 unsigned long padding2[31];
263 #define E32_MAGIC1(x) (x).magic_number[0]
264 #define E32MAGIC1 'L'
265 #define E32_MAGIC2(x) (x).magic_number[1]
266 #define E32MAGIC2 'X'
267 #define E32_BORDER(x) (x).byte_order
268 #define E32LEBO 0
269 #define E32_WORDER(x) (x).word_order
270 #define E32LEWO 0
271 #define E32_CPU(x) (x).cpu
272 #define E32CPU286 1
273 #define E32_OBJTAB(x) (x).object_table_offset
274 #define E32_OBJCNT(x) (x).object_count
276 struct o32_obj {
277 unsigned long size;
278 unsigned long base;
279 unsigned long flags;
280 unsigned long pagemap;
281 unsigned long mapsize;
282 unsigned long reserved;
285 #define O32_FLAGS(x) (x).flags
286 #define OBJREAD 0x0001L
287 #define OBJWRITE 0x0002L
288 #define OBJINVALID 0x0080L
289 #define O32_SIZE(x) (x).size
290 #define O32_BASE(x) (x).base
292 # else /* IBM's compiler */
294 /* A kludge to get around what appears to be a header file bug */
295 # ifndef WORD
296 # define WORD unsigned short
297 # endif
298 # ifndef DWORD
299 # define DWORD unsigned long
300 # endif
302 # define EXE386 1
303 # include <newexe.h>
304 # include <exe386.h>
306 # endif /* __IBMC__ */
308 # define INCL_DOSEXCEPTIONS
309 # define INCL_DOSPROCESS
310 # define INCL_DOSERRORS
311 # define INCL_DOSMODULEMGR
312 # define INCL_DOSMEMMGR
313 # include <os2.h>
316 /* Disable and enable signals during nontrivial allocations */
318 void GC_disable_signals(void)
320 ULONG nest;
322 DosEnterMustComplete(&nest);
323 if (nest != 1) ABORT("nested GC_disable_signals");
326 void GC_enable_signals(void)
328 ULONG nest;
330 DosExitMustComplete(&nest);
331 if (nest != 0) ABORT("GC_enable_signals");
335 # else
337 # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
338 && !defined(MSWINCE) \
339 && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW)
341 # if defined(sigmask) && !defined(UTS4)
342 /* Use the traditional BSD interface */
343 # define SIGSET_T int
344 # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
345 # define SIG_FILL(set) (set) = 0x7fffffff
346 /* Setting the leading bit appears to provoke a bug in some */
347 /* longjmp implementations. Most systems appear not to have */
348 /* a signal 32. */
349 # define SIGSETMASK(old, new) (old) = sigsetmask(new)
350 # else
351 /* Use POSIX/SYSV interface */
352 # define SIGSET_T sigset_t
353 # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
354 # define SIG_FILL(set) sigfillset(&set)
355 # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
356 # endif
358 static GC_bool mask_initialized = FALSE;
360 static SIGSET_T new_mask;
362 static SIGSET_T old_mask;
364 static SIGSET_T dummy;
366 #if defined(PRINTSTATS) && !defined(THREADS)
367 # define CHECK_SIGNALS
368 int GC_sig_disabled = 0;
369 #endif
371 void GC_disable_signals()
373 if (!mask_initialized) {
374 SIG_FILL(new_mask);
376 SIG_DEL(new_mask, SIGSEGV);
377 SIG_DEL(new_mask, SIGILL);
378 SIG_DEL(new_mask, SIGQUIT);
379 # ifdef SIGBUS
380 SIG_DEL(new_mask, SIGBUS);
381 # endif
382 # ifdef SIGIOT
383 SIG_DEL(new_mask, SIGIOT);
384 # endif
385 # ifdef SIGEMT
386 SIG_DEL(new_mask, SIGEMT);
387 # endif
388 # ifdef SIGTRAP
389 SIG_DEL(new_mask, SIGTRAP);
390 # endif
391 mask_initialized = TRUE;
393 # ifdef CHECK_SIGNALS
394 if (GC_sig_disabled != 0) ABORT("Nested disables");
395 GC_sig_disabled++;
396 # endif
397 SIGSETMASK(old_mask,new_mask);
400 void GC_enable_signals()
402 # ifdef CHECK_SIGNALS
403 if (GC_sig_disabled != 1) ABORT("Unmatched enable");
404 GC_sig_disabled--;
405 # endif
406 SIGSETMASK(dummy,old_mask);
409 # endif /* !PCR */
411 # endif /*!OS/2 */
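/* Illustrative aside, kept under #if 0: the POSIX branch above amounts to    */
/* the following blocking pattern around a critical region.  This is only a   */
/* sketch of the idea, assuming sigprocmask is available; the collector       */
/* itself also caches the filled mask and checks for nested calls.            */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
    sigset_t block_most, old;

    sigfillset(&block_most);
    sigdelset(&block_most, SIGSEGV);            /* keep faults deliverable */
    sigprocmask(SIG_SETMASK, &block_most, &old);
    /* ... nontrivial allocation work happens here ... */
    sigprocmask(SIG_SETMASK, &old, 0);          /* restore caller's mask */
    puts("critical region done");
    return 0;
}
#endif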
413 /* Ivan Demakov: simplest way (to me) */
414 #if defined (DOS4GW)
415 void GC_disable_signals() { }
416 void GC_enable_signals() { }
417 #endif
419 /* Find the page size */
420 word GC_page_size;
422 # if defined(MSWIN32) || defined(MSWINCE)
423 void GC_setpagesize()
425 GetSystemInfo(&GC_sysinfo);
426 GC_page_size = GC_sysinfo.dwPageSize;
429 # else
430 # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
431 || defined(USE_MUNMAP)
432 void GC_setpagesize()
434 GC_page_size = GETPAGESIZE();
436 # else
437 /* It's acceptable to fake it. */
438 void GC_setpagesize()
440 GC_page_size = HBLKSIZE;
442 # endif
443 # endif
446 * Find the base of the stack.
447 * Used only in single-threaded environment.
448 * With threads, GC_mark_roots needs to know how to do this.
449 * Called with allocator lock held.
451 # if defined(MSWIN32) || defined(MSWINCE)
452 # define is_writable(prot) ((prot) == PAGE_READWRITE \
453 || (prot) == PAGE_WRITECOPY \
454 || (prot) == PAGE_EXECUTE_READWRITE \
455 || (prot) == PAGE_EXECUTE_WRITECOPY)
456 /* Return the number of bytes that are writable starting at p. */
457 /* The pointer p is assumed to be page aligned. */
458 /* If base is not 0, *base becomes the beginning of the */
459 /* allocation region containing p. */
460 word GC_get_writable_length(ptr_t p, ptr_t *base)
462 MEMORY_BASIC_INFORMATION buf;
463 word result;
464 word protect;
466 result = VirtualQuery(p, &buf, sizeof(buf));
467 if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
468 if (base != 0) *base = (ptr_t)(buf.AllocationBase);
469 protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
470 if (!is_writable(protect)) {
471 return(0);
473 if (buf.State != MEM_COMMIT) return(0);
474 return(buf.RegionSize);
477 ptr_t GC_get_stack_base()
479 int dummy;
480 ptr_t sp = (ptr_t)(&dummy);
481 ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
482 word size = GC_get_writable_length(trunc_sp, 0);
484 return(trunc_sp + size);
488 # endif /* MS Windows */
490 # ifdef BEOS
491 # include <kernel/OS.h>
492 ptr_t GC_get_stack_base(){
493 thread_info th;
494 get_thread_info(find_thread(NULL),&th);
495 return th.stack_end;
497 # endif /* BEOS */
500 # ifdef OS2
502 ptr_t GC_get_stack_base()
504 PTIB ptib;
505 PPIB ppib;
507 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
508 GC_err_printf0("DosGetInfoBlocks failed\n");
509 ABORT("DosGetInfoBlocks failed\n");
511 return((ptr_t)(ptib -> tib_pstacklimit));
514 # endif /* OS2 */
516 # ifdef AMIGA
517 # define GC_AMIGA_SB
518 # include "AmigaOS.c"
519 # undef GC_AMIGA_SB
520 # endif /* AMIGA */
522 # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
524 # ifdef __STDC__
525 typedef void (*handler)(int);
526 # else
527 typedef void (*handler)();
528 # endif
530 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
531 static struct sigaction old_segv_act;
532 # if defined(_sigargs) || defined(HPUX) /* !Irix6.x */
533 static struct sigaction old_bus_act;
534 # endif
535 # else
536 static handler old_segv_handler, old_bus_handler;
537 # endif
539 # ifdef __STDC__
540 void GC_set_and_save_fault_handler(handler h)
541 # else
542 void GC_set_and_save_fault_handler(h)
543 handler h;
544 # endif
546 # ifndef ECOS
547 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
548 struct sigaction act;
550 act.sa_handler = h;
551 act.sa_flags = SA_RESTART | SA_NODEFER;
552 /* The presence of SA_NODEFER represents yet another gross */
553 /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
554 /* interact correctly with -lthread. We hide the confusion */
555 /* by making sure that signal handling doesn't affect the */
556 /* signal mask. */
558 (void) sigemptyset(&act.sa_mask);
559 # ifdef IRIX_THREADS
  560               /* Older versions have a bug related to retrieving      */
561 /* and setting a handler at the same time. */
562 (void) sigaction(SIGSEGV, 0, &old_segv_act);
563 (void) sigaction(SIGSEGV, &act, 0);
564 # else
565 (void) sigaction(SIGSEGV, &act, &old_segv_act);
566 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
567 || defined(HPUX)
568 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
569 /* Pthreads doesn't exist under Irix 5.x, so we */
570 /* don't have to worry in the threads case. */
571 (void) sigaction(SIGBUS, &act, &old_bus_act);
572 # endif
573 # endif /* IRIX_THREADS */
574 # else
575 old_segv_handler = signal(SIGSEGV, h);
576 # ifdef SIGBUS
577 old_bus_handler = signal(SIGBUS, h);
578 # endif
579 # endif
580 # endif /* ECOS */
582 # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
584 # ifdef NEED_FIND_LIMIT
585 /* Some tools to implement HEURISTIC2 */
586 # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
587 /* static */ jmp_buf GC_jmp_buf;
589 /*ARGSUSED*/
590 void GC_fault_handler(sig)
591 int sig;
593 longjmp(GC_jmp_buf, 1);
596 void GC_setup_temporary_fault_handler()
598 GC_set_and_save_fault_handler(GC_fault_handler);
601 void GC_reset_fault_handler()
603 # ifndef ECOS
604 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1)
605 (void) sigaction(SIGSEGV, &old_segv_act, 0);
606 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
607 || defined(HPUX)
608 (void) sigaction(SIGBUS, &old_bus_act, 0);
609 # endif
610 # else
611 (void) signal(SIGSEGV, old_segv_handler);
612 # ifdef SIGBUS
613 (void) signal(SIGBUS, old_bus_handler);
614 # endif
615 # endif
616 # endif /* ECOS */
  619 /* Return the first nonaddressable location > p (up) or               */
  620 /* the smallest location q s.t. [q,p] is addressable (!up).           */
621 ptr_t GC_find_limit(p, up)
622 ptr_t p;
623 GC_bool up;
625 # ifndef ECOS
626 static VOLATILE ptr_t result;
627 /* Needs to be static, since otherwise it may not be */
628 /* preserved across the longjmp. Can safely be */
629 /* static since it's only called once, with the */
630 /* allocation lock held. */
633 GC_setup_temporary_fault_handler();
634 if (setjmp(GC_jmp_buf) == 0) {
635 result = (ptr_t)(((word)(p))
636 & ~(MIN_PAGE_SIZE-1));
637 for (;;) {
638 if (up) {
639 result += MIN_PAGE_SIZE;
640 } else {
641 result -= MIN_PAGE_SIZE;
643 GC_noop1((word)(*result));
646 GC_reset_fault_handler();
647 if (!up) {
648 result += MIN_PAGE_SIZE;
650 return(result);
651 # else /* ECOS */
652 abort();
653 # endif /* ECOS */
655 # endif
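/* Illustrative aside, kept under #if 0: a standalone sketch of the probing   */
/* technique GC_find_limit uses above.  It assumes POSIX signals and that a   */
/* longjmp out of a SIGSEGV handler is tolerated (as the collector itself     */
/* assumes); it is a demonstration, not production code.                      */
#if 0
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static jmp_buf probe_env;

static void probe_fault(int sig)
{
    (void)sig;
    longjmp(probe_env, 1);                      /* escape the faulting read */
}

static char *find_upper_limit(char *p, long page)
{
    static volatile char *q;                    /* static: survives the longjmp */

    signal(SIGSEGV, probe_fault);
    q = (char *)((unsigned long)p & ~(unsigned long)(page - 1));
    if (setjmp(probe_env) == 0) {
        for (;;) {
            (void)*q;                           /* faults on the first unmapped page */
            q += page;
        }
    }
    signal(SIGSEGV, SIG_DFL);
    return (char *)q;
}

int main(void)
{
    char local;
    long page = sysconf(_SC_PAGESIZE);

    printf("first unmapped page above the stack: %p\n",
           (void *)find_upper_limit(&local, page));
    return 0;
}
#endif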
657 # ifndef ECOS
659 #ifdef LINUX_STACKBOTTOM
661 #include <sys/types.h>
662 #include <sys/stat.h>
664 # define STAT_SKIP 27 /* Number of fields preceding startstack */
665 /* field in /proc/self/stat */
667 # pragma weak __libc_stack_end
668 extern ptr_t __libc_stack_end;
670 # ifdef IA64
671 # pragma weak __libc_ia64_register_backing_store_base
672 extern ptr_t __libc_ia64_register_backing_store_base;
674 ptr_t GC_get_register_stack_base(void)
676 if (0 != &__libc_ia64_register_backing_store_base) {
677 return __libc_ia64_register_backing_store_base;
678 } else {
679 word result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
680 result += BACKING_STORE_ALIGNMENT - 1;
681 result &= ~(BACKING_STORE_ALIGNMENT - 1);
682 return (ptr_t)result;
685 # endif
687 ptr_t GC_linux_stack_base(void)
689 /* We read the stack base value from /proc/self/stat. We do this */
690 /* using direct I/O system calls in order to avoid calling malloc */
691 /* in case REDIRECT_MALLOC is defined. */
692 # define STAT_BUF_SIZE 4096
693 # if defined(GC_USE_LD_WRAP)
694 # define STAT_READ __real_read
695 # else
696 # define STAT_READ read
697 # endif
698 char stat_buf[STAT_BUF_SIZE];
699 int f;
700 char c;
701 word result = 0;
702 size_t i, buf_offset = 0;
704 /* First try the easy way. This should work for glibc 2.2 */
705 if (0 != &__libc_stack_end) {
706 return __libc_stack_end;
708 f = open("/proc/self/stat", O_RDONLY);
709 if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
710 ABORT("Couldn't read /proc/self/stat");
712 c = stat_buf[buf_offset++];
713 /* Skip the required number of fields. This number is hopefully */
714 /* constant across all Linux implementations. */
715 for (i = 0; i < STAT_SKIP; ++i) {
716 while (isspace(c)) c = stat_buf[buf_offset++];
717 while (!isspace(c)) c = stat_buf[buf_offset++];
719 while (isspace(c)) c = stat_buf[buf_offset++];
720 while (isdigit(c)) {
721 result *= 10;
722 result += c - '0';
723 c = stat_buf[buf_offset++];
725 close(f);
726 if (result < 0x10000000) ABORT("Absurd stack bottom value");
727 return (ptr_t)result;
730 #endif /* LINUX_STACKBOTTOM */
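/* Illustrative aside, kept under #if 0: the value parsed above is the 28th   */
/* whitespace-separated field of /proc/self/stat ("startstack" in proc(5)),   */
/* hence STAT_SKIP == 27.  When avoiding malloc is not a concern, the same    */
/* field can be read with stdio; this assumes the usual Linux layout and an   */
/* executable name without embedded spaces.                                   */
#if 0
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/self/stat", "r");
    unsigned long startstack = 0;
    int i;

    if (f == 0) return 1;
    for (i = 0; i < 27; ++i) {
        if (fscanf(f, "%*s") == EOF) break;     /* skip the first 27 fields */
    }
    if (fscanf(f, "%lu", &startstack) != 1) startstack = 0;
    fclose(f);
    printf("startstack = 0x%lx\n", startstack);
    return 0;
}
#endif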
732 #ifdef FREEBSD_STACKBOTTOM
734 /* This uses an undocumented sysctl call, but at least one expert */
735 /* believes it will stay. */
737 #include <unistd.h>
738 #include <sys/types.h>
739 #include <sys/sysctl.h>
741 ptr_t GC_freebsd_stack_base(void)
743 int nm[2] = { CTL_KERN, KERN_USRSTACK}, base, len, r;
745 len = sizeof(int);
746 r = sysctl(nm, 2, &base, &len, NULL, 0);
748 if (r) ABORT("Error getting stack base");
750 return (ptr_t)base;
753 #endif /* FREEBSD_STACKBOTTOM */
755 #if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
756 && !defined(MSWINCE) && !defined(OS2)
758 ptr_t GC_get_stack_base()
760 word dummy;
761 ptr_t result;
763 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
765 # ifdef STACKBOTTOM
766 return(STACKBOTTOM);
767 # else
768 # ifdef HEURISTIC1
769 # ifdef STACK_GROWS_DOWN
770 result = (ptr_t)((((word)(&dummy))
771 + STACKBOTTOM_ALIGNMENT_M1)
772 & ~STACKBOTTOM_ALIGNMENT_M1);
773 # else
774 result = (ptr_t)(((word)(&dummy))
775 & ~STACKBOTTOM_ALIGNMENT_M1);
776 # endif
777 # endif /* HEURISTIC1 */
778 # ifdef LINUX_STACKBOTTOM
779 result = GC_linux_stack_base();
780 # endif
781 # ifdef FREEBSD_STACKBOTTOM
782 result = GC_freebsd_stack_base();
783 # endif
784 # ifdef HEURISTIC2
785 # ifdef STACK_GROWS_DOWN
786 result = GC_find_limit((ptr_t)(&dummy), TRUE);
787 # ifdef HEURISTIC2_LIMIT
788 if (result > HEURISTIC2_LIMIT
789 && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
790 result = HEURISTIC2_LIMIT;
792 # endif
793 # else
794 result = GC_find_limit((ptr_t)(&dummy), FALSE);
795 # ifdef HEURISTIC2_LIMIT
796 if (result < HEURISTIC2_LIMIT
797 && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
798 result = HEURISTIC2_LIMIT;
800 # endif
801 # endif
803 # endif /* HEURISTIC2 */
804 # ifdef STACK_GROWS_DOWN
805 if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
806 # endif
807 return(result);
808 # endif /* STACKBOTTOM */
810 # endif /* ECOS */
812 # endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS */
815 * Register static data segment(s) as roots.
  816  * If more data segments are added later then they need to be registered
  817  * at that point (as we do with SunOS dynamic loading),
818 * or GC_mark_roots needs to check for them (as we do with PCR).
819 * Called with allocator lock held.
822 # ifdef OS2
824 void GC_register_data_segments()
826 PTIB ptib;
827 PPIB ppib;
828 HMODULE module_handle;
829 # define PBUFSIZ 512
830 UCHAR path[PBUFSIZ];
831 FILE * myexefile;
832 struct exe_hdr hdrdos; /* MSDOS header. */
833 struct e32_exe hdr386; /* Real header for my executable */
  834     struct o32_obj seg; /* Current segment */
835 int nsegs;
838 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
839 GC_err_printf0("DosGetInfoBlocks failed\n");
840 ABORT("DosGetInfoBlocks failed\n");
842 module_handle = ppib -> pib_hmte;
843 if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
844 GC_err_printf0("DosQueryModuleName failed\n");
  845         ABORT("DosQueryModuleName failed\n");
847 myexefile = fopen(path, "rb");
848 if (myexefile == 0) {
849 GC_err_puts("Couldn't open executable ");
850 GC_err_puts(path); GC_err_puts("\n");
851 ABORT("Failed to open executable\n");
853 if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
854 GC_err_puts("Couldn't read MSDOS header from ");
855 GC_err_puts(path); GC_err_puts("\n");
856 ABORT("Couldn't read MSDOS header");
858 if (E_MAGIC(hdrdos) != EMAGIC) {
859 GC_err_puts("Executable has wrong DOS magic number: ");
860 GC_err_puts(path); GC_err_puts("\n");
861 ABORT("Bad DOS magic number");
863 if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
864 GC_err_puts("Seek to new header failed in ");
865 GC_err_puts(path); GC_err_puts("\n");
  866         ABORT("Seek to new header failed");
868 if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
869 GC_err_puts("Couldn't read MSDOS header from ");
870 GC_err_puts(path); GC_err_puts("\n");
871 ABORT("Couldn't read OS/2 header");
873 if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
874 GC_err_puts("Executable has wrong OS/2 magic number:");
875 GC_err_puts(path); GC_err_puts("\n");
876 ABORT("Bad OS/2 magic number");
878 if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
  879         GC_err_puts("Executable has wrong byte order: ");
880 GC_err_puts(path); GC_err_puts("\n");
881 ABORT("Bad byte order");
883 if ( E32_CPU(hdr386) == E32CPU286) {
884 GC_err_puts("GC can't handle 80286 executables: ");
885 GC_err_puts(path); GC_err_puts("\n");
886 EXIT();
888 if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
889 SEEK_SET) != 0) {
890 GC_err_puts("Seek to object table failed: ");
891 GC_err_puts(path); GC_err_puts("\n");
892 ABORT("Seek to object table failed");
894 for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
895 int flags;
896 if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
897 GC_err_puts("Couldn't read obj table entry from ");
898 GC_err_puts(path); GC_err_puts("\n");
899 ABORT("Couldn't read obj table entry");
901 flags = O32_FLAGS(seg);
902 if (!(flags & OBJWRITE)) continue;
903 if (!(flags & OBJREAD)) continue;
904 if (flags & OBJINVALID) {
905 GC_err_printf0("Object with invalid pages?\n");
906 continue;
908 GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
912 # else /* !OS2 */
914 # if defined(MSWIN32) || defined(MSWINCE)
916 # ifdef MSWIN32
  917   /* Unfortunately, we have to handle win32s very differently from NT, */
  918   /* since VirtualQuery has very different semantics. In particular,   */
  919   /* under win32s a VirtualQuery call on an unmapped page returns an   */
  920   /* invalid result. Under NT, GC_register_data_segments is a noop and */
  921   /* all real work is done by GC_register_dynamic_libraries. Under     */
  922   /* win32s, we cannot find the data segments associated with dll's.   */
  923   /* We register the main data segment here.                           */
924 GC_bool GC_win32s = FALSE; /* We're running under win32s. */
926 GC_bool GC_is_win32s()
928 DWORD v = GetVersion();
930 /* Check that this is not NT, and Windows major version <= 3 */
931 return ((v & 0x80000000) && (v & 0xff) <= 3);
934 void GC_init_win32()
936 GC_win32s = GC_is_win32s();
939 /* Return the smallest address a such that VirtualQuery */
940 /* returns correct results for all addresses between a and start. */
941 /* Assumes VirtualQuery returns correct information for start. */
942 ptr_t GC_least_described_address(ptr_t start)
944 MEMORY_BASIC_INFORMATION buf;
945 DWORD result;
946 LPVOID limit;
947 ptr_t p;
948 LPVOID q;
950 limit = GC_sysinfo.lpMinimumApplicationAddress;
951 p = (ptr_t)((word)start & ~(GC_page_size - 1));
952 for (;;) {
953 q = (LPVOID)(p - GC_page_size);
954 if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
955 result = VirtualQuery(q, &buf, sizeof(buf));
956 if (result != sizeof(buf) || buf.AllocationBase == 0) break;
957 p = (ptr_t)(buf.AllocationBase);
959 return(p);
961 # endif
963 /* Is p the start of either the malloc heap, or of one of our */
964 /* heap sections? */
965 GC_bool GC_is_heap_base (ptr_t p)
968 register unsigned i;
970 # ifndef REDIRECT_MALLOC
971 static ptr_t malloc_heap_pointer = 0;
973 if (0 == malloc_heap_pointer) {
974 MEMORY_BASIC_INFORMATION buf;
975 void *pTemp = malloc( 1 );
976 register DWORD result = VirtualQuery(pTemp, &buf, sizeof(buf));
978 free( pTemp );
981 if (result != sizeof(buf)) {
982 ABORT("Weird VirtualQuery result");
984 malloc_heap_pointer = (ptr_t)(buf.AllocationBase);
986 if (p == malloc_heap_pointer) return(TRUE);
987 # endif
988 for (i = 0; i < GC_n_heap_bases; i++) {
989 if (GC_heap_bases[i] == p) return(TRUE);
991 return(FALSE);
994 # ifdef MSWIN32
995 void GC_register_root_section(ptr_t static_root)
997 MEMORY_BASIC_INFORMATION buf;
998 DWORD result;
999 DWORD protect;
1000 LPVOID p;
1001 char * base;
1002 char * limit, * new_limit;
1004 if (!GC_win32s) return;
1005 p = base = limit = GC_least_described_address(static_root);
1006 while (p < GC_sysinfo.lpMaximumApplicationAddress) {
1007 result = VirtualQuery(p, &buf, sizeof(buf));
1008 if (result != sizeof(buf) || buf.AllocationBase == 0
1009 || GC_is_heap_base(buf.AllocationBase)) break;
1010 new_limit = (char *)p + buf.RegionSize;
1011 protect = buf.Protect;
1012 if (buf.State == MEM_COMMIT
1013 && is_writable(protect)) {
1014 if ((char *)p == limit) {
1015 limit = new_limit;
1016 } else {
1017 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1018 base = p;
1019 limit = new_limit;
1022 if (p > (LPVOID)new_limit /* overflow */) break;
1023 p = (LPVOID)new_limit;
1025 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1027 #endif
1029 void GC_register_data_segments()
1031 # ifdef MSWIN32
1032 static char dummy;
1033 GC_register_root_section((ptr_t)(&dummy));
1034 # endif
1037 # else /* !OS2 && !Windows */
1039 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1040 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1041 char * GC_SysVGetDataStart(max_page_size, etext_addr)
1042 int max_page_size;
1043 int * etext_addr;
1045 word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1046 & ~(sizeof(word) - 1);
1047 /* etext rounded to word boundary */
1048 word next_page = ((text_end + (word)max_page_size - 1)
1049 & ~((word)max_page_size - 1));
1050 word page_offset = (text_end & ((word)max_page_size - 1));
1051 VOLATILE char * result = (char *)(next_page + page_offset);
 1052     /* Note that this isn't equivalent to just adding                  */
1053 /* max_page_size to &etext if &etext is at a page boundary */
1055 GC_setup_temporary_fault_handler();
1056 if (setjmp(GC_jmp_buf) == 0) {
1057 /* Try writing to the address. */
1058 *result = *result;
1059 GC_reset_fault_handler();
1060 } else {
1061 GC_reset_fault_handler();
1062 /* We got here via a longjmp. The address is not readable. */
1063 /* This is known to happen under Solaris 2.4 + gcc, which place */
1064 /* string constants in the text segment, but after etext. */
1065 /* Use plan B. Note that we now know there is a gap between */
1066 /* text and data segments, so plan A bought us something. */
1067 result = (char *)GC_find_limit((ptr_t)(DATAEND) - MIN_PAGE_SIZE, FALSE);
1069 return((char *)result);
1071 # endif
1074 #ifdef AMIGA
1076 # define GC_AMIGA_DS
1077 # include "AmigaOS.c"
1078 # undef GC_AMIGA_DS
1080 #else /* !OS2 && !Windows && !AMIGA */
1082 void GC_register_data_segments()
1084 # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1085 && !defined(MACOSX)
1086 # if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
1087 /* As of Solaris 2.3, the Solaris threads implementation */
1088 /* allocates the data structure for the initial thread with */
1089 /* sbrk at process startup. It needs to be scanned, so that */
1090 /* we don't lose some malloc allocated data structures */
1091 /* hanging from it. We're on thin ice here ... */
1092 extern caddr_t sbrk();
1094 GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
1095 # else
1096 GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
1097 # endif
1098 # endif
1099 # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1100 GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
1101 # endif
1102 # if defined(MACOS)
1104 # if defined(THINK_C)
1105 extern void* GC_MacGetDataStart(void);
1106 /* globals begin above stack and end at a5. */
1107 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1108 (ptr_t)LMGetCurrentA5(), FALSE);
1109 # else
1110 # if defined(__MWERKS__)
1111 # if !__POWERPC__
1112 extern void* GC_MacGetDataStart(void);
1113 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1114 # if __option(far_data)
1115 extern void* GC_MacGetDataEnd(void);
1116 # endif
1117 /* globals begin above stack and end at a5. */
1118 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1119 (ptr_t)LMGetCurrentA5(), FALSE);
1120 /* MATTHEW: Handle Far Globals */
1121 # if __option(far_data)
 1122                 /* Far globals follow the QD globals:                  */
1123 GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
1124 (ptr_t)GC_MacGetDataEnd(), FALSE);
1125 # endif
1126 # else
1127 extern char __data_start__[], __data_end__[];
1128 GC_add_roots_inner((ptr_t)&__data_start__,
1129 (ptr_t)&__data_end__, FALSE);
1130 # endif /* __POWERPC__ */
1131 # endif /* __MWERKS__ */
1132 # endif /* !THINK_C */
1134 # endif /* MACOS */
1136 /* Dynamic libraries are added at every collection, since they may */
1137 /* change. */
1140 # endif /* ! AMIGA */
1141 # endif /* ! MSWIN32 && ! MSWINCE*/
1142 # endif /* ! OS2 */
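/* Illustrative aside, kept under #if 0: on many of the traditional Unix      */
/* targets above, DATASTART/DATAEND ultimately come from linker-provided      */
/* symbols and the current break.  This sketch assumes a linker that defines  */
/* etext/edata/end (as glibc and most classic Unices do) and a usable sbrk.   */
#if 0
#include <stdio.h>
#include <unistd.h>

extern char etext, edata, end;        /* provided by the linker */

int main(void)
{
    printf("etext   = %p\n", (void *)&etext);   /* end of text */
    printf("edata   = %p\n", (void *)&edata);   /* end of initialized data */
    printf("end     = %p\n", (void *)&end);     /* end of BSS */
    printf("sbrk(0) = %p\n", sbrk(0));          /* current break */
    return 0;
}
#endif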
1145 * Auxiliary routines for obtaining memory from OS.
1148 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1149 && !defined(MSWIN32) && !defined(MSWINCE) \
1150 && !defined(MACOS) && !defined(DOS4GW)
1152 # ifdef SUNOS4
1153 extern caddr_t sbrk();
1154 # endif
1155 # ifdef __STDC__
1156 # define SBRK_ARG_T ptrdiff_t
1157 # else
1158 # define SBRK_ARG_T int
1159 # endif
1162 # ifdef RS6000
1163 /* The compiler seems to generate speculative reads one past the end of */
1164 /* an allocated object. Hence we need to make sure that the page */
1165 /* following the last heap page is also mapped. */
1166 ptr_t GC_unix_get_mem(bytes)
1167 word bytes;
1169 caddr_t cur_brk = (caddr_t)sbrk(0);
1170 caddr_t result;
1171 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1172 static caddr_t my_brk_val = 0;
1174 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1175 if (lsbs != 0) {
1176 if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
1178 if (cur_brk == my_brk_val) {
1179 /* Use the extra block we allocated last time. */
1180 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1181 if (result == (caddr_t)(-1)) return(0);
1182 result -= GC_page_size;
1183 } else {
1184 result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
1185 if (result == (caddr_t)(-1)) return(0);
1187 my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
1188 return((ptr_t)result);
1191 #else /* Not RS6000 */
1193 #if defined(USE_MMAP)
1194 /* Tested only under Linux, IRIX5 and Solaris 2 */
1196 #ifdef USE_MMAP_FIXED
1197 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1198 /* Seems to yield better performance on Solaris 2, but can */
1199 /* be unreliable if something is already mapped at the address. */
1200 #else
1201 # define GC_MMAP_FLAGS MAP_PRIVATE
1202 #endif
1204 #ifndef HEAP_START
1205 # define HEAP_START 0
1206 #endif
1208 ptr_t GC_unix_get_mem(bytes)
1209 word bytes;
1211 static GC_bool initialized = FALSE;
1212 static int fd;
1213 void *result;
1214 static ptr_t last_addr = HEAP_START;
1216 if (!initialized) {
1217 fd = open("/dev/zero", O_RDONLY);
1218 initialized = TRUE;
1220 if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
1221 result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1222 GC_MMAP_FLAGS, fd, 0/* offset */);
1223 if (result == MAP_FAILED) return(0);
1224 last_addr = (ptr_t)result + bytes + GC_page_size - 1;
1225 last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
1226 # if !defined(LINUX)
1227 if (last_addr == 0) {
1228 /* Oops. We got the end of the address space. This isn't */
1229 /* usable by arbitrary C code, since one-past-end pointers */
1230 /* don't work, so we discard it and try again. */
1231 munmap(result, (size_t)(-GC_page_size) - (size_t)result);
1232 /* Leave last page mapped, so we can't repeat. */
1233 return GC_unix_get_mem(bytes);
1235 # else
1236 GC_ASSERT(last_addr != 0);
1237 # endif
1238 return((ptr_t)result);
1241 #else /* Not RS6000, not USE_MMAP */
1242 ptr_t GC_unix_get_mem(bytes)
1243 word bytes;
1245 ptr_t result;
1246 # ifdef IRIX5
1247 /* Bare sbrk isn't thread safe. Play by malloc rules. */
1248 /* The equivalent may be needed on other systems as well. */
1249 __LOCK_MALLOC();
1250 # endif
1252 ptr_t cur_brk = (ptr_t)sbrk(0);
1253 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1255 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1256 if (lsbs != 0) {
1257 if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
1259 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1260 if (result == (ptr_t)(-1)) result = 0;
1262 # ifdef IRIX5
1263 __UNLOCK_MALLOC();
1264 # endif
1265 return(result);
1268 #endif /* Not USE_MMAP */
1269 #endif /* Not RS6000 */
1271 # endif /* UN*X */
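/* Illustrative aside, kept under #if 0: a standalone version of the          */
/* allocation idiom used by the USE_MMAP branch of GC_unix_get_mem above,     */
/* assuming POSIX mmap and a readable /dev/zero.  Sizes must be a multiple    */
/* of the page size, and the kernel is left to choose the address.            */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    size_t bytes = 16 * (size_t)page;           /* must be a page multiple */
    int fd = open("/dev/zero", O_RDONLY);
    void *p;

    if (fd < 0) return 1;
    p = mmap(0, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (p == MAP_FAILED) return 1;
    printf("got %lu zero-filled bytes at %p\n", (unsigned long)bytes, p);
    munmap(p, bytes);
    close(fd);
    return 0;
}
#endif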
1273 # ifdef OS2
1275 void * os2_alloc(size_t bytes)
1277 void * result;
1279 if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
1280 PAG_WRITE | PAG_COMMIT)
1281 != NO_ERROR) {
1282 return(0);
1284 if (result == 0) return(os2_alloc(bytes));
1285 return(result);
1288 # endif /* OS2 */
1291 # if defined(MSWIN32) || defined(MSWINCE)
1292 SYSTEM_INFO GC_sysinfo;
1293 # endif
1296 # ifdef MSWIN32
1297 word GC_n_heap_bases = 0;
1299 ptr_t GC_win32_get_mem(bytes)
1300 word bytes;
1302 ptr_t result;
1304 if (GC_win32s) {
1305 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1306 /* There are also unconfirmed rumors of other */
1307 /* problems, so we dodge the issue. */
1308 result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
1309 result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
1310 } else {
1311 result = (ptr_t) VirtualAlloc(NULL, bytes,
1312 MEM_COMMIT | MEM_RESERVE,
1313 PAGE_EXECUTE_READWRITE);
1315 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1316 /* If I read the documentation correctly, this can */
1317 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1318 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1319 GC_heap_bases[GC_n_heap_bases++] = result;
1320 return(result);
1323 void GC_win32_free_heap ()
1325 if (GC_win32s) {
1326 while (GC_n_heap_bases > 0) {
1327 GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
1328 GC_heap_bases[GC_n_heap_bases] = 0;
1332 # endif
1334 #ifdef AMIGA
1335 # define GC_AMIGA_AM
1336 # include "AmigaOS.c"
1337 # undef GC_AMIGA_AM
1338 #endif
1341 # ifdef MSWINCE
1342 word GC_n_heap_bases = 0;
1344 ptr_t GC_wince_get_mem(bytes)
1345 word bytes;
1347 ptr_t result;
1348 word i;
1350 /* Round up allocation size to multiple of page size */
1351 bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);
1353 /* Try to find reserved, uncommitted pages */
1354 for (i = 0; i < GC_n_heap_bases; i++) {
1355 if (((word)(-(signed_word)GC_heap_lengths[i])
1356 & (GC_sysinfo.dwAllocationGranularity-1))
1357 >= bytes) {
1358 result = GC_heap_bases[i] + GC_heap_lengths[i];
1359 break;
1363 if (i == GC_n_heap_bases) {
1364 /* Reserve more pages */
1365 word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
1366 & ~(GC_sysinfo.dwAllocationGranularity-1);
1367 result = (ptr_t) VirtualAlloc(NULL, res_bytes,
1368 MEM_RESERVE | MEM_TOP_DOWN,
1369 PAGE_EXECUTE_READWRITE);
1370 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1371 /* If I read the documentation correctly, this can */
1372 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1373 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1374 GC_heap_bases[GC_n_heap_bases] = result;
1375 GC_heap_lengths[GC_n_heap_bases] = 0;
1376 GC_n_heap_bases++;
1379 /* Commit pages */
1380 result = (ptr_t) VirtualAlloc(result, bytes,
1381 MEM_COMMIT,
1382 PAGE_EXECUTE_READWRITE);
1383 if (result != NULL) {
1384 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1385 GC_heap_lengths[i] += bytes;
1388 return(result);
1390 # endif
1392 #ifdef USE_MUNMAP
1394 /* For now, this only works on Win32/WinCE and some Unix-like */
1395 /* systems. If you have something else, don't define */
1396 /* USE_MUNMAP. */
1397 /* We assume ANSI C to support this feature. */
1399 #if !defined(MSWIN32) && !defined(MSWINCE)
1401 #include <unistd.h>
1402 #include <sys/mman.h>
1403 #include <sys/stat.h>
1404 #include <sys/types.h>
1406 #endif
1408 /* Compute a page aligned starting address for the unmap */
1409 /* operation on a block of size bytes starting at start. */
1410 /* Return 0 if the block is too small to make this feasible. */
1411 ptr_t GC_unmap_start(ptr_t start, word bytes)
1413 ptr_t result = start;
1414 /* Round start to next page boundary. */
1415 result += GC_page_size - 1;
1416 result = (ptr_t)((word)result & ~(GC_page_size - 1));
1417 if (result + GC_page_size > start + bytes) return 0;
1418 return result;
1421 /* Compute end address for an unmap operation on the indicated */
1422 /* block. */
1423 ptr_t GC_unmap_end(ptr_t start, word bytes)
1425 ptr_t end_addr = start + bytes;
1426 end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
1427 return end_addr;
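/* Worked example of the rounding above, assuming GC_page_size == 4096:       */
/* for start == 0x12340 and bytes == 0x3000, GC_unmap_start rounds up to      */
/* 0x13000 and GC_unmap_end rounds 0x15340 down to 0x15000, so only the       */
/* fully covered pages in [0x13000, 0x15000) are ever unmapped; the partial   */
/* pages at either end stay mapped.                                           */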
1430 /* Under Win32/WinCE we commit (map) and decommit (unmap) */
1431 /* memory using VirtualAlloc and VirtualFree. These functions */
1432 /* work on individual allocations of virtual memory, made */
1433 /* previously using VirtualAlloc with the MEM_RESERVE flag. */
1434 /* The ranges we need to (de)commit may span several of these */
1435 /* allocations; therefore we use VirtualQuery to check */
1436 /* allocation lengths, and split up the range as necessary. */
1438 /* We assume that GC_remap is called on exactly the same range */
1439 /* as a previous call to GC_unmap. It is safe to consistently */
1440 /* round the endpoints in both places. */
1441 void GC_unmap(ptr_t start, word bytes)
1443 ptr_t start_addr = GC_unmap_start(start, bytes);
1444 ptr_t end_addr = GC_unmap_end(start, bytes);
1445 word len = end_addr - start_addr;
1446 if (0 == start_addr) return;
1447 # if defined(MSWIN32) || defined(MSWINCE)
1448 while (len != 0) {
1449 MEMORY_BASIC_INFORMATION mem_info;
1450 GC_word free_len;
1451 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1452 != sizeof(mem_info))
1453 ABORT("Weird VirtualQuery result");
1454 free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1455 if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
1456 ABORT("VirtualFree failed");
1457 GC_unmapped_bytes += free_len;
1458 start_addr += free_len;
1459 len -= free_len;
1461 # else
1462 if (munmap(start_addr, len) != 0) ABORT("munmap failed");
1463 GC_unmapped_bytes += len;
1464 # endif
1468 void GC_remap(ptr_t start, word bytes)
1470 static int zero_descr = -1;
1471 ptr_t start_addr = GC_unmap_start(start, bytes);
1472 ptr_t end_addr = GC_unmap_end(start, bytes);
1473 word len = end_addr - start_addr;
1474 ptr_t result;
1476 # if defined(MSWIN32) || defined(MSWINCE)
1477 if (0 == start_addr) return;
1478 while (len != 0) {
1479 MEMORY_BASIC_INFORMATION mem_info;
1480 GC_word alloc_len;
1481 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1482 != sizeof(mem_info))
1483 ABORT("Weird VirtualQuery result");
1484 alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1485 result = VirtualAlloc(start_addr, alloc_len,
1486 MEM_COMMIT,
1487 PAGE_EXECUTE_READWRITE);
1488 if (result != start_addr) {
1489 ABORT("VirtualAlloc remapping failed");
1491 GC_unmapped_bytes -= alloc_len;
1492 start_addr += alloc_len;
1493 len -= alloc_len;
1495 # else
1496 if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
1497 if (0 == start_addr) return;
1498 result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1499 MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
1500 if (result != start_addr) {
1501 ABORT("mmap remapping failed");
1503 GC_unmapped_bytes -= len;
1504 # endif
1507 /* Two adjacent blocks have already been unmapped and are about to */
1508 /* be merged. Unmap the whole block. This typically requires */
1509 /* that we unmap a small section in the middle that was not previously */
1510 /* unmapped due to alignment constraints. */
1511 void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
1513 ptr_t start1_addr = GC_unmap_start(start1, bytes1);
1514 ptr_t end1_addr = GC_unmap_end(start1, bytes1);
1515 ptr_t start2_addr = GC_unmap_start(start2, bytes2);
1516 ptr_t end2_addr = GC_unmap_end(start2, bytes2);
1517 ptr_t start_addr = end1_addr;
1518 ptr_t end_addr = start2_addr;
1519 word len;
1520 GC_ASSERT(start1 + bytes1 == start2);
1521 if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
1522 if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
1523 if (0 == start_addr) return;
1524 len = end_addr - start_addr;
1525 # if defined(MSWIN32) || defined(MSWINCE)
1526 while (len != 0) {
1527 MEMORY_BASIC_INFORMATION mem_info;
1528 GC_word free_len;
1529 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1530 != sizeof(mem_info))
1531 ABORT("Weird VirtualQuery result");
1532 free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1533 if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
1534 ABORT("VirtualFree failed");
1535 GC_unmapped_bytes += free_len;
1536 start_addr += free_len;
1537 len -= free_len;
1539 # else
1540 if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
1541 GC_unmapped_bytes += len;
1542 # endif
1545 #endif /* USE_MUNMAP */
1547 /* Routine for pushing any additional roots. In THREADS */
1548 /* environment, this is also responsible for marking from */
1549 /* thread stacks. */
1550 #ifndef THREADS
1551 void (*GC_push_other_roots)() = 0;
1552 #else /* THREADS */
1554 # ifdef PCR
1555 PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
1557 struct PCR_ThCtl_TInfoRep info;
1558 PCR_ERes result;
1560 info.ti_stkLow = info.ti_stkHi = 0;
1561 result = PCR_ThCtl_GetInfo(t, &info);
1562 GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
1563 return(result);
1566 /* Push the contents of an old object. We treat this as stack */
 1567 /* data only because that makes it robust against mark stack           */
1568 /* overflow. */
1569 PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
1571 GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
1572 return(PCR_ERes_okay);
1576 void GC_default_push_other_roots GC_PROTO((void))
1578 /* Traverse data allocated by previous memory managers. */
1580 extern struct PCR_MM_ProcsRep * GC_old_allocator;
1582 if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
1583 GC_push_old_obj, 0)
1584 != PCR_ERes_okay) {
1585 ABORT("Old object enumeration failed");
1588 /* Traverse all thread stacks. */
1589 if (PCR_ERes_IsErr(
1590 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
1591 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1592 ABORT("Thread stack marking failed\n");
1596 # endif /* PCR */
1598 # ifdef SRC_M3
1600 # ifdef ALL_INTERIOR_POINTERS
1601 --> misconfigured
1602 # endif
1604 void GC_push_thread_structures GC_PROTO((void))
 1606   /* Not our responsibility. */
1609 extern void ThreadF__ProcessStacks();
1611 void GC_push_thread_stack(start, stop)
1612 word start, stop;
1614 GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
1617 /* Push routine with M3 specific calling convention. */
1618 GC_m3_push_root(dummy1, p, dummy2, dummy3)
1619 word *p;
1620 ptr_t dummy1, dummy2;
1621 int dummy3;
1623 word q = *p;
1625 GC_PUSH_ONE_STACK(q, p);
1628 /* M3 set equivalent to RTHeap.TracedRefTypes */
1629 typedef struct { int elts[1]; } RefTypeSet;
1630 RefTypeSet GC_TracedRefTypes = {{0x1}};
1632 void GC_default_push_other_roots GC_PROTO((void))
1634 /* Use the M3 provided routine for finding static roots. */
1635 /* This is a bit dubious, since it presumes no C roots. */
1636 /* We handle the collector roots explicitly in GC_push_roots */
1637 RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
1638 if (GC_words_allocd > 0) {
1639 ThreadF__ProcessStacks(GC_push_thread_stack);
1641 /* Otherwise this isn't absolutely necessary, and we have */
1642 /* startup ordering problems. */
1645 # endif /* SRC_M3 */
1647 # if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
1648 || defined(IRIX_THREADS) || defined(LINUX_THREADS) \
1649 || defined(HPUX_THREADS)
1651 extern void GC_push_all_stacks();
1653 void GC_default_push_other_roots GC_PROTO((void))
1655 GC_push_all_stacks();
1658 # endif /* SOLARIS_THREADS || ... */
1660 void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;
1662 #endif
1665 * Routines for accessing dirty bits on virtual pages.
1666 * We plan to eventually implement four strategies for doing so:
1667 * DEFAULT_VDB: A simple dummy implementation that treats every page
1668 * as possibly dirty. This makes incremental collection
1669 * useless, but the implementation is still correct.
 1670  * PCR_VDB:     Use PPCR's virtual dirty bit facility.
1671 * PROC_VDB: Use the /proc facility for reading dirty bits. Only
1672 * works under some SVR4 variants. Even then, it may be
1673 * too slow to be entirely satisfactory. Requires reading
1674 * dirty bits for entire address space. Implementations tend
1675 * to assume that the client is a (slow) debugger.
1676 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
1677 * dirtied pages. The implementation (and implementability)
1678 * is highly system dependent. This usually fails when system
1679 * calls write to a protected page. We prevent the read system
 1680  *              call from doing so. It is the client's responsibility to
1681 * make sure that other system calls are similarly protected
1682 * or write only to the stack.
1685 GC_bool GC_dirty_maintained = FALSE;
1687 # ifdef DEFAULT_VDB
1689 /* All of the following assume the allocation lock is held, and */
1690 /* signals are disabled. */
1692 /* The client asserts that unallocated pages in the heap are never */
1693 /* written. */
1695 /* Initialize virtual dirty bit implementation. */
1696 void GC_dirty_init()
1698 GC_dirty_maintained = TRUE;
1701 /* Retrieve system dirty bits for heap to a local buffer. */
 1702 /* Restore the system's notion of which pages are dirty.               */
1703 void GC_read_dirty()
1706 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1707 /* If the actual page size is different, this returns TRUE if any */
1708 /* of the pages overlapping h are dirty. This routine may err on the */
1709 /* side of labelling pages as dirty (and this implementation does). */
1710 /*ARGSUSED*/
1711 GC_bool GC_page_was_dirty(h)
1712 struct hblk *h;
1714 return(TRUE);
1718 * The following two routines are typically less crucial. They matter
1719 * most with large dynamic libraries, or if we can't accurately identify
1720 * stacks, e.g. under Solaris 2.X. Otherwise the following default
1721 * versions are adequate.
1724 /* Could any valid GC heap pointer ever have been written to this page? */
1725 /*ARGSUSED*/
1726 GC_bool GC_page_was_ever_dirty(h)
1727 struct hblk *h;
1729 return(TRUE);
1732 /* Reset the n pages starting at h to "was never dirty" status. */
1733 void GC_is_fresh(h, n)
1734 struct hblk *h;
1735 word n;
1739 /* A call hints that h is about to be written. */
1740 /* May speed up some dirty bit implementations. */
1741 /*ARGSUSED*/
1742 void GC_write_hint(h)
1743 struct hblk *h;
1747 # endif /* DEFAULT_VDB */
1750 # ifdef MPROTECT_VDB
1753 * See DEFAULT_VDB for interface descriptions.
1757 * This implementation maintains dirty bits itself by catching write
1758 * faults and keeping track of them. We assume nobody else catches
1759 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
1760 * except as a result of a read system call. This means clients must
1761 * either ensure that system calls do not touch the heap, or must
1762 * provide their own wrappers analogous to the one for read.
1763 * We assume the page size is a multiple of HBLKSIZE.
1764 * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
1765 * tried to use portable code where easily possible. It is known
1766 * not to work under a number of other systems.
1769 # if !defined(MSWIN32) && !defined(MSWINCE)
1771 # include <sys/mman.h>
1772 # include <signal.h>
1773 # include <sys/syscall.h>
1775 # define PROTECT(addr, len) \
1776 if (mprotect((caddr_t)(addr), (size_t)(len), \
1777 PROT_READ | OPT_PROT_EXEC) < 0) { \
1778 ABORT("mprotect failed"); \
1780 # define UNPROTECT(addr, len) \
1781 if (mprotect((caddr_t)(addr), (size_t)(len), \
1782 PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1783 ABORT("un-mprotect failed"); \
1786 # else
1788 # ifndef MSWINCE
1789 # include <signal.h>
1790 # endif
1792 static DWORD protect_junk;
1793 # define PROTECT(addr, len) \
1794 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1795 &protect_junk)) { \
1796 DWORD last_error = GetLastError(); \
1797 GC_printf1("Last error code: %lx\n", last_error); \
1798 ABORT("VirtualProtect failed"); \
1800 # define UNPROTECT(addr, len) \
1801 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1802 &protect_junk)) { \
1803 ABORT("un-VirtualProtect failed"); \
1806 # endif
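/* Illustrative aside, kept under #if 0: a standalone sketch of the write-    */
/* barrier idea behind MPROTECT_VDB, assuming POSIX sigaction with SA_SIGINFO */
/* and a valid si_addr (e.g. modern Linux), plus MAP_ANONYMOUS.  Protect a    */
/* page read-only, let the first store fault, record the page as dirty in     */
/* the handler, unprotect it, and let the store retry.                        */
#if 0
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static long page_size;
static volatile int page_dirtied = 0;

static void on_write_fault(int sig, siginfo_t *si, void *ctx)
{
    char *page = (char *)((unsigned long)si->si_addr
                          & ~(unsigned long)(page_size - 1));
    (void)sig; (void)ctx;
    page_dirtied = 1;                                   /* "set the dirty bit" */
    mprotect(page, page_size, PROT_READ | PROT_WRITE);  /* UNPROTECT(), then retry */
}

int main(void)
{
    struct sigaction act;
    char *p;

    page_size = sysconf(_SC_PAGESIZE);
    p = mmap(0, page_size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return 1;

    memset(&act, 0, sizeof act);
    act.sa_sigaction = on_write_fault;
    act.sa_flags = SA_SIGINFO;
    sigemptyset(&act.sa_mask);
    sigaction(SIGSEGV, &act, 0);

    mprotect(p, page_size, PROT_READ);                  /* PROTECT() */
    p[0] = 1;                                           /* faults once, then succeeds */
    printf("page dirtied: %d\n", page_dirtied);
    return 0;
}
#endif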
1808 #if defined(SUNOS4) || defined(FREEBSD)
1809 typedef void (* SIG_PF)();
1810 #endif
1811 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) || defined(MACOSX)
1812 # ifdef __STDC__
1813 typedef void (* SIG_PF)(int);
1814 # else
1815 typedef void (* SIG_PF)();
1816 # endif
1817 #endif
1818 #if defined(MSWIN32)
1819 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
1820 # undef SIG_DFL
1821 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1822 #endif
1823 #if defined(MSWINCE)
1824 typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
1825 # undef SIG_DFL
1826 # define SIG_DFL (SIG_PF) (-1)
1827 #endif
1829 #if defined(IRIX5) || defined(OSF1)
1830 typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1831 #endif
1832 #if defined(SUNOS5SIGS)
1833 # ifdef HPUX
1834 # define SIGINFO __siginfo
1835 # else
1836 # define SIGINFO siginfo
1837 # endif
1838 # ifdef __STDC__
1839 typedef void (* REAL_SIG_PF)(int, struct SIGINFO *, void *);
1840 # else
1841 typedef void (* REAL_SIG_PF)();
1842 # endif
1843 #endif
1844 #if defined(LINUX)
1845 # include <linux/version.h>
1846 # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(IA64)
1847 typedef struct sigcontext s_c;
1848 # else
1849 typedef struct sigcontext_struct s_c;
1850 # endif
1851 # if defined(ALPHA) || defined(M68K)
1852 typedef void (* REAL_SIG_PF)(int, int, s_c *);
1853 # else
1854 # if defined(IA64) || defined(HP_PA)
1855 typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
1856 # else
1857 typedef void (* REAL_SIG_PF)(int, s_c);
1858 # endif
1859 # endif
1860 # ifdef ALPHA
1861 /* Retrieve fault address from sigcontext structure by decoding */
1862 /* instruction. */
1863 char * get_fault_addr(s_c *sc) {
1864 unsigned instr;
1865 word faultaddr;
1867 instr = *((unsigned *)(sc->sc_pc));
1868 faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
1869 faultaddr += (word) (((int)instr << 16) >> 16);
1870 return (char *)faultaddr;
 1872 #   endif /* ALPHA */
1873 # endif
1875 # if defined(MACOSX) /* Should also test for PowerPC? */
1876 typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1878 /* Decodes the machine instruction which was responsible for the sending of the
1879 SIGBUS signal. Sadly this is the only way to find the faulting address because
1880 the signal handler doesn't get it directly from the kernel (although it is
 1881    available on the Mach level, but dropped by the BSD personality before it
1882 calls our signal handler...)
1883 This code should be able to deal correctly with all PPCs starting from the
1884 601 up to and including the G4s (including Velocity Engine). */
1885 #define EXTRACT_OP1(iw) (((iw) & 0xFC000000) >> 26)
1886 #define EXTRACT_OP2(iw) (((iw) & 0x000007FE) >> 1)
1887 #define EXTRACT_REGA(iw) (((iw) & 0x001F0000) >> 16)
1888 #define EXTRACT_REGB(iw) (((iw) & 0x03E00000) >> 21)
1889 #define EXTRACT_REGC(iw) (((iw) & 0x0000F800) >> 11)
1890 #define EXTRACT_DISP(iw) ((short *) &(iw))[1]
1892 static char *get_fault_addr(struct sigcontext *scp)
1894 unsigned int instr = *((unsigned int *) scp->sc_ir);
1895 unsigned int * regs = &((unsigned int *) scp->sc_regs)[2];
1896 int disp = 0, tmp;
1897 unsigned int baseA = 0, baseB = 0;
1898 unsigned int addr, alignmask = 0xFFFFFFFF;
1900 #ifdef GC_DEBUG_DECODER
1901 GC_err_printf1("Instruction: 0x%lx\n", instr);
 1902   GC_err_printf1("Opcode 1: %d\n", (int)EXTRACT_OP1(instr));
1903 #endif
1904 switch(EXTRACT_OP1(instr)) {
1905 case 38: /* stb */
1906 case 39: /* stbu */
1907 case 54: /* stfd */
1908 case 55: /* stfdu */
1909 case 52: /* stfs */
1910 case 53: /* stfsu */
1911 case 44: /* sth */
1912 case 45: /* sthu */
1913 case 47: /* stmw */
1914 case 36: /* stw */
1915 case 37: /* stwu */
1916 tmp = EXTRACT_REGA(instr);
1917 if(tmp > 0)
1918 baseA = regs[tmp];
1919 disp = EXTRACT_DISP(instr);
1920 break;
1921 case 31:
1922 #ifdef GC_DEBUG_DECODER
1923 GC_err_printf1("Opcode 2: %d\n", (int)EXTRACT_OP2(instr));
1924 #endif
1925 switch(EXTRACT_OP2(instr)) {
1926 case 86: /* dcbf */
1927 case 54: /* dcbst */
1928 case 1014: /* dcbz */
1929 case 247: /* stbux */
1930 case 215: /* stbx */
1931 case 759: /* stfdux */
1932 case 727: /* stfdx */
1933 case 983: /* stfiwx */
1934 case 695: /* stfsux */
1935 case 663: /* stfsx */
1936 case 918: /* sthbrx */
1937 case 439: /* sthux */
1938 case 407: /* sthx */
1939 case 661: /* stswx */
1940 case 662: /* stwbrx */
1941 case 150: /* stwcx. */
1942 case 183: /* stwux */
1943 case 151: /* stwx */
1944 case 135: /* stvebx */
1945 case 167: /* stvehx */
1946 case 199: /* stvewx */
1947 case 231: /* stvx */
1948 case 487: /* stvxl */
1949 tmp = EXTRACT_REGA(instr);
1950 if(tmp > 0)
1951 baseA = regs[tmp];
1952 baseB = regs[EXTRACT_REGC(instr)];
1953 /* determine Altivec alignment mask */
1954 switch(EXTRACT_OP2(instr)) {
1955 case 167: /* stvehx */
1956 alignmask = 0xFFFFFFFE;
1957 break;
1958 case 199: /* stvewx */
1959 alignmask = 0xFFFFFFFC;
1960 break;
1961 case 231: /* stvx */
1962 alignmask = 0xFFFFFFF0;
1963 break;
1964 case 487: /* stvxl */
1965 alignmask = 0xFFFFFFF0;
1966 break;
1968 break;
1969 case 725: /* stswi */
1970 tmp = EXTRACT_REGA(instr);
1971 if(tmp > 0)
1972 baseA = regs[tmp];
1973 break;
1974 default: /* ignore instruction */
1975 #ifdef GC_DEBUG_DECODER
1976 GC_err_printf("Ignored by inner handler\n");
1977 #endif
1978 return NULL;
1979 break;
1981 break;
1982 default: /* ignore instruction */
1983 #ifdef GC_DEBUG_DECODER
1984 GC_err_printf("Ignored by main handler\n");
1985 #endif
1986 return NULL;
1987 break;
1990 addr = (baseA + baseB) + disp;
1991 addr &= alignmask;
1992 #ifdef GC_DEBUG_DECODER
1993 GC_err_printf1("BaseA: %d\n", baseA);
1994 GC_err_printf1("BaseB: %d\n", baseB);
1995 GC_err_printf1("Disp: %d\n", disp);
1996 GC_err_printf1("Address: %d\n", addr);
1997 #endif
1998 return (char *)addr;
2000 #endif /* MACOSX */
2002 SIG_PF GC_old_bus_handler;
2003 SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
2005 #ifdef THREADS
2006 /* We need to lock around the bitmap update in the write fault handler */
2007 /* in order to avoid the risk of losing a bit. We do this with a */
2008 /* test-and-set spin lock if we know how to do that. Otherwise we */
2009 /* check whether we are already in the handler and use the dumb but */
2010 /* safe fallback algorithm of setting all bits in the word. */
2011 /* Contention should be very rare, so we do the minimum to handle it */
2012 /* correctly. */
2013 #ifdef GC_TEST_AND_SET_DEFINED
2014 static VOLATILE unsigned int fault_handler_lock = 0;
2015 void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
2016 while (GC_test_and_set(&fault_handler_lock));
2017 /* Could also revert to set_pht_entry_from_index_safe if initial */
2018 /* GC_test_and_set fails. */
2019 set_pht_entry_from_index(db, index);
2020 GC_clear(&fault_handler_lock);
2022 #else /* !GC_TEST_AND_SET_DEFINED */
2023 /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
2024 /* just before we notice the conflict and correct it, and we may end */
2025 /* up looking at it while it is wrong. But that requires contention */
2026 /* exactly when a GC is triggered, which seems far less likely than */
2027 /* a failure of the old code, which had no reported failures. Thus we */
2028 /* leave it this way while we think of something better, or support */
2029 /* GC_test_and_set on the remaining platforms. */
2030 static VOLATILE word currently_updating = 0;
2031 void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
2032 unsigned int update_dummy;
2033 currently_updating = (word)(&update_dummy);
2034 set_pht_entry_from_index(db, index);
2035 /* If we get contention in the 10 or so instruction window here, */
2036 /* and we get stopped by a GC between the two updates, we lose! */
2037 if (currently_updating != (word)(&update_dummy)) {
2038 set_pht_entry_from_index_safe(db, index);
2039 /* We claim that if two threads concurrently try to update the */
2040 /* dirty bit vector, the first one to set currently_updating */
2041 /* will see it changed when it re-reads it in the test above. */
2042 /* (Note that &update_dummy must differ in two distinct threads.) */
2043 /* It will then execute set_pht_entry_from_index_safe, thus */
2044 /* returning us to a safe state, though not soon enough. */
2047 #endif /* !GC_TEST_AND_SET_DEFINED */
2048 #else /* !THREADS */
2049 # define async_set_pht_entry_from_index(db, index) \
2050 set_pht_entry_from_index(db, index)
2051 #endif /* !THREADS */
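/* Illustrative sketch (not used by the collector): the test-and-set */
/* protocol that async_set_pht_entry_from_index relies on, expressed */
/* with the GCC __sync builtins for compilers that provide them. The */
/* names below are hypothetical and the block is deliberately not */
/* compiled. */
#if 0
static volatile int example_fault_lock = 0;

static void example_async_set_pht_entry(volatile page_hash_table db, int index)
{
    /* Spin until the lock is acquired; contention should be very rare. */
    while (__sync_lock_test_and_set(&example_fault_lock, 1)) {
	/* spin */
    }
    set_pht_entry_from_index(db, index);
    __sync_lock_release(&example_fault_lock);
}
#endif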
2053 /*ARGSUSED*/
2054 # if defined (SUNOS4) || defined(FREEBSD)
2055 void GC_write_fault_handler(sig, code, scp, addr)
2056 int sig, code;
2057 struct sigcontext *scp;
2058 char * addr;
2059 # ifdef SUNOS4
2060 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2061 # define CODE_OK (FC_CODE(code) == FC_PROT \
2062 || (FC_CODE(code) == FC_OBJERR \
2063 && FC_ERRNO(code) == FC_PROT))
2064 # endif
2065 # ifdef FREEBSD
2066 # define SIG_OK (sig == SIGBUS)
2067 # define CODE_OK (code == BUS_PAGE_FAULT)
2068 # endif
2069 # endif
2070 # if defined(IRIX5) || defined(OSF1)
2071 # include <errno.h>
2072 void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
2073 # define SIG_OK (sig == SIGSEGV)
2074 # ifdef OSF1
2075 # define CODE_OK (code == 2 /* experimentally determined */)
2076 # endif
2077 # ifdef IRIX5
2078 # define CODE_OK (code == EACCES)
2079 # endif
2080 # endif
2081 # if defined(LINUX)
2082 # if defined(ALPHA) || defined(M68K)
2083 void GC_write_fault_handler(int sig, int code, s_c * sc)
2084 # else
2085 # if defined(IA64) || defined(HP_PA)
2086 void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
2087 # else
2088 void GC_write_fault_handler(int sig, s_c sc)
2089 # endif
2090 # endif
2091 # define SIG_OK (sig == SIGSEGV)
2092 # define CODE_OK TRUE
2093 /* Empirically, c.trapno == 14 on IA32, but is that useful? */
2094 /* Should probably consider alignment issues on other */
2095 /* architectures. */
2096 # endif
2097 # if defined(SUNOS5SIGS)
2098 # ifdef __STDC__
2099 void GC_write_fault_handler(int sig, struct SIGINFO *scp, void * context)
2100 # else
2101 void GC_write_fault_handler(sig, scp, context)
2102 int sig;
2103 struct SIGINFO *scp;
2104 void * context;
2105 # endif
2106 # ifdef HPUX
2107 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2108 # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
2109 || (scp -> si_code == BUS_ADRERR) \
2110 || (scp -> si_code == BUS_UNKNOWN) \
2111 || (scp -> si_code == SEGV_UNKNOWN) \
2112 || (scp -> si_code == BUS_OBJERR)
2113 # else
2114 # define SIG_OK (sig == SIGSEGV)
2115 # define CODE_OK (scp -> si_code == SEGV_ACCERR)
2116 # endif
2117 # endif
2119 # if defined(MACOSX)
2120 void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
2121 # define SIG_OK (sig == SIGBUS)
2122 # define CODE_OK (code == 0 /* experimentally determined */)
2123 # endif
2125 # if defined(MSWIN32) || defined(MSWINCE)
2126 LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
2127 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
2128 STATUS_ACCESS_VIOLATION)
2129 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
2130 /* Write fault */
2131 # endif
2133 register unsigned i;
2134 # ifdef IRIX5
2135 char * addr = (char *) (size_t) (scp -> sc_badvaddr);
2136 # endif
2137 # if defined(OSF1) && defined(ALPHA)
2138 char * addr = (char *) (scp -> sc_traparg_a0);
2139 # endif
2140 # ifdef SUNOS5SIGS
2141 char * addr = (char *) (scp -> si_addr);
2142 # endif
2143 # ifdef LINUX
2144 # ifdef I386
2145 char * addr = (char *) (sc.cr2);
2146 # else
2147 # if defined(M68K)
2148 char * addr = NULL;
2150 struct sigcontext *scp = (struct sigcontext *)(sc);
2152 int format = (scp->sc_formatvec >> 12) & 0xf;
2153 unsigned long *framedata = (unsigned long *)(scp + 1);
2154 unsigned long ea;
2156 if (format == 0xa || format == 0xb) {
2157 /* 68020/030 */
2158 ea = framedata[2];
2159 } else if (format == 7) {
2160 /* 68040 */
2161 ea = framedata[3];
2162 if (framedata[1] & 0x08000000) {
2163 /* correct addr on misaligned access */
2164 ea = (ea+4095)&(~4095);
2166 } else if (format == 4) {
2167 /* 68060 */
2168 ea = framedata[0];
2169 if (framedata[1] & 0x08000000) {
2170 /* correct addr on misaligned access */
2171 ea = (ea+4095)&(~4095);
2174 addr = (char *)ea;
2175 # else
2176 # ifdef ALPHA
2177 char * addr = get_fault_addr(sc);
2178 # else
2179 # if defined(IA64) || defined(HP_PA)
2180 char * addr = si -> si_addr;
2181 /* I believe this is claimed to work on all platforms for */
2182 /* Linux 2.3.47 and later. Hopefully we don't have to */
2183 /* worry about earlier kernels on IA64. */
2184 # else
2185 # if defined(POWERPC)
2186 char * addr = (char *) (sc.regs->dar);
2187 # else
2188 --> architecture not supported
2189 # endif
2190 # endif
2191 # endif
2192 # endif
2193 # endif
2194 # endif
2195 # if defined(MACOSX)
2196 char * addr = get_fault_addr(scp);
2197 # endif
2198 # if defined(MSWIN32) || defined(MSWINCE)
2199 char * addr = (char *) (exc_info -> ExceptionRecord
2200 -> ExceptionInformation[1]);
2201 # define sig SIGSEGV
2202 # endif
2204 if (SIG_OK && CODE_OK) {
2205 register struct hblk * h =
2206 (struct hblk *)((word)addr & ~(GC_page_size-1));
2207 GC_bool in_allocd_block;
2209 # ifdef SUNOS5SIGS
2210 /* Address is only within the correct physical page. */
2211 in_allocd_block = FALSE;
2212 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2213 if (HDR(h+i) != 0) {
2214 in_allocd_block = TRUE;
2217 # else
2218 in_allocd_block = (HDR(addr) != 0);
2219 # endif
2220 if (!in_allocd_block) {
2221 /* Heap blocks now begin and end on page boundaries */
2222 SIG_PF old_handler;
2224 if (sig == SIGSEGV) {
2225 old_handler = GC_old_segv_handler;
2226 } else {
2227 old_handler = GC_old_bus_handler;
2229 if (old_handler == SIG_DFL) {
2230 # if !defined(MSWIN32) && !defined(MSWINCE)
2231 GC_err_printf1("Segfault at 0x%lx\n", addr);
2232 ABORT("Unexpected bus error or segmentation fault");
2233 # else
2234 return(EXCEPTION_CONTINUE_SEARCH);
2235 # endif
2236 } else {
2237 # if defined (SUNOS4) || defined(FREEBSD)
2238 (*old_handler) (sig, code, scp, addr);
2239 return;
2240 # endif
2241 # if defined (SUNOS5SIGS)
2242 (*(REAL_SIG_PF)old_handler) (sig, scp, context);
2243 return;
2244 # endif
2245 # if defined (LINUX)
2246 # if defined(ALPHA) || defined(M68K)
2247 (*(REAL_SIG_PF)old_handler) (sig, code, sc);
2248 # else
2249 # if defined(IA64) || defined(HP_PA)
2250 (*(REAL_SIG_PF)old_handler) (sig, si, scp);
2251 # else
2252 (*(REAL_SIG_PF)old_handler) (sig, sc);
2253 # endif
2254 # endif
2255 return;
2256 # endif
2257 # if defined (IRIX5) || defined(OSF1)
2258 (*(REAL_SIG_PF)old_handler) (sig, code, scp);
2259 return;
2260 # endif
2261 # ifdef MACOSX
2262 (*(REAL_SIG_PF)old_handler) (sig, code, scp);
2263 # endif
2264 # ifdef MSWIN32
2265 return((*old_handler)(exc_info));
2266 # endif
2269 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2270 register int index = PHT_HASH(h+i);
2272 async_set_pht_entry_from_index(GC_dirty_pages, index);
2274 UNPROTECT(h, GC_page_size);
2275 # if defined(OSF1) || defined(LINUX)
2276 /* These reset the signal handler each time by default. */
2277 signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
2278 # endif
2279 /* The write may not take place before dirty bits are read. */
2280 /* But then we'll fault again ... */
2281 # if defined(MSWIN32) || defined(MSWINCE)
2282 return(EXCEPTION_CONTINUE_EXECUTION);
2283 # else
2284 return;
2285 # endif
2287 #if defined(MSWIN32) || defined(MSWINCE)
2288 return EXCEPTION_CONTINUE_SEARCH;
2289 #else
2290 GC_err_printf1("Segfault at 0x%lx\n", addr);
2291 ABORT("Unexpected bus error or segmentation fault");
2292 #endif
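/* Illustrative sketch (not part of the collector): a minimal, */
/* self-contained version of the mprotect-based dirty-bit scheme */
/* implemented above, using only POSIX interfaces. It assumes a single */
/* thread and a single page-aligned region; the real handler above */
/* additionally validates the fault, chains to any previously installed */
/* handler and records the page in GC_dirty_pages. Deliberately not */
/* compiled. */
#if 0
#include <signal.h>
#include <sys/mman.h>

static char *example_region;		/* page-aligned, e.g. from mmap	*/
static size_t example_region_size;	/* multiple of the page size	*/
static volatile int example_was_written;

static void example_handler(int sig, siginfo_t *si, void *context)
{
    char *addr = (char *)si->si_addr;

    if (addr >= example_region
	&& addr < example_region + example_region_size) {
	example_was_written = 1;			/* the "dirty bit" */
	mprotect(example_region, example_region_size,
		 PROT_READ | PROT_WRITE);		/* let the store retry */
	return;
    }
    /* Not our fault: restore the default disposition and re-raise. */
    signal(sig, SIG_DFL);
    raise(sig);
}

static void example_track_writes(char *region, size_t size)
{
    struct sigaction act;

    example_region = region;
    example_region_size = size;
    act.sa_flags = SA_SIGINFO | SA_RESTART;
    act.sa_sigaction = example_handler;
    sigemptyset(&act.sa_mask);
    sigaction(SIGSEGV, &act, 0);
    mprotect(region, size, PROT_READ);	/* the next write will fault */
}
#endif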
2296 * We hold the allocation lock. We expect block h to be written
2297 * shortly.
2299 void GC_write_hint(h)
2300 struct hblk *h;
2302 register struct hblk * h_trunc;
2303 register unsigned i;
2304 register GC_bool found_clean;
2306 if (!GC_dirty_maintained) return;
2307 h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
2308 found_clean = FALSE;
2309 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2310 register int index = PHT_HASH(h_trunc+i);
2312 if (!get_pht_entry_from_index(GC_dirty_pages, index)) {
2313 found_clean = TRUE;
2314 async_set_pht_entry_from_index(GC_dirty_pages, index);
2317 if (found_clean) {
2318 UNPROTECT(h_trunc, GC_page_size);
2322 void GC_dirty_init()
2324 # if defined(SUNOS5SIGS) || defined(IRIX5) /* || defined(OSF1) */
2325 struct sigaction act, oldact;
2326 # ifdef IRIX5
2327 act.sa_flags = SA_RESTART;
2328 act.sa_handler = GC_write_fault_handler;
2329 # else
2330 act.sa_flags = SA_RESTART | SA_SIGINFO;
2331 act.sa_sigaction = GC_write_fault_handler;
2332 # endif
2333 (void)sigemptyset(&act.sa_mask);
2334 # endif
2335 # if defined(MACOSX)
2336 struct sigaction act, oldact;
2338 act.sa_flags = SA_RESTART;
2339 act.sa_handler = GC_write_fault_handler;
2340 sigemptyset(&act.sa_mask);
2341 # endif
2342 # ifdef PRINTSTATS
2343 GC_printf0("Initializing mprotect virtual dirty bit implementation\n");
2344 # endif
2345 GC_dirty_maintained = TRUE;
2346 if (GC_page_size % HBLKSIZE != 0) {
2347 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2348 ABORT("Page size not multiple of HBLKSIZE");
2350 # if defined(SUNOS4) || defined(FREEBSD)
2351 GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
2352 if (GC_old_bus_handler == SIG_IGN) {
2353 GC_err_printf0("Previously ignored bus error!?");
2354 GC_old_bus_handler = SIG_DFL;
2356 if (GC_old_bus_handler != SIG_DFL) {
2357 # ifdef PRINTSTATS
2358 GC_err_printf0("Replaced other SIGBUS handler\n");
2359 # endif
2361 # endif
2362 # if defined(OSF1) || defined(SUNOS4) || defined(LINUX)
2363 GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
2364 if (GC_old_segv_handler == SIG_IGN) {
2365 GC_err_printf0("Previously ignored segmentation violation!?");
2366 GC_old_segv_handler = SIG_DFL;
2368 if (GC_old_segv_handler != SIG_DFL) {
2369 # ifdef PRINTSTATS
2370 GC_err_printf0("Replaced other SIGSEGV handler\n");
2371 # endif
2373 # endif
2374 # if defined(SUNOS5SIGS) || defined(IRIX5)
2375 # if defined(IRIX_THREADS)
2376 sigaction(SIGSEGV, 0, &oldact);
2377 sigaction(SIGSEGV, &act, 0);
2378 # else
2379 sigaction(SIGSEGV, &act, &oldact);
2380 # endif
2381 # if defined(_sigargs)
2382 /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2383 /* sa_sigaction. */
2384 GC_old_segv_handler = oldact.sa_handler;
2385 # else /* Irix 6.x or SUNOS5SIGS */
2386 if (oldact.sa_flags & SA_SIGINFO) {
2387 GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
2388 } else {
2389 GC_old_segv_handler = oldact.sa_handler;
2391 # endif
2392 if (GC_old_segv_handler == SIG_IGN) {
2393 GC_err_printf0("Previously ignored segmentation violation!?");
2394 GC_old_segv_handler = SIG_DFL;
2396 if (GC_old_segv_handler != SIG_DFL) {
2397 # ifdef PRINTSTATS
2398 GC_err_printf0("Replaced other SIGSEGV handler\n");
2399 # endif
2401 # endif
2402 # if defined(MACOSX) || defined(HPUX)
2403 sigaction(SIGBUS, &act, &oldact);
2404 GC_old_bus_handler = oldact.sa_handler;
2405 if (GC_old_bus_handler == SIG_IGN) {
2406 GC_err_printf0("Previously ignored bus error!?");
2407 GC_old_bus_handler = SIG_DFL;
2409 if (GC_old_bus_handler != SIG_DFL) {
2410 # ifdef PRINTSTATS
2411 GC_err_printf0("Replaced other SIGBUS handler\n");
2412 # endif
2414 # endif /* MACOSX || HPUX */
2415 # if defined(MSWIN32)
2416 GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
2417 if (GC_old_segv_handler != NULL) {
2418 # ifdef PRINTSTATS
2419 GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2420 # endif
2421 } else {
2422 GC_old_segv_handler = SIG_DFL;
2424 # endif
2429 void GC_protect_heap()
2431 ptr_t start;
2432 word len;
2433 unsigned i;
2435 for (i = 0; i < GC_n_heap_sects; i++) {
2436 start = GC_heap_sects[i].hs_start;
2437 len = GC_heap_sects[i].hs_bytes;
2438 PROTECT(start, len);
2442 /* We assume that either the world is stopped or it's OK to lose dirty */
2443 /* bits while this is happening (as in GC_enable_incremental). */
2444 void GC_read_dirty()
2446 BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2447 (sizeof GC_dirty_pages));
2448 BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
2449 GC_protect_heap();
2452 GC_bool GC_page_was_dirty(h)
2453 struct hblk * h;
2455 register word index = PHT_HASH(h);
2457 return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2461 * Acquiring the allocation lock here is dangerous, since this
2462 * can be called from within GC_call_with_alloc_lock, and the cord
2463 * package does so. On systems that allow nested lock acquisition, this
2464 * happens to work.
2465 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2468 static GC_bool syscall_acquired_lock = FALSE; /* Protected by GC lock. */
2470 void GC_begin_syscall()
2472 if (!I_HOLD_LOCK()) {
2473 LOCK();
2474 syscall_acquired_lock = TRUE;
2478 void GC_end_syscall()
2480 if (syscall_acquired_lock) {
2481 syscall_acquired_lock = FALSE;
2482 UNLOCK();
2486 void GC_unprotect_range(addr, len)
2487 ptr_t addr;
2488 word len;
2490 struct hblk * start_block;
2491 struct hblk * end_block;
2492 register struct hblk *h;
2493 ptr_t obj_start;
2495 if (!GC_incremental) return;
2496 obj_start = GC_base(addr);
2497 if (obj_start == 0) return;
2498 if (GC_base(addr + len - 1) != obj_start) {
2499 ABORT("GC_unprotect_range(range bigger than object)");
2501 start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
2502 end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
2503 end_block += GC_page_size/HBLKSIZE - 1;
2504 for (h = start_block; h <= end_block; h++) {
2505 register word index = PHT_HASH(h);
2507 async_set_pht_entry_from_index(GC_dirty_pages, index);
2509 UNPROTECT(start_block,
2510 ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
2513 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(LINUX_THREADS) \
2514 && !defined(GC_USE_LD_WRAP)
2515 /* Replacement for the UNIX read() system call. Other */
2516 /* calls that write to the heap should be handled */
2517 /* similarly; see the sketch after this block. */
2518 # if defined(__STDC__) && !defined(SUNOS4)
2519 # include <unistd.h>
2520 # include <sys/uio.h>
2521 ssize_t read(int fd, void *buf, size_t nbyte)
2522 # else
2523 # ifndef LINT
2524 int read(fd, buf, nbyte)
2525 # else
2526 int GC_read(fd, buf, nbyte)
2527 # endif
2528 int fd;
2529 char *buf;
2530 int nbyte;
2531 # endif
2533 int result;
2535 GC_begin_syscall();
2536 GC_unprotect_range(buf, (word)nbyte);
2537 # if defined(IRIX5) || defined(LINUX_THREADS)
2538 /* Indirect system call may not always be easily available. */
2539 /* We could call _read, but that would interfere with the */
2540 /* libpthread interception of read. */
2541 /* On Linux, we have to be careful with the linuxthreads */
2542 /* read interception. */
2544 struct iovec iov;
2546 iov.iov_base = buf;
2547 iov.iov_len = nbyte;
2548 result = readv(fd, &iov, 1);
2550 # else
2551 /* The two zero args at the end of this list are because one
2552 IA-64 syscall() implementation actually requires six args
2553 to be passed, even though they aren't always used. */
2554 result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
2555 # endif
2556 GC_end_syscall();
2557 return(result);
2559 #endif /* !MSWIN32 && !MSWINCE && !LINUX_THREADS && !GC_USE_LD_WRAP */
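/* Illustrative sketch of the pattern mentioned above for other calls */
/* that write into the heap: bracket the real call with */
/* GC_begin_syscall/GC_end_syscall and unprotect the target range first, */
/* so the kernel's write does not hit a protected page. The wrapper */
/* below is hypothetical and is not installed anywhere. */
#if 0
#include <unistd.h>

ssize_t example_wrapped_pread(int fd, void *buf, size_t nbyte, off_t offset)
{
    ssize_t result;

    GC_begin_syscall();
    GC_unprotect_range(buf, (word)nbyte);
    result = pread(fd, buf, nbyte, offset);
    GC_end_syscall();
    return result;
}
#endif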
2561 #ifdef GC_USE_LD_WRAP
2562 /* We use the GNU ld call wrapping facility. */
2563 /* This requires that the linker be invoked with "--wrap read". */
2564 /* This can be done by passing -Wl,"--wrap read" to gcc. */
2565 /* I'm not sure that this actually wraps whatever version of read */
2566 /* is called by stdio. That code also mentions __read. */
2567 # include <unistd.h>
2568 ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
2570 int result;
2572 GC_begin_syscall();
2573 GC_unprotect_range(buf, (word)nbyte);
2574 result = __real_read(fd, buf, nbyte);
2575 GC_end_syscall();
2576 return(result);
2579 /* We should probably also do this for __read, or whatever stdio */
2580 /* actually calls. */
2581 #endif
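/* For example, with GNU ld a link line along the lines of */
/*     gcc -o myprog myprog.o gc.a -Wl,--wrap,read */
/* redirects calls to read() to __wrap_read above, while __real_read */
/* resolves to the C library's read. The file names are placeholders. */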
2583 /*ARGSUSED*/
2584 GC_bool GC_page_was_ever_dirty(h)
2585 struct hblk *h;
2587 return(TRUE);
2590 /* Reset the n pages starting at h to "was never dirty" status. */
2591 /*ARGSUSED*/
2592 void GC_is_fresh(h, n)
2593 struct hblk *h;
2594 word n;
2598 # else /* !MPROTECT_VDB */
2600 # ifdef GC_USE_LD_WRAP
2601 ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
2602 { return __real_read(fd, buf, nbyte); }
2603 # endif
2605 # endif /* MPROTECT_VDB */
2607 # ifdef PROC_VDB
2610 * See DEFAULT_VDB for interface descriptions.
2614 * This implementation assumes a Solaris 2.X-like /proc pseudo-file-system
2615 * from which we can read page modified bits. This facility is far from
2616 * optimal (e.g. we would like to get the info for only some of the
2617 * address space), but it avoids intercepting system calls.
2620 #include <errno.h>
2621 #include <sys/types.h>
2622 #include <sys/signal.h>
2623 #include <sys/fault.h>
2624 #include <sys/syscall.h>
2625 #include <sys/procfs.h>
2626 #include <sys/stat.h>
2628 #define INITIAL_BUF_SZ 4096
2629 word GC_proc_buf_size = INITIAL_BUF_SZ;
2630 char *GC_proc_buf;
2632 #ifdef SOLARIS_THREADS
2633 /* We don't have exact sp values for threads. So we count on */
2634 /* occasionally declaring stack pages to be fresh. Thus we */
2635 /* need a real implementation of GC_is_fresh. We can't clear */
2636 /* entries in GC_written_pages, since that would declare all */
2637 /* pages with the given hash address to be fresh. */
2638 # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
2639 struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
2640 /* Collisions are dropped. */
2642 # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
2643 # define ADD_FRESH_PAGE(h) \
2644 GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
2645 # define PAGE_IS_FRESH(h) \
2646 (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
2647 #endif
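/* Worked example: with MAX_FRESH_PAGES == 8*1024, block h lands in slot */
/* divHBLKSZ((word)h) & 0x1FFF, so two blocks whose block numbers differ */
/* by a multiple of 8192 share a slot and the later ADD_FRESH_PAGE */
/* overwrites the earlier entry. PAGE_IS_FRESH then reports only the */
/* most recently cached block as fresh, which is safe: "not fresh" is */
/* the conservative answer for a dropped entry. */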
2649 /* Add all pages in pht2 to pht1 */
2650 void GC_or_pages(pht1, pht2)
2651 page_hash_table pht1, pht2;
2653 register int i;
2655 for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
2658 int GC_proc_fd;
2660 void GC_dirty_init()
2662 int fd;
2663 char buf[30];
2665 GC_dirty_maintained = TRUE;
2666 if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
2667 register int i;
2669 for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
2670 # ifdef PRINTSTATS
2671 GC_printf1("Allocated words:%lu:all pages may have been written\n",
2672 (unsigned long)
2673 (GC_words_allocd + GC_words_allocd_before_gc));
2674 # endif
2676 sprintf(buf, "/proc/%d", getpid());
2677 fd = open(buf, O_RDONLY);
2678 if (fd < 0) {
2679 ABORT("/proc open failed");
2681 GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
2682 close(fd);
2683 if (GC_proc_fd < 0) {
2684 ABORT("/proc ioctl failed");
2686 GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
2687 # ifdef SOLARIS_THREADS
2688 GC_fresh_pages = (struct hblk **)
2689 GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
2690 if (GC_fresh_pages == 0) {
2691 GC_err_printf0("No space for fresh pages\n");
2692 EXIT();
2694 BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
2695 # endif
2698 /* Ignore write hints. They don't help us here. */
2699 /*ARGSUSED*/
2700 void GC_write_hint(h)
2701 struct hblk *h;
2705 #ifdef SOLARIS_THREADS
2706 # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
2707 #else
2708 # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
2709 #endif
2711 void GC_read_dirty()
2713 unsigned long ps, np;
2714 int nmaps;
2715 ptr_t vaddr;
2716 struct prasmap * map;
2717 char * bufp;
2718 ptr_t current_addr, limit;
2719 int i;
2720 int dummy;
2722 BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
2724 bufp = GC_proc_buf;
2725 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2726 # ifdef PRINTSTATS
2727 GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
2728 GC_proc_buf_size);
2729 # endif
2731 /* Retry with larger buffer. */
2732 word new_size = 2 * GC_proc_buf_size;
2733 char * new_buf = GC_scratch_alloc(new_size);
2735 if (new_buf != 0) {
2736 GC_proc_buf = bufp = new_buf;
2737 GC_proc_buf_size = new_size;
2739 if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2740 WARN("Insufficient space for /proc read\n", 0);
2741 /* Punt: */
2742 memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
2743 memset(GC_written_pages, 0xff, sizeof(page_hash_table));
2744 # ifdef SOLARIS_THREADS
2745 BZERO(GC_fresh_pages,
2746 MAX_FRESH_PAGES * sizeof (struct hblk *));
2747 # endif
2748 return;
2752 /* Copy dirty bits into GC_grungy_pages */
2753 nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
2754 /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
2755 nmaps, PG_REFERENCED, PG_MODIFIED); */
2756 bufp = bufp + sizeof(struct prpageheader);
2757 for (i = 0; i < nmaps; i++) {
2758 map = (struct prasmap *)bufp;
2759 vaddr = (ptr_t)(map -> pr_vaddr);
2760 ps = map -> pr_pagesize;
2761 np = map -> pr_npage;
2762 /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
2763 limit = vaddr + ps * np;
2764 bufp += sizeof (struct prasmap);
2765 for (current_addr = vaddr;
2766 current_addr < limit; current_addr += ps){
2767 if ((*bufp++) & PG_MODIFIED) {
2768 register struct hblk * h = (struct hblk *) current_addr;
2770 while ((ptr_t)h < current_addr + ps) {
2771 register word index = PHT_HASH(h);
2773 set_pht_entry_from_index(GC_grungy_pages, index);
2774 # ifdef SOLARIS_THREADS
2776 register int slot = FRESH_PAGE_SLOT(h);
2778 if (GC_fresh_pages[slot] == h) {
2779 GC_fresh_pages[slot] = 0;
2782 # endif
2783 h++;
2787 bufp += sizeof(long) - 1;
2788 bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
2790 /* Update GC_written_pages. */
2791 GC_or_pages(GC_written_pages, GC_grungy_pages);
2792 # ifdef SOLARIS_THREADS
2793 /* Make sure that old stacks are considered completely clean */
2794 /* unless written again. */
2795 GC_old_stacks_are_fresh();
2796 # endif
2799 #undef READ
2801 GC_bool GC_page_was_dirty(h)
2802 struct hblk *h;
2804 register word index = PHT_HASH(h);
2805 register GC_bool result;
2807 result = get_pht_entry_from_index(GC_grungy_pages, index);
2808 # ifdef SOLARIS_THREADS
2809 if (result && PAGE_IS_FRESH(h)) result = FALSE;
2810 /* This happens only if the page was declared fresh since */
2811 /* the read_dirty call, e.g. because it's in an unused */
2812 /* thread stack. It's OK to treat it as clean in that */
2813 /* case, and it's consistent with */
2814 /* GC_page_was_ever_dirty. */
2815 # endif
2816 return(result);
2819 GC_bool GC_page_was_ever_dirty(h)
2820 struct hblk *h;
2822 register word index = PHT_HASH(h);
2823 register GC_bool result;
2825 result = get_pht_entry_from_index(GC_written_pages, index);
2826 # ifdef SOLARIS_THREADS
2827 if (result && PAGE_IS_FRESH(h)) result = FALSE;
2828 # endif
2829 return(result);
2832 /* Caller holds allocation lock. */
2833 void GC_is_fresh(h, n)
2834 struct hblk *h;
2835 word n;
2838 register word index;
2840 # ifdef SOLARIS_THREADS
2841 register word i;
2843 if (GC_fresh_pages != 0) {
2844 for (i = 0; i < n; i++) {
2845 ADD_FRESH_PAGE(h + i);
2848 # endif
2851 # endif /* PROC_VDB */
2854 # ifdef PCR_VDB
2856 # include "vd/PCR_VD.h"
2858 # define NPAGES (32*1024) /* 128 MB */
2860 PCR_VD_DB GC_grungy_bits[NPAGES];
2862 ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
2863 /* HBLKSIZE aligned. */
2865 void GC_dirty_init()
2867 GC_dirty_maintained = TRUE;
2868 /* For the time being, we assume the heap generally grows up */
2869 GC_vd_base = GC_heap_sects[0].hs_start;
2870 if (GC_vd_base == 0) {
2871 ABORT("Bad initial heap segment");
2873 if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
2874 != PCR_ERes_okay) {
2875 ABORT("dirty bit initialization failed");
2879 void GC_read_dirty()
2881 /* lazily enable dirty bits on newly added heap sects */
2883 static int onhs = 0;
2884 int nhs = GC_n_heap_sects;
2885 for( ; onhs < nhs; onhs++ ) {
2886 PCR_VD_WriteProtectEnable(
2887 GC_heap_sects[onhs].hs_start,
2888 GC_heap_sects[onhs].hs_bytes );
2893 if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
2894 != PCR_ERes_okay) {
2895 ABORT("dirty bit read failed");
2899 GC_bool GC_page_was_dirty(h)
2900 struct hblk *h;
2902 if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
2903 return(TRUE);
2905 return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
2908 /*ARGSUSED*/
2909 void GC_write_hint(h)
2910 struct hblk *h;
2912 PCR_VD_WriteProtectDisable(h, HBLKSIZE);
2913 PCR_VD_WriteProtectEnable(h, HBLKSIZE);
2916 # endif /* PCR_VDB */
2919 * Call stack save code for debugging.
2920 * Should probably be in mach_dep.c, but that requires reorganization.
2923 /* I suspect the following works for most X86 *nix variants, so */
2924 /* long as the frame pointer is explicitly stored. In the case of gcc, */
2925 /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
2926 #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
2927 struct frame {
2928 struct frame *fr_savfp;
2929 long fr_savpc;
2930 long fr_arg[NARGS]; /* All the arguments go here. */
2932 #endif
2934 #if defined(SPARC)
2935 # if defined(LINUX)
2936 struct frame {
2937 long fr_local[8];
2938 long fr_arg[6];
2939 struct frame *fr_savfp;
2940 long fr_savpc;
2941 # ifndef __arch64__
2942 char *fr_stret;
2943 # endif
2944 long fr_argd[6];
2945 long fr_argx[0];
2947 # else
2948 # if defined(SUNOS4)
2949 # include <machine/frame.h>
2950 # else
2951 # if defined (DRSNX)
2952 # include <sys/sparc/frame.h>
2953 # else
2954 # if defined(OPENBSD) || defined(NETBSD)
2955 # include <frame.h>
2956 # else
2957 # include <sys/frame.h>
2958 # endif
2959 # endif
2960 # endif
2961 # endif
2962 # if NARGS > 6
2963 --> We only know how to get the first 6 arguments
2964 # endif
2965 #endif /* SPARC */
2967 #ifdef SAVE_CALL_CHAIN
2968 /* Fill in the pc and argument information for up to NFRAMES of my */
2969 /* callers. Ignore my frame and my caller's frame. */
2971 #if (defined(OPENBSD) || defined(NETBSD)) && defined(SPARC)
2972 # define FR_SAVFP fr_fp
2973 # define FR_SAVPC fr_pc
2974 #else
2975 # define FR_SAVFP fr_savfp
2976 # define FR_SAVPC fr_savpc
2977 #endif
2979 #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
2980 # define BIAS 2047
2981 #else
2982 # define BIAS 0
2983 #endif
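/* The 64-bit SPARC ABI keeps %sp and %fp biased 2047 bytes below the */
/* actual frame, so the saved frame pointers read below must have BIAS */
/* added back before they are dereferenced. */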
2985 void GC_save_callers (info)
2986 struct callinfo info[NFRAMES];
2988 struct frame *frame;
2989 struct frame *fp;
2990 int nframes = 0;
2991 # ifdef I386
2992 /* We assume this is turned on only with gcc as the compiler. */
2993 asm("movl %%ebp,%0" : "=r"(frame));
2994 fp = frame;
2995 # else
2996 word GC_save_regs_in_stack();
2998 frame = (struct frame *) GC_save_regs_in_stack ();
2999 fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
3000 #endif
3002 for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
3003 && (nframes < NFRAMES));
3004 fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
3005 register int i;
3007 info[nframes].ci_pc = fp->FR_SAVPC;
3008 for (i = 0; i < NARGS; i++) {
3009 info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
3012 if (nframes < NFRAMES) info[nframes].ci_pc = 0;
3015 #endif /* SAVE_CALL_CHAIN */
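/* Illustrative sketch (not used above): the same frame-pointer walk */
/* expressed with the GCC __builtin_return_address builtin, which avoids */
/* the inline asm and frame layout above but only looks a fixed number */
/* of levels up and, beyond level 0, also requires that frame pointers */
/* be kept (no -fomit-frame-pointer). Deliberately not compiled. */
#if 0
static void example_print_callers(void)
{
    /* Level 0 is the pc this function returns to (in its caller); */
    /* level 1 is a pc in its caller's caller. The level argument */
    /* must be a compile-time constant. */
    void *pc0 = __builtin_return_address(0);
    void *pc1 = __builtin_return_address(1);

    GC_err_printf1("caller pc:          0x%lx\n", (unsigned long)pc0);
    GC_err_printf1("caller's caller pc: 0x%lx\n", (unsigned long)pc1);
}
#endif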
3017 #if defined(LINUX) && defined(__ELF__) && \
3018 (!defined(SMALL_CONFIG) || defined(USE_PROC_FOR_LIBRARIES))
3019 #ifdef GC_USE_LD_WRAP
3020 # define READ __real_read
3021 #else
3022 # define READ read
3023 #endif
3026 /* Repeatedly perform a read call until the buffer is filled or */
3027 /* we encounter EOF. */
3028 ssize_t GC_repeat_read(int fd, char *buf, size_t count)
3030 ssize_t num_read = 0;
3031 ssize_t result;
3033 while (num_read < count) {
3034 result = READ(fd, buf + num_read, count - num_read);
3035 if (result < 0) return result;
3036 if (result == 0) break;
3037 num_read += result;
3039 return num_read;
3041 #endif /* LINUX && ... */
3044 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
3046 /* Dump /proc/self/maps to GC_stderr, to enable looking up names for
3047 addresses in FIND_LEAK output. */
3049 void GC_print_address_map()
3051 int f;
3052 int result;
3053 char maps_temp[32768];
3054 GC_err_printf0("---------- Begin address map ----------\n");
3055 f = open("/proc/self/maps", O_RDONLY);
3056 if (-1 == f) ABORT("Couldn't open /proc/self/maps");
3057 do {
3058 result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
3059 if (result <= 0) ABORT("Couldn't read /proc/self/maps");
3060 GC_err_write(maps_temp, result);
3061 } while (result == sizeof(maps_temp));
3063 GC_err_printf0("---------- End address map ----------\n");
3066 #endif