/*
 * AROS-Contrib: gc/os_dep.c — OS-dependent routines of the
 * Boehm-Demers-Weiser conservative garbage collector.
 * Imported while attempting to build Quake and Doom for ARM.
 * (git blob f98a86501b44759e1b462703eb37449766830f3c)
 */
1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
17 # include "private/gc_priv.h"
19 # if defined(LINUX) && !defined(POWERPC)
20 # include <linux/version.h>
21 # if (LINUX_VERSION_CODE <= 0x10400)
22 /* Ugly hack to get struct sigcontext_struct definition. Required */
23 /* for some early 1.3.X releases. Will hopefully go away soon. */
24 /* in some later Linux releases, asm/sigcontext.h may have to */
25 /* be included instead. */
26 # define __KERNEL__
27 # include <asm/signal.h>
28 # undef __KERNEL__
29 # else
30 /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31 /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
32 /* prototypes, so we have to include the top-level sigcontext.h to */
33 /* make sure the former gets defined to be the latter if appropriate. */
34 # include <features.h>
35 # if 2 <= __GLIBC__
36 # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
37 /* glibc 2.1 no longer has sigcontext.h. But signal.h */
38 /* has the right declaration for glibc 2.1. */
39 # include <sigcontext.h>
40 # endif /* 0 == __GLIBC_MINOR__ */
41 # else /* not 2 <= __GLIBC__ */
42 /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
43 /* one. Check LINUX_VERSION_CODE to see which we should reference. */
44 # include <asm/sigcontext.h>
45 # endif /* 2 <= __GLIBC__ */
46 # endif
47 # endif
48 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
49 && !defined(MSWINCE)
50 # include <sys/types.h>
51 # if !defined(MSWIN32) && !defined(SUNOS4)
52 # include <unistd.h>
53 # endif
54 # endif
56 # include <stdio.h>
57 # if defined(MSWINCE)
58 # define SIGSEGV 0 /* value is irrelevant */
59 # else
60 # include <signal.h>
61 # endif
63 /* Blatantly OS dependent routines, except for those that are related */
64 /* to dynamic loading. */
66 # if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
67 # define NEED_FIND_LIMIT
68 # endif
70 # if !defined(STACKBOTTOM) && defined(HEURISTIC2)
71 # define NEED_FIND_LIMIT
72 # endif
74 # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
75 # define NEED_FIND_LIMIT
76 # endif
78 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
79 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
80 # define NEED_FIND_LIMIT
81 # endif
83 #ifdef NEED_FIND_LIMIT
84 # include <setjmp.h>
85 #endif
87 #ifdef FREEBSD
88 # include <machine/trap.h>
89 #endif
91 #ifdef __AMIGAOS__
92 # define GC_AMIGA_DEF
93 # include "exec/execbase.h"
94 # include "AmigaOS.c"
95 # undef GC_AMIGA_DEF
96 #endif
98 #ifdef __AROS__
99 # include <proto/exec.h>
100 # include <exec/execbase.h>
101 # include <dos/dos.h>
102 # include <dos/dosextens.h>
103 #endif
105 #if defined(MSWIN32) || defined(MSWINCE)
106 # define WIN32_LEAN_AND_MEAN
107 # define NOSERVICE
108 # include <windows.h>
109 #endif
111 #ifdef MACOS
112 # include <Processes.h>
113 #endif
115 #ifdef IRIX5
116 # include <sys/uio.h>
117 # include <malloc.h> /* for locking */
118 #endif
119 #ifdef USE_MMAP
120 # include <sys/types.h>
121 # include <sys/mman.h>
122 # include <sys/stat.h>
123 #endif
125 #ifdef UNIX_LIKE
126 # include <fcntl.h>
127 #endif
129 #if defined(SUNOS5SIGS) || defined (HURD) || defined(LINUX)
130 # ifdef SUNOS5SIGS
131 # include <sys/siginfo.h>
132 # endif
133 # undef setjmp
134 # undef longjmp
135 # define setjmp(env) sigsetjmp(env, 1)
136 # define longjmp(env, val) siglongjmp(env, val)
137 # define jmp_buf sigjmp_buf
138 #endif
140 #ifdef DJGPP
141 /* Apparently necessary for djgpp 2.01. May cause problems with */
142 /* other versions. */
143 typedef long unsigned int caddr_t;
144 #endif
146 #ifdef PCR
147 # include "il/PCR_IL.h"
148 # include "th/PCR_ThCtl.h"
149 # include "mm/PCR_MM.h"
150 #endif
152 #if !defined(NO_EXECUTE_PERMISSION)
153 # define OPT_PROT_EXEC PROT_EXEC
154 #else
155 # define OPT_PROT_EXEC 0
156 #endif
158 #if defined(SEARCH_FOR_DATA_START)
159 /* The I386 case can be handled without a search. The Alpha case */
160 /* used to be handled differently as well, but the rules changed */
161 /* for recent Linux versions. This seems to be the easiest way to */
162 /* cover all versions. */
164 # ifdef LINUX
165 # pragma weak __data_start
166 extern int __data_start;
167 # pragma weak data_start
168 extern int data_start;
169 # endif /* LINUX */
170 extern int _end;
172 ptr_t GC_data_start;
174 void GC_init_linux_data_start()
176 extern ptr_t GC_find_limit();
178 # ifdef LINUX
179 /* Try the easy approaches first: */
180 if (&__data_start != 0) {
181 GC_data_start = (ptr_t)(&__data_start);
182 return;
184 if (&data_start != 0) {
185 GC_data_start = (ptr_t)(&data_start);
186 return;
188 # endif /* LINUX */
189 GC_data_start = GC_find_limit((ptr_t)(&_end), FALSE);
191 #endif
193 # ifdef ECOS
#   ifndef ECOS_GC_MEMORY_SIZE
#   define ECOS_GC_MEMORY_SIZE (448 * 1024)
#   endif /* ECOS_GC_MEMORY_SIZE */

// setjmp() function, as described in ANSI para 7.6.1.1
#define setjmp( __env__ )  hal_setjmp( __env__ )

// FIXME: This is a simple way of allocating memory which is
// compatible with ECOS early releases.  Later releases use a more
// sophisticated means of allocating memory than this simple static
// allocator, but this method is at least bound to work.
static char memory[ECOS_GC_MEMORY_SIZE];
static char *brk = memory;

/* sbrk() replacement for eCos: bump-allocate from the static pool.    */
/* Returns the old break on success, NULL (with the break unchanged)   */
/* when the request would run past the end of the pool.                */
static void *tiny_sbrk(ptrdiff_t increment)
{
  void *p = brk;

  brk += increment;

  if (brk > memory + sizeof memory)
    {
      /* Out of pool: undo the bump so the break stays valid. */
      brk -= increment;
      return NULL;
    }

  return p;
}
#define sbrk tiny_sbrk
224 # endif /* ECOS */
226 #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
227 ptr_t GC_data_start;
229 void GC_init_netbsd_elf()
231 extern ptr_t GC_find_limit();
232 extern char **environ;
233 /* This may need to be environ, without the underscore, for */
234 /* some versions. */
235 GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
237 #endif
239 # ifdef OS2
241 # include <stddef.h>
243 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
245 struct exe_hdr {
246 unsigned short magic_number;
247 unsigned short padding[29];
248 long new_exe_offset;
251 #define E_MAGIC(x) (x).magic_number
252 #define EMAGIC 0x5A4D
253 #define E_LFANEW(x) (x).new_exe_offset
255 struct e32_exe {
256 unsigned char magic_number[2];
257 unsigned char byte_order;
258 unsigned char word_order;
259 unsigned long exe_format_level;
260 unsigned short cpu;
261 unsigned short os;
262 unsigned long padding1[13];
263 unsigned long object_table_offset;
264 unsigned long object_count;
265 unsigned long padding2[31];
268 #define E32_MAGIC1(x) (x).magic_number[0]
269 #define E32MAGIC1 'L'
270 #define E32_MAGIC2(x) (x).magic_number[1]
271 #define E32MAGIC2 'X'
272 #define E32_BORDER(x) (x).byte_order
273 #define E32LEBO 0
274 #define E32_WORDER(x) (x).word_order
275 #define E32LEWO 0
276 #define E32_CPU(x) (x).cpu
277 #define E32CPU286 1
278 #define E32_OBJTAB(x) (x).object_table_offset
279 #define E32_OBJCNT(x) (x).object_count
281 struct o32_obj {
282 unsigned long size;
283 unsigned long base;
284 unsigned long flags;
285 unsigned long pagemap;
286 unsigned long mapsize;
287 unsigned long reserved;
290 #define O32_FLAGS(x) (x).flags
291 #define OBJREAD 0x0001L
292 #define OBJWRITE 0x0002L
293 #define OBJINVALID 0x0080L
294 #define O32_SIZE(x) (x).size
295 #define O32_BASE(x) (x).base
297 # else /* IBM's compiler */
299 /* A kludge to get around what appears to be a header file bug */
300 # ifndef WORD
301 # define WORD unsigned short
302 # endif
303 # ifndef DWORD
304 # define DWORD unsigned long
305 # endif
307 # define EXE386 1
308 # include <newexe.h>
309 # include <exe386.h>
311 # endif /* __IBMC__ */
313 # define INCL_DOSEXCEPTIONS
314 # define INCL_DOSPROCESS
315 # define INCL_DOSERRORS
316 # define INCL_DOSMODULEMGR
317 # define INCL_DOSMEMMGR
318 # include <os2.h>
321 /* Disable and enable signals during nontrivial allocations */
323 void GC_disable_signals(void)
325 ULONG nest;
327 DosEnterMustComplete(&nest);
328 if (nest != 1) ABORT("nested GC_disable_signals");
331 void GC_enable_signals(void)
333 ULONG nest;
335 DosExitMustComplete(&nest);
336 if (nest != 0) ABORT("GC_enable_signals");
340 # else
342 # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
343 && !defined(MSWINCE) \
344 && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW)
346 # if defined(sigmask) && !defined(UTS4) && !defined(HURD)
347 /* Use the traditional BSD interface */
348 # define SIGSET_T int
349 # define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
350 # define SIG_FILL(set) (set) = 0x7fffffff
351 /* Setting the leading bit appears to provoke a bug in some */
352 /* longjmp implementations. Most systems appear not to have */
353 /* a signal 32. */
354 # define SIGSETMASK(old, new) (old) = sigsetmask(new)
355 # else
356 /* Use POSIX/SYSV interface */
357 # define SIGSET_T sigset_t
358 # define SIG_DEL(set, signal) sigdelset(&(set), (signal))
359 # define SIG_FILL(set) sigfillset(&set)
360 # define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
361 # endif
363 static GC_bool mask_initialized = FALSE;
365 static SIGSET_T new_mask;
367 static SIGSET_T old_mask;
369 static SIGSET_T dummy;
371 #if defined(PRINTSTATS) && !defined(THREADS)
372 # define CHECK_SIGNALS
373 int GC_sig_disabled = 0;
374 #endif
376 void GC_disable_signals()
378 if (!mask_initialized) {
379 SIG_FILL(new_mask);
381 SIG_DEL(new_mask, SIGSEGV);
382 SIG_DEL(new_mask, SIGILL);
383 SIG_DEL(new_mask, SIGQUIT);
384 # ifdef SIGBUS
385 SIG_DEL(new_mask, SIGBUS);
386 # endif
387 # ifdef SIGIOT
388 SIG_DEL(new_mask, SIGIOT);
389 # endif
390 # ifdef SIGEMT
391 SIG_DEL(new_mask, SIGEMT);
392 # endif
393 # ifdef SIGTRAP
394 SIG_DEL(new_mask, SIGTRAP);
395 # endif
396 mask_initialized = TRUE;
398 # ifdef CHECK_SIGNALS
399 if (GC_sig_disabled != 0) ABORT("Nested disables");
400 GC_sig_disabled++;
401 # endif
402 SIGSETMASK(old_mask,new_mask);
405 void GC_enable_signals()
407 # ifdef CHECK_SIGNALS
408 if (GC_sig_disabled != 1) ABORT("Unmatched enable");
409 GC_sig_disabled--;
410 # endif
411 SIGSETMASK(dummy,old_mask);
414 # endif /* !PCR */
416 # endif /*!OS/2 */
/* Ivan Demakov: simplest way (to me) */
/* DOS4GW has no asynchronous signals to worry about, so these  */
/* are deliberate no-ops.                                       */
#if defined(DOS4GW)
  void GC_disable_signals() { }
  void GC_enable_signals() { }
#endif
424 /* Find the page size */
425 word GC_page_size;
427 # if defined(MSWIN32) || defined(MSWINCE)
428 void GC_setpagesize()
430 GetSystemInfo(&GC_sysinfo);
431 GC_page_size = GC_sysinfo.dwPageSize;
434 # else
435 # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
436 || defined(USE_MUNMAP)
437 void GC_setpagesize()
439 GC_page_size = GETPAGESIZE();
441 # else
442 /* It's acceptable to fake it. */
443 void GC_setpagesize()
445 GC_page_size = HBLKSIZE;
447 # endif
448 # endif
451 * Find the base of the stack.
452 * Used only in single-threaded environment.
453 * With threads, GC_mark_roots needs to know how to do this.
454 * Called with allocator lock held.
# if defined(MSWIN32) || defined(MSWINCE)

# define is_writable(prot) ((prot) == PAGE_READWRITE \
                            || (prot) == PAGE_WRITECOPY \
                            || (prot) == PAGE_EXECUTE_READWRITE \
                            || (prot) == PAGE_EXECUTE_WRITECOPY)

/* Return the number of bytes that are writable starting at p.  */
/* The pointer p is assumed to be page aligned.                 */
/* If base is not 0, *base becomes the beginning of the         */
/* allocation region containing p.                              */
word GC_get_writable_length(ptr_t p, ptr_t *base)
{
    MEMORY_BASIC_INFORMATION buf;
    word result;
    word protect;

    result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
    if (base != 0) *base = (ptr_t)(buf.AllocationBase);
    /* PAGE_GUARD / PAGE_NOCACHE are modifiers, not protections. */
    protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
    if (!is_writable(protect)) {
        return(0);
    }
    if (buf.State != MEM_COMMIT) return(0);
    return(buf.RegionSize);
}

/* Stack base = end of the writable region containing the       */
/* current stack pointer (stack grows down on x86 Windows).     */
ptr_t GC_get_stack_base()
{
    int dummy;
    ptr_t sp = (ptr_t)(&dummy);
    ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
    word size = GC_get_writable_length(trunc_sp, 0);

    return(trunc_sp + size);
}

# endif /* MS Windows */
# ifdef BEOS
# include <kernel/OS.h>
/* BeOS: the kernel reports the stack end directly.             */
ptr_t GC_get_stack_base(){
        thread_info th;
        get_thread_info(find_thread(NULL), &th);
        return th.stack_end;
}
# endif /* BEOS */
# ifdef OS2

/* OS/2: the thread information block records the stack limit.  */
ptr_t GC_get_stack_base()
{
    PTIB ptib;
    PPIB ppib;

    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        GC_err_printf0("DosGetInfoBlocks failed\n");
        ABORT("DosGetInfoBlocks failed\n");
    }
    return((ptr_t)(ptib -> tib_pstacklimit));
}

# endif /* OS2 */
# ifdef __AROS__
/* AROS: the task structure records the upper stack bound.      */
ptr_t GC_get_stack_base(){
    return (char *)SysBase->ThisTask->tc_SPUpper;
}
# endif
526 # ifdef __AMIGAOS__
527 # define GC_AMIGA_SB
528 # include "AmigaOS.c"
529 # undef GC_AMIGA_SB
530 # endif /* __AMIGAOS__ */
532 # if defined(NEED_FIND_LIMIT) || (defined(UNIX_LIKE) && !defined(ECOS))
534 # ifdef __STDC__
535 typedef void (*handler)(int);
536 # else
537 typedef void (*handler)();
538 # endif
540 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) || defined(HURD)
541 static struct sigaction old_segv_act;
542 # if defined(_sigargs) /* !Irix6.x */ || defined(HPUX) || defined(HURD)
543 static struct sigaction old_bus_act;
544 # endif
545 # else
546 static handler old_segv_handler, old_bus_handler;
547 # endif
549 # ifdef __STDC__
550 void GC_set_and_save_fault_handler(handler h)
551 # else
552 void GC_set_and_save_fault_handler(h)
553 handler h;
554 # endif
556 # if defined(SUNOS5SIGS) || defined(IRIX5) \
557 || defined(OSF1) || defined(HURD)
558 struct sigaction act;
560 act.sa_handler = h;
561 # ifdef SUNOS5SIGS
562 act.sa_flags = SA_RESTART | SA_NODEFER;
563 # else
564 act.sa_flags = SA_RESTART;
565 # endif
566 /* The presence of SA_NODEFER represents yet another gross */
567 /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
568 /* interact correctly with -lthread. We hide the confusion */
569 /* by making sure that signal handling doesn't affect the */
570 /* signal mask. */
572 (void) sigemptyset(&act.sa_mask);
573 # ifdef GC_IRIX_THREADS
574 /* Older versions have a bug related to retrieving and */
575 /* and setting a handler at the same time. */
576 (void) sigaction(SIGSEGV, 0, &old_segv_act);
577 (void) sigaction(SIGSEGV, &act, 0);
578 # else
579 (void) sigaction(SIGSEGV, &act, &old_segv_act);
580 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
581 || defined(HPUX) || defined(HURD)
582 /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
583 /* Pthreads doesn't exist under Irix 5.x, so we */
584 /* don't have to worry in the threads case. */
585 (void) sigaction(SIGBUS, &act, &old_bus_act);
586 # endif
587 # endif /* GC_IRIX_THREADS */
588 # else
589 old_segv_handler = signal(SIGSEGV, h);
590 # ifdef SIGBUS
591 old_bus_handler = signal(SIGBUS, h);
592 # endif
593 # endif
595 # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
597 # ifdef NEED_FIND_LIMIT
598 /* Some tools to implement HEURISTIC2 */
599 # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
600 /* static */ jmp_buf GC_jmp_buf;
602 /*ARGSUSED*/
603 void GC_fault_handler(sig)
604 int sig;
606 longjmp(GC_jmp_buf, 1);
609 void GC_setup_temporary_fault_handler()
611 GC_set_and_save_fault_handler(GC_fault_handler);
614 void GC_reset_fault_handler()
616 # if defined(SUNOS5SIGS) || defined(IRIX5) \
617 || defined(OSF1) || defined(HURD)
618 (void) sigaction(SIGSEGV, &old_segv_act, 0);
619 # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
620 || defined(HPUX) || defined(HURD)
621 (void) sigaction(SIGBUS, &old_bus_act, 0);
622 # endif
623 # else
624 (void) signal(SIGSEGV, old_segv_handler);
625 # ifdef SIGBUS
626 (void) signal(SIGBUS, old_bus_handler);
627 # endif
628 # endif
631 /* Return the first nonaddressible location > p (up) or */
632 /* the smallest location q s.t. [q,p] is addressible (!up). */
633 ptr_t GC_find_limit(p, up)
634 ptr_t p;
635 GC_bool up;
637 static VOLATILE ptr_t result;
638 /* Needs to be static, since otherwise it may not be */
639 /* preserved across the longjmp. Can safely be */
640 /* static since it's only called once, with the */
641 /* allocation lock held. */
644 GC_setup_temporary_fault_handler();
645 if (setjmp(GC_jmp_buf) == 0) {
646 result = (ptr_t)(((word)(p))
647 & ~(MIN_PAGE_SIZE-1));
648 for (;;) {
649 if (up) {
650 result += MIN_PAGE_SIZE;
651 } else {
652 result -= MIN_PAGE_SIZE;
654 GC_noop1((word)(*result));
657 GC_reset_fault_handler();
658 if (!up) {
659 result += MIN_PAGE_SIZE;
661 return(result);
663 # endif
665 #ifdef LINUX_STACKBOTTOM
667 #include <sys/types.h>
668 #include <sys/stat.h>
670 # define STAT_SKIP 27 /* Number of fields preceding startstack */
671 /* field in /proc/self/stat */
673 # pragma weak __libc_stack_end
674 extern ptr_t __libc_stack_end;
676 # ifdef IA64
677 # pragma weak __libc_ia64_register_backing_store_base
678 extern ptr_t __libc_ia64_register_backing_store_base;
680 ptr_t GC_get_register_stack_base(void)
682 if (0 != &__libc_ia64_register_backing_store_base
683 && 0 != __libc_ia64_register_backing_store_base) {
684 /* Glibc 2.2.4 has a bug such that for dynamically linked */
685 /* executables __libc_ia64_register_backing_store_base is */
686 /* defined but ininitialized during constructor calls. */
687 /* Hence we check for both nonzero address and value. */
688 return __libc_ia64_register_backing_store_base;
689 } else {
690 word result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
691 result += BACKING_STORE_ALIGNMENT - 1;
692 result &= ~(BACKING_STORE_ALIGNMENT - 1);
693 return (ptr_t)result;
696 # endif
698 ptr_t GC_linux_stack_base(void)
700 /* We read the stack base value from /proc/self/stat. We do this */
701 /* using direct I/O system calls in order to avoid calling malloc */
702 /* in case REDIRECT_MALLOC is defined. */
703 # define STAT_BUF_SIZE 4096
704 # if defined(GC_USE_LD_WRAP)
705 # define STAT_READ __real_read
706 # else
707 # define STAT_READ read
708 # endif
709 char stat_buf[STAT_BUF_SIZE];
710 int f;
711 char c;
712 word result = 0;
713 size_t i, buf_offset = 0;
715 /* First try the easy way. This should work for glibc 2.2 */
716 if (0 != &__libc_stack_end) {
717 return __libc_stack_end;
719 f = open("/proc/self/stat", O_RDONLY);
720 if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
721 ABORT("Couldn't read /proc/self/stat");
723 c = stat_buf[buf_offset++];
724 /* Skip the required number of fields. This number is hopefully */
725 /* constant across all Linux implementations. */
726 for (i = 0; i < STAT_SKIP; ++i) {
727 while (isspace(c)) c = stat_buf[buf_offset++];
728 while (!isspace(c)) c = stat_buf[buf_offset++];
730 while (isspace(c)) c = stat_buf[buf_offset++];
731 while (isdigit(c)) {
732 result *= 10;
733 result += c - '0';
734 c = stat_buf[buf_offset++];
736 close(f);
737 if (result < 0x10000000) ABORT("Absurd stack bottom value");
738 return (ptr_t)result;
741 #endif /* LINUX_STACKBOTTOM */
743 #ifdef FREEBSD_STACKBOTTOM
745 /* This uses an undocumented sysctl call, but at least one expert */
746 /* believes it will stay. */
748 #include <unistd.h>
749 #include <sys/types.h>
750 #include <sys/sysctl.h>
752 ptr_t GC_freebsd_stack_base(void)
754 int nm[2] = { CTL_KERN, KERN_USRSTACK}, base, len, r;
756 len = sizeof(int);
757 r = sysctl(nm, 2, &base, &len, NULL, 0);
759 if (r) ABORT("Error getting stack base");
761 return (ptr_t)base;
764 #endif /* FREEBSD_STACKBOTTOM */
766 #if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
767 && !defined(MSWINCE) && !defined(OS2) && !defined(ECOS)
769 ptr_t GC_get_stack_base()
771 word dummy;
772 ptr_t result;
774 # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
776 # ifdef STACKBOTTOM
777 return(STACKBOTTOM);
778 # else
779 # ifdef HEURISTIC1
780 # ifdef STACK_GROWS_DOWN
781 result = (ptr_t)((((word)(&dummy))
782 + STACKBOTTOM_ALIGNMENT_M1)
783 & ~STACKBOTTOM_ALIGNMENT_M1);
784 # else
785 result = (ptr_t)(((word)(&dummy))
786 & ~STACKBOTTOM_ALIGNMENT_M1);
787 # endif
788 # endif /* HEURISTIC1 */
789 # ifdef LINUX_STACKBOTTOM
790 result = GC_linux_stack_base();
791 # endif
792 # ifdef FREEBSD_STACKBOTTOM
793 result = GC_freebsd_stack_base();
794 # endif
795 # ifdef HEURISTIC2
796 # ifdef STACK_GROWS_DOWN
797 result = GC_find_limit((ptr_t)(&dummy), TRUE);
798 # ifdef HEURISTIC2_LIMIT
799 if (result > HEURISTIC2_LIMIT
800 && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
801 result = HEURISTIC2_LIMIT;
803 # endif
804 # else
805 result = GC_find_limit((ptr_t)(&dummy), FALSE);
806 # ifdef HEURISTIC2_LIMIT
807 if (result < HEURISTIC2_LIMIT
808 && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
809 result = HEURISTIC2_LIMIT;
811 # endif
812 # endif
814 # endif /* HEURISTIC2 */
815 # ifdef STACK_GROWS_DOWN
816 if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
817 # endif
818 return(result);
819 # endif /* STACKBOTTOM */
822 # endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS */
825 * Register static data segment(s) as roots.
826 * If more data segments are added later then they need to be registered
827 * add that point (as we do with SunOS dynamic loading),
828 * or GC_mark_roots needs to check for them (as we do with PCR).
829 * Called with allocator lock held.
832 # ifdef OS2
834 void GC_register_data_segments()
836 PTIB ptib;
837 PPIB ppib;
838 HMODULE module_handle;
839 # define PBUFSIZ 512
840 UCHAR path[PBUFSIZ];
841 FILE * myexefile;
842 struct exe_hdr hdrdos; /* MSDOS header. */
843 struct e32_exe hdr386; /* Real header for my executable */
844 struct o32_obj seg; /* Currrent segment */
845 int nsegs;
848 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
849 GC_err_printf0("DosGetInfoBlocks failed\n");
850 ABORT("DosGetInfoBlocks failed\n");
852 module_handle = ppib -> pib_hmte;
853 if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
854 GC_err_printf0("DosQueryModuleName failed\n");
855 ABORT("DosGetInfoBlocks failed\n");
857 myexefile = fopen(path, "rb");
858 if (myexefile == 0) {
859 GC_err_puts("Couldn't open executable ");
860 GC_err_puts(path); GC_err_puts("\n");
861 ABORT("Failed to open executable\n");
863 if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
864 GC_err_puts("Couldn't read MSDOS header from ");
865 GC_err_puts(path); GC_err_puts("\n");
866 ABORT("Couldn't read MSDOS header");
868 if (E_MAGIC(hdrdos) != EMAGIC) {
869 GC_err_puts("Executable has wrong DOS magic number: ");
870 GC_err_puts(path); GC_err_puts("\n");
871 ABORT("Bad DOS magic number");
873 if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
874 GC_err_puts("Seek to new header failed in ");
875 GC_err_puts(path); GC_err_puts("\n");
876 ABORT("Bad DOS magic number");
878 if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
879 GC_err_puts("Couldn't read MSDOS header from ");
880 GC_err_puts(path); GC_err_puts("\n");
881 ABORT("Couldn't read OS/2 header");
883 if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
884 GC_err_puts("Executable has wrong OS/2 magic number:");
885 GC_err_puts(path); GC_err_puts("\n");
886 ABORT("Bad OS/2 magic number");
888 if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
889 GC_err_puts("Executable %s has wrong byte order: ");
890 GC_err_puts(path); GC_err_puts("\n");
891 ABORT("Bad byte order");
893 if ( E32_CPU(hdr386) == E32CPU286) {
894 GC_err_puts("GC can't handle 80286 executables: ");
895 GC_err_puts(path); GC_err_puts("\n");
896 EXIT();
898 if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
899 SEEK_SET) != 0) {
900 GC_err_puts("Seek to object table failed: ");
901 GC_err_puts(path); GC_err_puts("\n");
902 ABORT("Seek to object table failed");
904 for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
905 int flags;
906 if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
907 GC_err_puts("Couldn't read obj table entry from ");
908 GC_err_puts(path); GC_err_puts("\n");
909 ABORT("Couldn't read obj table entry");
911 flags = O32_FLAGS(seg);
912 if (!(flags & OBJWRITE)) continue;
913 if (!(flags & OBJREAD)) continue;
914 if (flags & OBJINVALID) {
915 GC_err_printf0("Object with invalid pages?\n");
916 continue;
918 GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
922 # else /* !OS2 */
924 # if defined(MSWIN32) || defined(MSWINCE)
926 # ifdef MSWIN32
927 /* Unfortunately, we have to handle win32s very differently from NT, */
928 /* Since VirtualQuery has very different semantics. In particular, */
929 /* under win32s a VirtualQuery call on an unmapped page returns an */
930 /* invalid result. Under GC_register_data_segments is a noop and */
931 /* all real work is done by GC_register_dynamic_libraries. Under */
932 /* win32s, we cannot find the data segments associated with dll's. */
933 /* We rgister the main data segment here. */
934 GC_bool GC_win32s = FALSE; /* We're running under win32s. */
936 GC_bool GC_is_win32s()
938 DWORD v = GetVersion();
940 /* Check that this is not NT, and Windows major version <= 3 */
941 return ((v & 0x80000000) && (v & 0xff) <= 3);
944 void GC_init_win32()
946 GC_win32s = GC_is_win32s();
949 /* Return the smallest address a such that VirtualQuery */
950 /* returns correct results for all addresses between a and start. */
951 /* Assumes VirtualQuery returns correct information for start. */
952 ptr_t GC_least_described_address(ptr_t start)
954 MEMORY_BASIC_INFORMATION buf;
955 DWORD result;
956 LPVOID limit;
957 ptr_t p;
958 LPVOID q;
960 limit = GC_sysinfo.lpMinimumApplicationAddress;
961 p = (ptr_t)((word)start & ~(GC_page_size - 1));
962 for (;;) {
963 q = (LPVOID)(p - GC_page_size);
964 if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
965 result = VirtualQuery(q, &buf, sizeof(buf));
966 if (result != sizeof(buf) || buf.AllocationBase == 0) break;
967 p = (ptr_t)(buf.AllocationBase);
969 return(p);
971 # endif
973 /* Is p the start of either the malloc heap, or of one of our */
974 /* heap sections? */
975 GC_bool GC_is_heap_base (ptr_t p)
978 register unsigned i;
980 # ifndef REDIRECT_MALLOC
981 static ptr_t malloc_heap_pointer = 0;
983 if (0 == malloc_heap_pointer) {
984 MEMORY_BASIC_INFORMATION buf;
985 void *pTemp = malloc( 1 );
986 register DWORD result = VirtualQuery(pTemp, &buf, sizeof(buf));
988 free( pTemp );
991 if (result != sizeof(buf)) {
992 ABORT("Weird VirtualQuery result");
994 malloc_heap_pointer = (ptr_t)(buf.AllocationBase);
996 if (p == malloc_heap_pointer) return(TRUE);
997 # endif
998 for (i = 0; i < GC_n_heap_bases; i++) {
999 if (GC_heap_bases[i] == p) return(TRUE);
1001 return(FALSE);
1004 # ifdef MSWIN32
1005 void GC_register_root_section(ptr_t static_root)
1007 MEMORY_BASIC_INFORMATION buf;
1008 DWORD result;
1009 DWORD protect;
1010 LPVOID p;
1011 char * base;
1012 char * limit, * new_limit;
1014 if (!GC_win32s) return;
1015 p = base = limit = GC_least_described_address(static_root);
1016 while (p < GC_sysinfo.lpMaximumApplicationAddress) {
1017 result = VirtualQuery(p, &buf, sizeof(buf));
1018 if (result != sizeof(buf) || buf.AllocationBase == 0
1019 || GC_is_heap_base(buf.AllocationBase)) break;
1020 new_limit = (char *)p + buf.RegionSize;
1021 protect = buf.Protect;
1022 if (buf.State == MEM_COMMIT
1023 && is_writable(protect)) {
1024 if ((char *)p == limit) {
1025 limit = new_limit;
1026 } else {
1027 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1028 base = p;
1029 limit = new_limit;
1032 if (p > (LPVOID)new_limit /* overflow */) break;
1033 p = (LPVOID)new_limit;
1035 if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1037 #endif
/* Windows: under NT this is a no-op (dynamic-library scanning  */
/* finds the data segments); under win32s register the section  */
/* containing a static variable of ours.                        */
void GC_register_data_segments()
{
#   ifdef MSWIN32
      static char dummy;
      GC_register_root_section((ptr_t)(&dummy));
#   endif
}
1047 # else /* !OS2 && !Windows */
1049 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1050 || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1051 char * GC_SysVGetDataStart(max_page_size, etext_addr)
1052 int max_page_size;
1053 int * etext_addr;
1055 word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1056 & ~(sizeof(word) - 1);
1057 /* etext rounded to word boundary */
1058 word next_page = ((text_end + (word)max_page_size - 1)
1059 & ~((word)max_page_size - 1));
1060 word page_offset = (text_end & ((word)max_page_size - 1));
1061 VOLATILE char * result = (char *)(next_page + page_offset);
1062 /* Note that this isnt equivalent to just adding */
1063 /* max_page_size to &etext if &etext is at a page boundary */
1065 GC_setup_temporary_fault_handler();
1066 if (setjmp(GC_jmp_buf) == 0) {
1067 /* Try writing to the address. */
1068 *result = *result;
1069 GC_reset_fault_handler();
1070 } else {
1071 GC_reset_fault_handler();
1072 /* We got here via a longjmp. The address is not readable. */
1073 /* This is known to happen under Solaris 2.4 + gcc, which place */
1074 /* string constants in the text segment, but after etext. */
1075 /* Use plan B. Note that we now know there is a gap between */
1076 /* text and data segments, so plan A bought us something. */
1077 result = (char *)GC_find_limit((ptr_t)(DATAEND) - MIN_PAGE_SIZE, FALSE);
1079 return((char *)result);
1081 # endif
1084 #ifdef __AROS__
1085 void GC_register_data_segments()
1087 struct Process *proc;
1088 struct CommandLineInterface *cli;
1089 BPTR myseglist;
1090 ULONG *data;
1092 if ((proc = (struct Process *)FindTask(0)) == 0) {
1093 GC_err_puts("Cannot find process structure\n");
1094 return;
1096 if ((cli = BADDR(proc->pr_CLI)) == 0) {
1097 GC_err_puts("No CLI\n");
1098 return;
1100 if ((myseglist = cli->cli_Module) == 0) {
1101 GC_err_puts("No seglist from CLI\n");
1102 return;
1105 for (data = (ULONG *)BADDR(myseglist); data != 0;
1106 data = (ULONG *)BADDR(data[0])) {
1107 if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
1108 ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
1109 GC_add_roots_inner((char *)&data[1],
1110 ((char *)&data[1]) + data[-1], FALSE);
1114 #else
1115 #ifdef __AMIGAOS__
1116 # define GC_AMIGA_DS
1117 # include "AmigaOS.c"
1118 # undef GC_AMIGA_DS
1119 #else /* !OS2 && !Windows && !__AMIGAOS__ */
1121 void GC_register_data_segments()
1123 # if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS) \
1124 && !defined(MACOSX)
1125 # if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
1126 /* As of Solaris 2.3, the Solaris threads implementation */
1127 /* allocates the data structure for the initial thread with */
1128 /* sbrk at process startup. It needs to be scanned, so that */
1129 /* we don't lose some malloc allocated data structures */
1130 /* hanging from it. We're on thin ice here ... */
1131 extern caddr_t sbrk();
1133 GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
1134 # else
1135 GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
1136 # endif
1137 # endif
1138 # if !defined(PCR) && (defined(NEXT) || defined(MACOSX))
1139 GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
1140 # endif
1141 # if defined(MACOS)
1143 # if defined(THINK_C)
1144 extern void* GC_MacGetDataStart(void);
1145 /* globals begin above stack and end at a5. */
1146 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1147 (ptr_t)LMGetCurrentA5(), FALSE);
1148 # else
1149 # if defined(__MWERKS__)
1150 # if !__POWERPC__
1151 extern void* GC_MacGetDataStart(void);
1152 /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1153 # if __option(far_data)
1154 extern void* GC_MacGetDataEnd(void);
1155 # endif
1156 /* globals begin above stack and end at a5. */
1157 GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1158 (ptr_t)LMGetCurrentA5(), FALSE);
1159 /* MATTHEW: Handle Far Globals */
1160 # if __option(far_data)
1161 /* Far globals follow he QD globals: */
1162 GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
1163 (ptr_t)GC_MacGetDataEnd(), FALSE);
1164 # endif
1165 # else
1166 extern char __data_start__[], __data_end__[];
1167 GC_add_roots_inner((ptr_t)&__data_start__,
1168 (ptr_t)&__data_end__, FALSE);
1169 # endif /* __POWERPC__ */
1170 # endif /* __MWERKS__ */
1171 # endif /* !THINK_C */
1173 # endif /* MACOS */
1175 /* Dynamic libraries are added at every collection, since they may */
1176 /* change. */
1179 # endif /* ! __AMIGAOS__ */
1180 # endif /* ! __AROS__ */
1181 # endif /* ! MSWIN32 && ! MSWINCE*/
1182 # endif /* ! OS2 */
1185 * Auxiliary routines for obtaining memory from OS.
1188 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1189 && !defined(MSWIN32) && !defined(MSWINCE) \
1190 && !defined(MACOS) && !defined(DOS4GW)
1192 # ifdef SUNOS4
1193 extern caddr_t sbrk();
1194 # endif
1195 # ifdef __STDC__
1196 # define SBRK_ARG_T ptrdiff_t
1197 # else
1198 # define SBRK_ARG_T int
1199 # endif
1202 # ifdef RS6000
1203 /* The compiler seems to generate speculative reads one past the end of */
1204 /* an allocated object. Hence we need to make sure that the page */
1205 /* following the last heap page is also mapped. */
1206 ptr_t GC_unix_get_mem(bytes)
1207 word bytes;
1209 caddr_t cur_brk = (caddr_t)sbrk(0);
1210 caddr_t result;
1211 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1212 static caddr_t my_brk_val = 0;
1214 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1215 if (lsbs != 0) {
1216 if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
1218 if (cur_brk == my_brk_val) {
1219 /* Use the extra block we allocated last time. */
1220 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1221 if (result == (caddr_t)(-1)) return(0);
1222 result -= GC_page_size;
1223 } else {
1224 result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
1225 if (result == (caddr_t)(-1)) return(0);
1227 my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
1228 return((ptr_t)result);
1231 #else /* Not RS6000 */
1233 #if defined(USE_MMAP)
1234 /* Tested only under Linux, IRIX5 and Solaris 2 */
1236 #ifdef USE_MMAP_FIXED
1237 # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1238 /* Seems to yield better performance on Solaris 2, but can */
1239 /* be unreliable if something is already mapped at the address. */
1240 #else
1241 # define GC_MMAP_FLAGS MAP_PRIVATE
1242 #endif
1244 #ifndef HEAP_START
1245 # define HEAP_START 0
1246 #endif
1248 ptr_t GC_unix_get_mem(bytes)
1249 word bytes;
1251 static GC_bool initialized = FALSE;
1252 static int fd;
1253 void *result;
1254 static ptr_t last_addr = HEAP_START;
1256 if (!initialized) {
1257 fd = open("/dev/zero", O_RDONLY);
1258 initialized = TRUE;
1260 if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
1261 result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1262 GC_MMAP_FLAGS, fd, 0/* offset */);
1263 if (result == MAP_FAILED) return(0);
1264 last_addr = (ptr_t)result + bytes + GC_page_size - 1;
1265 last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
1266 # if !defined(LINUX)
1267 if (last_addr == 0) {
1268 /* Oops. We got the end of the address space. This isn't */
1269 /* usable by arbitrary C code, since one-past-end pointers */
1270 /* don't work, so we discard it and try again. */
1271 munmap(result, (size_t)(-GC_page_size) - (size_t)result);
1272 /* Leave last page mapped, so we can't repeat. */
1273 return GC_unix_get_mem(bytes);
1275 # else
1276 GC_ASSERT(last_addr != 0);
1277 # endif
1278 return((ptr_t)result);
1281 #else /* Not RS6000, not USE_MMAP */
1282 ptr_t GC_unix_get_mem(bytes)
1283 word bytes;
1285 ptr_t result;
1286 # ifdef IRIX5
1287 /* Bare sbrk isn't thread safe. Play by malloc rules. */
1288 /* The equivalent may be needed on other systems as well. */
1289 __LOCK_MALLOC();
1290 # endif
1292 ptr_t cur_brk = (ptr_t)sbrk(0);
1293 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1295 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1296 if (lsbs != 0) {
1297 if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
1299 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1300 if (result == (ptr_t)(-1)) result = 0;
1302 # ifdef IRIX5
1303 __UNLOCK_MALLOC();
1304 # endif
1305 return(result);
1308 #endif /* Not USE_MMAP */
1309 #endif /* Not RS6000 */
1311 # endif /* UN*X */
1313 # ifdef OS2
1315 void * os2_alloc(size_t bytes)
1317 void * result;
1319 if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
1320 PAG_WRITE | PAG_COMMIT)
1321 != NO_ERROR) {
1322 return(0);
1324 if (result == 0) return(os2_alloc(bytes));
1325 return(result);
1328 # endif /* OS2 */
1331 # if defined(MSWIN32) || defined(MSWINCE)
1332 SYSTEM_INFO GC_sysinfo;
1333 # endif
1336 # ifdef MSWIN32
1337 word GC_n_heap_bases = 0;
1339 ptr_t GC_win32_get_mem(bytes)
1340 word bytes;
1342 ptr_t result;
1344 if (GC_win32s) {
1345 /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
1346 /* There are also unconfirmed rumors of other */
1347 /* problems, so we dodge the issue. */
1348 result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
1349 result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
1350 } else {
1351 result = (ptr_t) VirtualAlloc(NULL, bytes,
1352 MEM_COMMIT | MEM_RESERVE,
1353 PAGE_EXECUTE_READWRITE);
1355 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1356 /* If I read the documentation correctly, this can */
1357 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1358 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1359 GC_heap_bases[GC_n_heap_bases++] = result;
1360 return(result);
1363 void GC_win32_free_heap ()
1365 if (GC_win32s) {
1366 while (GC_n_heap_bases > 0) {
1367 GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
1368 GC_heap_bases[GC_n_heap_bases] = 0;
1372 # endif
1374 #ifdef __AMIGAOS__
1375 # define GC_AMIGA_AM
1376 # include "AmigaOS.c"
1377 # undef GC_AMIGA_AM
1378 #endif
1381 # ifdef MSWINCE
1382 word GC_n_heap_bases = 0;
1384 ptr_t GC_wince_get_mem(bytes)
1385 word bytes;
1387 ptr_t result;
1388 word i;
1390 /* Round up allocation size to multiple of page size */
1391 bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);
1393 /* Try to find reserved, uncommitted pages */
1394 for (i = 0; i < GC_n_heap_bases; i++) {
1395 if (((word)(-(signed_word)GC_heap_lengths[i])
1396 & (GC_sysinfo.dwAllocationGranularity-1))
1397 >= bytes) {
1398 result = GC_heap_bases[i] + GC_heap_lengths[i];
1399 break;
1403 if (i == GC_n_heap_bases) {
1404 /* Reserve more pages */
1405 word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
1406 & ~(GC_sysinfo.dwAllocationGranularity-1);
1407 result = (ptr_t) VirtualAlloc(NULL, res_bytes,
1408 MEM_RESERVE | MEM_TOP_DOWN,
1409 PAGE_EXECUTE_READWRITE);
1410 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1411 /* If I read the documentation correctly, this can */
1412 /* only happen if HBLKSIZE > 64k or not a power of 2. */
1413 if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1414 GC_heap_bases[GC_n_heap_bases] = result;
1415 GC_heap_lengths[GC_n_heap_bases] = 0;
1416 GC_n_heap_bases++;
1419 /* Commit pages */
1420 result = (ptr_t) VirtualAlloc(result, bytes,
1421 MEM_COMMIT,
1422 PAGE_EXECUTE_READWRITE);
1423 if (result != NULL) {
1424 if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1425 GC_heap_lengths[i] += bytes;
1428 return(result);
1430 # endif
1432 #ifdef USE_MUNMAP
1434 /* For now, this only works on Win32/WinCE and some Unix-like */
1435 /* systems. If you have something else, don't define */
1436 /* USE_MUNMAP. */
1437 /* We assume ANSI C to support this feature. */
1439 #if !defined(MSWIN32) && !defined(MSWINCE)
1441 #include <unistd.h>
1442 #include <sys/mman.h>
1443 #include <sys/stat.h>
1444 #include <sys/types.h>
1446 #endif
1448 /* Compute a page aligned starting address for the unmap */
1449 /* operation on a block of size bytes starting at start. */
1450 /* Return 0 if the block is too small to make this feasible. */
1451 ptr_t GC_unmap_start(ptr_t start, word bytes)
1453 ptr_t result = start;
1454 /* Round start to next page boundary. */
1455 result += GC_page_size - 1;
1456 result = (ptr_t)((word)result & ~(GC_page_size - 1));
1457 if (result + GC_page_size > start + bytes) return 0;
1458 return result;
1461 /* Compute end address for an unmap operation on the indicated */
1462 /* block. */
1463 ptr_t GC_unmap_end(ptr_t start, word bytes)
1465 ptr_t end_addr = start + bytes;
1466 end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
1467 return end_addr;
1470 /* Under Win32/WinCE we commit (map) and decommit (unmap) */
1471 /* memory using VirtualAlloc and VirtualFree. These functions */
1472 /* work on individual allocations of virtual memory, made */
1473 /* previously using VirtualAlloc with the MEM_RESERVE flag. */
1474 /* The ranges we need to (de)commit may span several of these */
1475 /* allocations; therefore we use VirtualQuery to check */
1476 /* allocation lengths, and split up the range as necessary. */
1478 /* We assume that GC_remap is called on exactly the same range */
1479 /* as a previous call to GC_unmap. It is safe to consistently */
1480 /* round the endpoints in both places. */
1481 void GC_unmap(ptr_t start, word bytes)
1483 ptr_t start_addr = GC_unmap_start(start, bytes);
1484 ptr_t end_addr = GC_unmap_end(start, bytes);
1485 word len = end_addr - start_addr;
1486 if (0 == start_addr) return;
1487 # if defined(MSWIN32) || defined(MSWINCE)
1488 while (len != 0) {
1489 MEMORY_BASIC_INFORMATION mem_info;
1490 GC_word free_len;
1491 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1492 != sizeof(mem_info))
1493 ABORT("Weird VirtualQuery result");
1494 free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1495 if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
1496 ABORT("VirtualFree failed");
1497 GC_unmapped_bytes += free_len;
1498 start_addr += free_len;
1499 len -= free_len;
1501 # else
1502 if (munmap(start_addr, len) != 0) ABORT("munmap failed");
1503 GC_unmapped_bytes += len;
1504 # endif
1508 void GC_remap(ptr_t start, word bytes)
1510 static int zero_descr = -1;
1511 ptr_t start_addr = GC_unmap_start(start, bytes);
1512 ptr_t end_addr = GC_unmap_end(start, bytes);
1513 word len = end_addr - start_addr;
1514 ptr_t result;
1516 # if defined(MSWIN32) || defined(MSWINCE)
1517 if (0 == start_addr) return;
1518 while (len != 0) {
1519 MEMORY_BASIC_INFORMATION mem_info;
1520 GC_word alloc_len;
1521 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1522 != sizeof(mem_info))
1523 ABORT("Weird VirtualQuery result");
1524 alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1525 result = VirtualAlloc(start_addr, alloc_len,
1526 MEM_COMMIT,
1527 PAGE_EXECUTE_READWRITE);
1528 if (result != start_addr) {
1529 ABORT("VirtualAlloc remapping failed");
1531 GC_unmapped_bytes -= alloc_len;
1532 start_addr += alloc_len;
1533 len -= alloc_len;
1535 # else
1536 if (-1 == zero_descr) zero_descr = open("/dev/zero", O_RDWR);
1537 if (0 == start_addr) return;
1538 result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1539 MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
1540 if (result != start_addr) {
1541 ABORT("mmap remapping failed");
1543 GC_unmapped_bytes -= len;
1544 # endif
1547 /* Two adjacent blocks have already been unmapped and are about to */
1548 /* be merged. Unmap the whole block. This typically requires */
1549 /* that we unmap a small section in the middle that was not previously */
1550 /* unmapped due to alignment constraints. */
1551 void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
1553 ptr_t start1_addr = GC_unmap_start(start1, bytes1);
1554 ptr_t end1_addr = GC_unmap_end(start1, bytes1);
1555 ptr_t start2_addr = GC_unmap_start(start2, bytes2);
1556 ptr_t end2_addr = GC_unmap_end(start2, bytes2);
1557 ptr_t start_addr = end1_addr;
1558 ptr_t end_addr = start2_addr;
1559 word len;
1560 GC_ASSERT(start1 + bytes1 == start2);
1561 if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
1562 if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
1563 if (0 == start_addr) return;
1564 len = end_addr - start_addr;
1565 # if defined(MSWIN32) || defined(MSWINCE)
1566 while (len != 0) {
1567 MEMORY_BASIC_INFORMATION mem_info;
1568 GC_word free_len;
1569 if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
1570 != sizeof(mem_info))
1571 ABORT("Weird VirtualQuery result");
1572 free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
1573 if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
1574 ABORT("VirtualFree failed");
1575 GC_unmapped_bytes += free_len;
1576 start_addr += free_len;
1577 len -= free_len;
1579 # else
1580 if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
1581 GC_unmapped_bytes += len;
1582 # endif
1585 #endif /* USE_MUNMAP */
1587 /* Routine for pushing any additional roots. In THREADS */
1588 /* environment, this is also responsible for marking from */
1589 /* thread stacks. */
1590 #ifndef THREADS
1591 void (*GC_push_other_roots)() = 0;
1592 #else /* THREADS */
1594 # ifdef PCR
1595 PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
1597 struct PCR_ThCtl_TInfoRep info;
1598 PCR_ERes result;
1600 info.ti_stkLow = info.ti_stkHi = 0;
1601 result = PCR_ThCtl_GetInfo(t, &info);
1602 GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
1603 return(result);
1606 /* Push the contents of an old object. We treat this as stack */
1607 /* data only becasue that makes it robust against mark stack */
1608 /* overflow. */
1609 PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
1611 GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
1612 return(PCR_ERes_okay);
1616 void GC_default_push_other_roots GC_PROTO((void))
1618 /* Traverse data allocated by previous memory managers. */
1620 extern struct PCR_MM_ProcsRep * GC_old_allocator;
1622 if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
1623 GC_push_old_obj, 0)
1624 != PCR_ERes_okay) {
1625 ABORT("Old object enumeration failed");
1628 /* Traverse all thread stacks. */
1629 if (PCR_ERes_IsErr(
1630 PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
1631 || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
1632 ABORT("Thread stack marking failed\n");
1636 # endif /* PCR */
1638 # ifdef SRC_M3
1640 # ifdef ALL_INTERIOR_POINTERS
1641 --> misconfigured
1642 # endif
1644 void GC_push_thread_structures GC_PROTO((void))
1646 /* Not our responsibibility. */
1649 extern void ThreadF__ProcessStacks();
1651 void GC_push_thread_stack(start, stop)
1652 word start, stop;
1654 GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
1657 /* Push routine with M3 specific calling convention. */
1658 GC_m3_push_root(dummy1, p, dummy2, dummy3)
1659 word *p;
1660 ptr_t dummy1, dummy2;
1661 int dummy3;
1663 word q = *p;
1665 GC_PUSH_ONE_STACK(q, p);
1668 /* M3 set equivalent to RTHeap.TracedRefTypes */
1669 typedef struct { int elts[1]; } RefTypeSet;
1670 RefTypeSet GC_TracedRefTypes = {{0x1}};
1672 void GC_default_push_other_roots GC_PROTO((void))
1674 /* Use the M3 provided routine for finding static roots. */
1675 /* This is a bit dubious, since it presumes no C roots. */
1676 /* We handle the collector roots explicitly in GC_push_roots */
1677 RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
1678 if (GC_words_allocd > 0) {
1679 ThreadF__ProcessStacks(GC_push_thread_stack);
1681 /* Otherwise this isn't absolutely necessary, and we have */
1682 /* startup ordering problems. */
1685 # endif /* SRC_M3 */
1687 # if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
1688 defined(GC_WIN32_THREADS)
1690 extern void GC_push_all_stacks();
1692 void GC_default_push_other_roots GC_PROTO((void))
1694 GC_push_all_stacks();
1697 # endif /* GC_SOLARIS_THREADS || GC_PTHREADS */
1699 void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;
1701 #endif /* THREADS */
1704 * Routines for accessing dirty bits on virtual pages.
1705 * We plan to eventually implement four strategies for doing so:
1706 * DEFAULT_VDB: A simple dummy implementation that treats every page
1707 * as possibly dirty. This makes incremental collection
1708 * useless, but the implementation is still correct.
1709 * PCR_VDB: Use PPCRs virtual dirty bit facility.
1710 * PROC_VDB: Use the /proc facility for reading dirty bits. Only
1711 * works under some SVR4 variants. Even then, it may be
1712 * too slow to be entirely satisfactory. Requires reading
1713 * dirty bits for entire address space. Implementations tend
1714 * to assume that the client is a (slow) debugger.
1715 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
1716 * dirtied pages. The implementation (and implementability)
1717 * is highly system dependent. This usually fails when system
1718 * calls write to a protected page. We prevent the read system
1719 * call from doing so. It is the clients responsibility to
1720 * make sure that other system calls are similarly protected
1721 * or write only to the stack.
1724 GC_bool GC_dirty_maintained = FALSE;
1726 # ifdef DEFAULT_VDB
1728 /* All of the following assume the allocation lock is held, and */
1729 /* signals are disabled. */
1731 /* The client asserts that unallocated pages in the heap are never */
1732 /* written. */
1734 /* Initialize virtual dirty bit implementation. */
1735 void GC_dirty_init()
1737 GC_dirty_maintained = TRUE;
1740 /* Retrieve system dirty bits for heap to a local buffer. */
1741 /* Restore the systems notion of which pages are dirty. */
/* Retrieve system dirty bits for heap to a local buffer.	*/
/* Restore the systems notion of which pages are dirty.		*/
/* DEFAULT_VDB: no-op, every page is always considered dirty.	*/
void GC_read_dirty()
{
}
1745 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
1746 /* If the actual page size is different, this returns TRUE if any */
1747 /* of the pages overlapping h are dirty. This routine may err on the */
1748 /* side of labelling pages as dirty (and this implementation does). */
1749 /*ARGSUSED*/
1750 GC_bool GC_page_was_dirty(h)
1751 struct hblk *h;
1753 return(TRUE);
1757 * The following two routines are typically less crucial. They matter
1758 * most with large dynamic libraries, or if we can't accurately identify
1759 * stacks, e.g. under Solaris 2.X. Otherwise the following default
1760 * versions are adequate.
1763 /* Could any valid GC heap pointer ever have been written to this page? */
1764 /*ARGSUSED*/
1765 GC_bool GC_page_was_ever_dirty(h)
1766 struct hblk *h;
1768 return(TRUE);
1771 /* Reset the n pages starting at h to "was never dirty" status. */
1772 void GC_is_fresh(h, n)
1773 struct hblk *h;
1774 word n;
1778 /* A call hints that h is about to be written. */
1779 /* May speed up some dirty bit implementations. */
1780 /*ARGSUSED*/
/* A call hints that h is about to be written.		*/
/* May speed up some dirty bit implementations;		*/
/* DEFAULT_VDB ignores the hint.			*/
/*ARGSUSED*/
void GC_write_hint(h)
struct hblk *h;
{
}
1786 # endif /* DEFAULT_VDB */
1789 # ifdef MPROTECT_VDB
1792 * See DEFAULT_VDB for interface descriptions.
1796 * This implementation maintains dirty bits itself by catching write
1797 * faults and keeping track of them. We assume nobody else catches
1798 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
1799 * except as a result of a read system call. This means clients must
1800 * either ensure that system calls do not touch the heap, or must
1801 * provide their own wrappers analogous to the one for read.
1802 * We assume the page size is a multiple of HBLKSIZE.
1803 * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
1804 * tried to use portable code where easily possible. It is known
1805 * not to work under a number of other systems.
1808 # if !defined(MSWIN32) && !defined(MSWINCE)
1810 # include <sys/mman.h>
1811 # include <signal.h>
1812 # include <sys/syscall.h>
1814 # define PROTECT(addr, len) \
1815 if (mprotect((caddr_t)(addr), (size_t)(len), \
1816 PROT_READ | OPT_PROT_EXEC) < 0) { \
1817 ABORT("mprotect failed"); \
1819 # define UNPROTECT(addr, len) \
1820 if (mprotect((caddr_t)(addr), (size_t)(len), \
1821 PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1822 ABORT("un-mprotect failed"); \
1825 # else
1827 # ifndef MSWINCE
1828 # include <signal.h>
1829 # endif
1831 static DWORD protect_junk;
1832 # define PROTECT(addr, len) \
1833 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1834 &protect_junk)) { \
1835 DWORD last_error = GetLastError(); \
1836 GC_printf1("Last error code: %lx\n", last_error); \
1837 ABORT("VirtualProtect failed"); \
1839 # define UNPROTECT(addr, len) \
1840 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1841 &protect_junk)) { \
1842 ABORT("un-VirtualProtect failed"); \
1845 # endif
1847 #if defined(SUNOS4) || defined(FREEBSD)
1848 typedef void (* SIG_PF)();
1849 #endif
1850 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
1851 || defined(MACOSX) || defined(HURD)
1852 # ifdef __STDC__
1853 typedef void (* SIG_PF)(int);
1854 # else
1855 typedef void (* SIG_PF)();
1856 # endif
1857 #endif
1858 #if defined(MSWIN32)
1859 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
1860 # undef SIG_DFL
1861 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1862 #endif
1863 #if defined(MSWINCE)
1864 typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
1865 # undef SIG_DFL
1866 # define SIG_DFL (SIG_PF) (-1)
1867 #endif
1869 #if defined(IRIX5) || defined(OSF1) || defined(HURD)
1870 typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1871 #endif
1872 #if defined(SUNOS5SIGS)
1873 # ifdef HPUX
1874 # define SIGINFO __siginfo
1875 # else
1876 # define SIGINFO siginfo
1877 # endif
1878 # ifdef __STDC__
1879 typedef void (* REAL_SIG_PF)(int, struct SIGINFO *, void *);
1880 # else
1881 typedef void (* REAL_SIG_PF)();
1882 # endif
1883 #endif
1884 #if defined(LINUX)
1885 # include <linux/version.h>
1886 # if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(IA64)
1887 typedef struct sigcontext s_c;
1888 # else
1889 typedef struct sigcontext_struct s_c;
1890 # endif
1891 # if defined(ALPHA) || defined(M68K)
1892 typedef void (* REAL_SIG_PF)(int, int, s_c *);
1893 # else
1894 # if defined(IA64) || defined(HP_PA)
1895 typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
1896 # else
1897 typedef void (* REAL_SIG_PF)(int, s_c);
1898 # endif
1899 # endif
1900 # ifdef ALPHA
1901 /* Retrieve fault address from sigcontext structure by decoding */
1902 /* instruction. */
1903 char * get_fault_addr(s_c *sc) {
1904 unsigned instr;
1905 word faultaddr;
1907 instr = *((unsigned *)(sc->sc_pc));
1908 faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
1909 faultaddr += (word) (((int)instr << 16) >> 16);
1910 return (char *)faultaddr;
1912 # endif /* !ALPHA */
1913 # endif
1915 # if defined(MACOSX) /* Should also test for PowerPC? */
1916 typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1918 /* Decodes the machine instruction which was responsible for the sending of the
1919 SIGBUS signal. Sadly this is the only way to find the faulting address because
1920 the signal handler doesn't get it directly from the kernel (although it is
1921 available on the Mach level, but droppped by the BSD personality before it
1922 calls our signal handler...)
1923 This code should be able to deal correctly with all PPCs starting from the
1924 601 up to and including the G4s (including Velocity Engine). */
1925 #define EXTRACT_OP1(iw) (((iw) & 0xFC000000) >> 26)
1926 #define EXTRACT_OP2(iw) (((iw) & 0x000007FE) >> 1)
1927 #define EXTRACT_REGA(iw) (((iw) & 0x001F0000) >> 16)
1928 #define EXTRACT_REGB(iw) (((iw) & 0x03E00000) >> 21)
1929 #define EXTRACT_REGC(iw) (((iw) & 0x0000F800) >> 11)
1930 #define EXTRACT_DISP(iw) ((short *) &(iw))[1]
1932 static char *get_fault_addr(struct sigcontext *scp)
1934 unsigned int instr = *((unsigned int *) scp->sc_ir);
1935 unsigned int * regs = &((unsigned int *) scp->sc_regs)[2];
1936 int disp = 0, tmp;
1937 unsigned int baseA = 0, baseB = 0;
1938 unsigned int addr, alignmask = 0xFFFFFFFF;
1940 #ifdef GC_DEBUG_DECODER
1941 GC_err_printf1("Instruction: 0x%lx\n", instr);
1942 GC_err_printf1("Opcode 1: d\n", (int)EXTRACT_OP1(instr));
1943 #endif
1944 switch(EXTRACT_OP1(instr)) {
1945 case 38: /* stb */
1946 case 39: /* stbu */
1947 case 54: /* stfd */
1948 case 55: /* stfdu */
1949 case 52: /* stfs */
1950 case 53: /* stfsu */
1951 case 44: /* sth */
1952 case 45: /* sthu */
1953 case 47: /* stmw */
1954 case 36: /* stw */
1955 case 37: /* stwu */
1956 tmp = EXTRACT_REGA(instr);
1957 if(tmp > 0)
1958 baseA = regs[tmp];
1959 disp = EXTRACT_DISP(instr);
1960 break;
1961 case 31:
1962 #ifdef GC_DEBUG_DECODER
1963 GC_err_printf1("Opcode 2: %d\n", (int)EXTRACT_OP2(instr));
1964 #endif
1965 switch(EXTRACT_OP2(instr)) {
1966 case 86: /* dcbf */
1967 case 54: /* dcbst */
1968 case 1014: /* dcbz */
1969 case 247: /* stbux */
1970 case 215: /* stbx */
1971 case 759: /* stfdux */
1972 case 727: /* stfdx */
1973 case 983: /* stfiwx */
1974 case 695: /* stfsux */
1975 case 663: /* stfsx */
1976 case 918: /* sthbrx */
1977 case 439: /* sthux */
1978 case 407: /* sthx */
1979 case 661: /* stswx */
1980 case 662: /* stwbrx */
1981 case 150: /* stwcx. */
1982 case 183: /* stwux */
1983 case 151: /* stwx */
1984 case 135: /* stvebx */
1985 case 167: /* stvehx */
1986 case 199: /* stvewx */
1987 case 231: /* stvx */
1988 case 487: /* stvxl */
1989 tmp = EXTRACT_REGA(instr);
1990 if(tmp > 0)
1991 baseA = regs[tmp];
1992 baseB = regs[EXTRACT_REGC(instr)];
1993 /* determine Altivec alignment mask */
1994 switch(EXTRACT_OP2(instr)) {
1995 case 167: /* stvehx */
1996 alignmask = 0xFFFFFFFE;
1997 break;
1998 case 199: /* stvewx */
1999 alignmask = 0xFFFFFFFC;
2000 break;
2001 case 231: /* stvx */
2002 alignmask = 0xFFFFFFF0;
2003 break;
2004 case 487: /* stvxl */
2005 alignmask = 0xFFFFFFF0;
2006 break;
2008 break;
2009 case 725: /* stswi */
2010 tmp = EXTRACT_REGA(instr);
2011 if(tmp > 0)
2012 baseA = regs[tmp];
2013 break;
2014 default: /* ignore instruction */
2015 #ifdef GC_DEBUG_DECODER
2016 GC_err_printf("Ignored by inner handler\n");
2017 #endif
2018 return NULL;
2019 break;
2021 break;
2022 default: /* ignore instruction */
2023 #ifdef GC_DEBUG_DECODER
2024 GC_err_printf("Ignored by main handler\n");
2025 #endif
2026 return NULL;
2027 break;
2030 addr = (baseA + baseB) + disp;
2031 addr &= alignmask;
2032 #ifdef GC_DEBUG_DECODER
2033 GC_err_printf1("BaseA: %d\n", baseA);
2034 GC_err_printf1("BaseB: %d\n", baseB);
2035 GC_err_printf1("Disp: %d\n", disp);
2036 GC_err_printf1("Address: %d\n", addr);
2037 #endif
2038 return (char *)addr;
2040 #endif /* MACOSX */
2042 SIG_PF GC_old_bus_handler;
2043 SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
2045 #ifdef THREADS
2046 /* We need to lock around the bitmap update in the write fault handler */
2047 /* in order to avoid the risk of losing a bit. We do this with a */
2048 /* test-and-set spin lock if we know how to do that. Otherwise we */
2049 /* check whether we are already in the handler and use the dumb but */
2050 /* safe fallback algorithm of setting all bits in the word. */
2051 /* Contention should be very rare, so we do the minimum to handle it */
2052 /* correctly. */
2053 #ifdef GC_TEST_AND_SET_DEFINED
2054 static VOLATILE unsigned int fault_handler_lock = 0;
2055 void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
2056 while (GC_test_and_set(&fault_handler_lock)) {}
2057 /* Could also revert to set_pht_entry_from_index_safe if initial */
2058 /* GC_test_and_set fails. */
2059 set_pht_entry_from_index(db, index);
2060 GC_clear(&fault_handler_lock);
2062 #else /* !GC_TEST_AND_SET_DEFINED */
2063 /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
2064 /* just before we notice the conflict and correct it. We may end up */
2065 /* looking at it while it's wrong. But this requires contention */
2066 /* exactly when a GC is triggered, which seems far less likely to */
2067 /* fail than the old code, which had no reported failures. Thus we */
2068 /* leave it this way while we think of something better, or support */
2069 /* GC_test_and_set on the remaining platforms. */
2070 static VOLATILE word currently_updating = 0;
2071 void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
2072 unsigned int update_dummy;
2073 currently_updating = (word)(&update_dummy);
2074 set_pht_entry_from_index(db, index);
2075 /* If we get contention in the 10 or so instruction window here, */
2076 /* and we get stopped by a GC between the two updates, we lose! */
2077 if (currently_updating != (word)(&update_dummy)) {
2078 set_pht_entry_from_index_safe(db, index);
2079 /* We claim that if two threads concurrently try to update the */
2080 /* dirty bit vector, the first one to execute UPDATE_START */
2081 /* will see it changed when UPDATE_END is executed. (Note that */
2082 /* &update_dummy must differ in two distinct threads.) It */
2083 /* will then execute set_pht_entry_from_index_safe, thus */
2084 /* returning us to a safe state, though not soon enough. */
2087 #endif /* !GC_TEST_AND_SET_DEFINED */
2088 #else /* !THREADS */
2089 # define async_set_pht_entry_from_index(db, index) \
2090 set_pht_entry_from_index(db, index)
2091 #endif /* !THREADS */
2093 /*ARGSUSED*/
2094 # if defined (SUNOS4) || defined(FREEBSD)
2095 void GC_write_fault_handler(sig, code, scp, addr)
2096 int sig, code;
2097 struct sigcontext *scp;
2098 char * addr;
2099 # ifdef SUNOS4
2100 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2101 # define CODE_OK (FC_CODE(code) == FC_PROT \
2102 || (FC_CODE(code) == FC_OBJERR \
2103 && FC_ERRNO(code) == FC_PROT))
2104 # endif
2105 # ifdef FREEBSD
2106 # define SIG_OK (sig == SIGBUS)
2107 # define CODE_OK (code == BUS_PAGE_FAULT)
2108 # endif
2109 # endif
2110 # if defined(IRIX5) || defined(OSF1) || defined(HURD)
2111 # include <errno.h>
2112 void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
2113 # ifdef OSF1
2114 # define SIG_OK (sig == SIGSEGV)
2115 # define CODE_OK (code == 2 /* experimentally determined */)
2116 # endif
2117 # ifdef IRIX5
2118 # define SIG_OK (sig == SIGSEGV)
2119 # define CODE_OK (code == EACCES)
2120 # endif
2121 # ifdef HURD
2122 # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
2123 # define CODE_OK TRUE
2124 # endif
2125 # endif
2126 # if defined(LINUX)
2127 # if defined(ALPHA) || defined(M68K)
2128 void GC_write_fault_handler(int sig, int code, s_c * sc)
2129 # else
2130 # if defined(IA64) || defined(HP_PA)
2131 void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
2132 # else
2133 void GC_write_fault_handler(int sig, s_c sc)
2134 # endif
2135 # endif
2136 # define SIG_OK (sig == SIGSEGV)
2137 # define CODE_OK TRUE
2138 /* Empirically c.trapno == 14, on IA32, but is that useful? */
2139 /* Should probably consider alignment issues on other */
2140 /* architectures. */
2141 # endif
2142 # if defined(SUNOS5SIGS)
2143 # ifdef __STDC__
2144 void GC_write_fault_handler(int sig, struct SIGINFO *scp, void * context)
2145 # else
2146 void GC_write_fault_handler(sig, scp, context)
2147 int sig;
2148 struct SIGINFO *scp;
2149 void * context;
2150 # endif
2151 # ifdef HPUX
2152 # define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
2153 # define CODE_OK (scp -> si_code == SEGV_ACCERR) \
2154 || (scp -> si_code == BUS_ADRERR) \
2155 || (scp -> si_code == BUS_UNKNOWN) \
2156 || (scp -> si_code == SEGV_UNKNOWN) \
2157 || (scp -> si_code == BUS_OBJERR)
2158 # else
2159 # define SIG_OK (sig == SIGSEGV)
2160 # define CODE_OK (scp -> si_code == SEGV_ACCERR)
2161 # endif
2162 # endif
2164 # if defined(MACOSX)
2165 void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
2166 # define SIG_OK (sig == SIGBUS)
2167 # define CODE_OK (code == 0 /* experimentally determined */)
2168 # endif
2170 # if defined(MSWIN32) || defined(MSWINCE)
2171 LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
2172 # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
2173 STATUS_ACCESS_VIOLATION)
2174 # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
2175 /* Write fault */
2176 # endif
2178 register unsigned i;
2179 # if defined(HURD)
2180 char *addr = (char *) code;
2181 # endif
2182 # ifdef IRIX5
2183 char * addr = (char *) (size_t) (scp -> sc_badvaddr);
2184 # endif
2185 # if defined(OSF1) && defined(ALPHA)
2186 char * addr = (char *) (scp -> sc_traparg_a0);
2187 # endif
2188 # ifdef SUNOS5SIGS
2189 char * addr = (char *) (scp -> si_addr);
2190 # endif
2191 # ifdef LINUX
2192 # ifdef I386
2193 char * addr = (char *) (sc.cr2);
2194 # else
2195 # if defined(M68K)
2196 char * addr = NULL;
2198 struct sigcontext *scp = (struct sigcontext *)(sc);
2200 int format = (scp->sc_formatvec >> 12) & 0xf;
2201 unsigned long *framedata = (unsigned long *)(scp + 1);
2202 unsigned long ea;
2204 if (format == 0xa || format == 0xb) {
2205 /* 68020/030 */
2206 ea = framedata[2];
2207 } else if (format == 7) {
2208 /* 68040 */
2209 ea = framedata[3];
2210 if (framedata[1] & 0x08000000) {
2211 /* correct addr on misaligned access */
2212 ea = (ea+4095)&(~4095);
2214 } else if (format == 4) {
2215 /* 68060 */
2216 ea = framedata[0];
2217 if (framedata[1] & 0x08000000) {
2218 /* correct addr on misaligned access */
2219 ea = (ea+4095)&(~4095);
2222 addr = (char *)ea;
2223 # else
2224 # ifdef ALPHA
2225 char * addr = get_fault_addr(sc);
2226 # else
2227 # if defined(IA64) || defined(HP_PA)
2228 char * addr = si -> si_addr;
2229 /* I believe this is claimed to work on all platforms for */
2230 /* Linux 2.3.47 and later. Hopefully we don't have to */
2231 /* worry about earlier kernels on IA64. */
2232 # else
2233 # if defined(POWERPC)
2234 char * addr = (char *) (sc.regs->dar);
2235 # else
2236 --> architecture not supported
2237 # endif
2238 # endif
2239 # endif
2240 # endif
2241 # endif
2242 # endif
2243 # if defined(MACOSX)
2244 char * addr = get_fault_addr(scp);
2245 # endif
2246 # if defined(MSWIN32) || defined(MSWINCE)
2247 char * addr = (char *) (exc_info -> ExceptionRecord
2248 -> ExceptionInformation[1]);
2249 # define sig SIGSEGV
2250 # endif
2252 if (SIG_OK && CODE_OK) {
2253 register struct hblk * h =
2254 (struct hblk *)((word)addr & ~(GC_page_size-1));
2255 GC_bool in_allocd_block;
2257 # ifdef SUNOS5SIGS
2258 /* Address is only within the correct physical page. */
2259 in_allocd_block = FALSE;
2260 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2261 if (HDR(h+i) != 0) {
2262 in_allocd_block = TRUE;
2265 # else
2266 in_allocd_block = (HDR(addr) != 0);
2267 # endif
2268 if (!in_allocd_block) {
2269 /* Heap blocks now begin and end on page boundaries */
2270 SIG_PF old_handler;
2272 if (sig == SIGSEGV) {
2273 old_handler = GC_old_segv_handler;
2274 } else {
2275 old_handler = GC_old_bus_handler;
2277 if (old_handler == SIG_DFL) {
2278 # if !defined(MSWIN32) && !defined(MSWINCE)
2279 GC_err_printf1("Segfault at 0x%lx\n", addr);
2280 ABORT("Unexpected bus error or segmentation fault");
2281 # else
2282 return(EXCEPTION_CONTINUE_SEARCH);
2283 # endif
2284 } else {
2285 # if defined (SUNOS4) || defined(FREEBSD)
2286 (*old_handler) (sig, code, scp, addr);
2287 return;
2288 # endif
2289 # if defined (SUNOS5SIGS)
2290 (*(REAL_SIG_PF)old_handler) (sig, scp, context);
2291 return;
2292 # endif
2293 # if defined (LINUX)
2294 # if defined(ALPHA) || defined(M68K)
2295 (*(REAL_SIG_PF)old_handler) (sig, code, sc);
2296 # else
2297 # if defined(IA64) || defined(HP_PA)
2298 (*(REAL_SIG_PF)old_handler) (sig, si, scp);
2299 # else
2300 (*(REAL_SIG_PF)old_handler) (sig, sc);
2301 # endif
2302 # endif
2303 return;
2304 # endif
2305 # if defined (IRIX5) || defined(OSF1) || defined(HURD)
2306 (*(REAL_SIG_PF)old_handler) (sig, code, scp);
2307 return;
2308 # endif
2309 # ifdef MACOSX
2310 (*(REAL_SIG_PF)old_handler) (sig, code, scp);
2311 # endif
2312 # ifdef MSWIN32
2313 return((*old_handler)(exc_info));
2314 # endif
2317 UNPROTECT(h, GC_page_size);
2318 /* We need to make sure that no collection occurs between */
2319 /* the UNPROTECT and the setting of the dirty bit. Otherwise */
2320 /* a write by a third thread might go unnoticed. Reversing */
2321 /* the order is just as bad, since we would end up unprotecting */
2322 /* a page in a GC cycle during which it's not marked. */
2323 /* Currently we do this by disabling the thread stopping */
2324 /* signals while this handler is running. An alternative might */
2325 /* be to record the fact that we're about to unprotect, or */
2326 /* have just unprotected a page in the GC's thread structure, */
2327 /* and then to have the thread stopping code set the dirty */
2328 /* flag, if necessary. */
2329 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2330 register int index = PHT_HASH(h+i);
2332 async_set_pht_entry_from_index(GC_dirty_pages, index);
2334 # if defined(OSF1)
2335 /* These reset the signal handler each time by default. */
2336 signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
2337 # endif
2338 /* The write may not take place before dirty bits are read. */
2339 /* But then we'll fault again ... */
2340 # if defined(MSWIN32) || defined(MSWINCE)
2341 return(EXCEPTION_CONTINUE_EXECUTION);
2342 # else
2343 return;
2344 # endif
2346 #if defined(MSWIN32) || defined(MSWINCE)
2347 return EXCEPTION_CONTINUE_SEARCH;
2348 #else
2349 GC_err_printf1("Segfault at 0x%lx\n", addr);
2350 ABORT("Unexpected bus error or segmentation fault");
2351 #endif
2355 * We hold the allocation lock. We expect block h to be written
2356 * shortly.
/* GC_write_hint: caller holds the allocation lock and expects block h   */
/* to be written shortly.  Pre-marks every heap block on h's VM page as  */
/* dirty and unprotects the page, so the coming write takes no fault.    */
/* NOTE(review): this scrape dropped brace-only/blank lines (embedded    */
/* original line numbers skip 2360, 2370, 2374-2375, 2378-2380); restore */
/* them from upstream Boehm GC os_dep.c before compiling.                */
2358 void GC_write_hint(h)
2359 struct hblk *h;
2361 register struct hblk * h_trunc;
2362 register unsigned i;
2363 register GC_bool found_clean;
/* Nothing to do unless the dirty-bit machinery is active. */
2365 if (!GC_dirty_maintained) return;
/* Round h down to a GC_page_size boundary: protection granularity */
/* is the VM page, not the heap block.                             */
2366 h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
2367 found_clean = FALSE;
2368 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
2369 register int index = PHT_HASH(h_trunc+i);
/* Remember whether any block on the page was previously clean — */
/* only then can the page still be write-protected.               */
2371 if (!get_pht_entry_from_index(GC_dirty_pages, index)) {
2372 found_clean = TRUE;
2373 async_set_pht_entry_from_index(GC_dirty_pages, index);
2376 if (found_clean) {
2377 UNPROTECT(h_trunc, GC_page_size);
/* GC_dirty_init (MPROTECT_VDB version): install GC_write_fault_handler  */
/* for SIGSEGV (and SIGBUS where the platform raises it for protection   */
/* faults), remembering any previously installed handler so faults that  */
/* are not ours can be forwarded.  Heavily multiplexed by platform.      */
2381 void GC_dirty_init()
2383 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
2384 defined(OSF1) || defined(HURD)
2385 struct sigaction act, oldact;
2386 /* We should probably specify SA_SIGINFO for Linux, and handle */
2387 /* the different architectures more uniformly. */
2388 # if defined(IRIX5) || defined(LINUX) || defined(OSF1) || defined(HURD)
2389 act.sa_flags = SA_RESTART;
2390 act.sa_handler = (SIG_PF)GC_write_fault_handler;
2391 # else
2392 act.sa_flags = SA_RESTART | SA_SIGINFO;
2393 act.sa_sigaction = GC_write_fault_handler;
2394 # endif
2395 (void)sigemptyset(&act.sa_mask);
2396 # ifdef SIG_SUSPEND
2397 /* Arrange to postpone SIG_SUSPEND while we're in a write fault */
2398 /* handler. This effectively makes the handler atomic w.r.t. */
2399 /* stopping the world for GC. */
2400 (void)sigaddset(&act.sa_mask, SIG_SUSPEND);
2401 # endif /* SIG_SUSPEND */
2402 # endif
2403 # if defined(MACOSX)
2404 struct sigaction act, oldact;
2406 act.sa_flags = SA_RESTART;
2407 act.sa_handler = GC_write_fault_handler;
2408 sigemptyset(&act.sa_mask);
2409 # endif
2410 # ifdef PRINTSTATS
2411 GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
2412 # endif
2413 GC_dirty_maintained = TRUE;
/* The page-hash machinery assumes whole heap blocks per page. */
2414 if (GC_page_size % HBLKSIZE != 0) {
2415 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2416 ABORT("Page size not multiple of HBLKSIZE");
/* SUNOS4/FREEBSD use plain signal(); a SIG_IGN predecessor is */
/* treated as "no handler" rather than forwarded to.           */
2418 # if defined(SUNOS4) || defined(FREEBSD)
2419 GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
2420 if (GC_old_bus_handler == SIG_IGN) {
2421 GC_err_printf0("Previously ignored bus error!?");
2422 GC_old_bus_handler = SIG_DFL;
2424 if (GC_old_bus_handler != SIG_DFL) {
2425 # ifdef PRINTSTATS
2426 GC_err_printf0("Replaced other SIGBUS handler\n");
2427 # endif
2429 # endif
2430 # if defined(SUNOS4)
2431 GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
2432 if (GC_old_segv_handler == SIG_IGN) {
2433 GC_err_printf0("Previously ignored segmentation violation!?");
2434 GC_old_segv_handler = SIG_DFL;
2436 if (GC_old_segv_handler != SIG_DFL) {
2437 # ifdef PRINTSTATS
2438 GC_err_printf0("Replaced other SIGSEGV handler\n");
2439 # endif
2441 # endif
2442 # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) \
2443 || defined(OSF1) || defined(HURD)
2444 /* SUNOS5SIGS includes HPUX */
2445 # if defined(GC_IRIX_THREADS)
2446 sigaction(SIGSEGV, 0, &oldact);
2447 sigaction(SIGSEGV, &act, 0);
2448 # else
2449 sigaction(SIGSEGV, &act, &oldact);
2450 # endif
2451 # if defined(_sigargs) || defined(HURD)
2452 /* This is Irix 5.x, not 6.x. Irix 5.x does not have */
2453 /* sa_sigaction. */
2454 GC_old_segv_handler = oldact.sa_handler;
2455 # else /* Irix 6.x or SUNOS5SIGS or LINUX */
2456 if (oldact.sa_flags & SA_SIGINFO) {
2457 GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
2458 } else {
2459 GC_old_segv_handler = oldact.sa_handler;
2461 # endif
2462 if (GC_old_segv_handler == SIG_IGN) {
2463 GC_err_printf0("Previously ignored segmentation violation!?");
2464 GC_old_segv_handler = SIG_DFL;
2466 if (GC_old_segv_handler != SIG_DFL) {
2467 # ifdef PRINTSTATS
2468 GC_err_printf0("Replaced other SIGSEGV handler\n");
2469 # endif
2471 # endif
/* Platforms that also deliver SIGBUS on protection faults. */
2472 # if defined(MACOSX) || defined(HPUX) || defined(LINUX) || defined(HURD)
2473 sigaction(SIGBUS, &act, &oldact);
2474 GC_old_bus_handler = oldact.sa_handler;
2475 if (GC_old_bus_handler == SIG_IGN) {
2476 GC_err_printf0("Previously ignored bus error!?");
2477 GC_old_bus_handler = SIG_DFL;
2479 if (GC_old_bus_handler != SIG_DFL) {
2480 # ifdef PRINTSTATS
2481 GC_err_printf0("Replaced other SIGBUS handler\n");
2482 # endif
2484 # endif /* MACOS || HPUX || LINUX */
/* Win32 uses the unhandled-exception filter instead of signals. */
2485 # if defined(MSWIN32)
2486 GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
2487 if (GC_old_segv_handler != NULL) {
2488 # ifdef PRINTSTATS
2489 GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2490 # endif
2491 } else {
2492 GC_old_segv_handler = SIG_DFL;
2494 # endif
/* GC_protect_heap: write-protect every heap section so the next write */
/* to any heap page faults into GC_write_fault_handler.                */
2499 void GC_protect_heap()
2501 ptr_t start;
2502 word len;
2503 unsigned i;
2505 for (i = 0; i < GC_n_heap_sects; i++) {
2506 start = GC_heap_sects[i].hs_start;
2507 len = GC_heap_sects[i].hs_bytes;
2508 PROTECT(start, len);
2512 /* We assume that either the world is stopped or its OK to lose dirty */
2513 /* bits while this is happenning (as in GC_enable_incremental). */
/* GC_read_dirty: snapshot GC_dirty_pages into GC_grungy_pages, clear  */
/* the live set, and re-protect the heap to start the next epoch.      */
2514 void GC_read_dirty()
2516 BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2517 (sizeof GC_dirty_pages));
2518 BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
2519 GC_protect_heap();
/* GC_page_was_dirty: true if h was written since the last             */
/* GC_read_dirty.  An unmapped header (HDR == 0) is reported dirty,    */
/* conservatively.                                                     */
2522 GC_bool GC_page_was_dirty(h)
2523 struct hblk * h;
2525 register word index = PHT_HASH(h);
2527 return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2531 * Acquiring the allocation lock here is dangerous, since this
2532 * can be called from within GC_call_with_alloc_lock, and the cord
2533 * package does so. On systems that allow nested lock acquisition, this
2534 * happens to work.
2535 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
/* Bracket a wrapped system call with the GC lock, but only if the     */
/* caller does not already hold it (re-entrancy guard for calls made   */
/* from within GC_call_with_alloc_lock — see comment above).           */
2538 static GC_bool syscall_acquired_lock = FALSE; /* Protected by GC lock. */
2540 void GC_begin_syscall()
2542 if (!I_HOLD_LOCK()) {
2543 LOCK();
2544 syscall_acquired_lock = TRUE;
/* Release the lock iff GC_begin_syscall acquired it. */
2548 void GC_end_syscall()
2550 if (syscall_acquired_lock) {
2551 syscall_acquired_lock = FALSE;
2552 UNLOCK();
/* GC_unprotect_range: make [addr, addr+len) writable before a system  */
/* call writes into it.  Marks all covered pages dirty first, then     */
/* unprotects them.  Aborts if the range spans more than one object.   */
2556 void GC_unprotect_range(addr, len)
2557 ptr_t addr;
2558 word len;
2560 struct hblk * start_block;
2561 struct hblk * end_block;
2562 register struct hblk *h;
2563 ptr_t obj_start;
2565 if (!GC_incremental) return;
/* Only heap objects are protected; ignore ranges outside the heap. */
2566 obj_start = GC_base(addr);
2567 if (obj_start == 0) return;
2568 if (GC_base(addr + len - 1) != obj_start) {
2569 ABORT("GC_unprotect_range(range bigger than object)");
/* Round the range out to whole VM pages. */
2571 start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
2572 end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
2573 end_block += GC_page_size/HBLKSIZE - 1;
/* Dirty bits must be set BEFORE unprotecting, so a write between */
/* the two steps is never lost.                                   */
2574 for (h = start_block; h <= end_block; h++) {
2575 register word index = PHT_HASH(h);
2577 async_set_pht_entry_from_index(GC_dirty_pages, index);
2579 UNPROTECT(start_block,
2580 ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
2583 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_LINUX_THREADS) \
2584 && !defined(GC_USE_LD_WRAP)
2585 /* Replacement for UNIX system call. */
2586 /* Other calls that write to the heap */
2587 /* should be handled similarly. */
/* Replacement for the UNIX read() system call: unprotects the target  */
/* buffer (under the GC lock) before performing the actual read, so a  */
/* kernel write into protected heap pages cannot fail.  The real read  */
/* is reached via readv/__read/syscall to avoid recursing into this    */
/* wrapper or into libpthread's own interception.                      */
2588 # if defined(__STDC__) && !defined(SUNOS4)
2589 # include <unistd.h>
2590 # include <sys/uio.h>
2591 ssize_t read(int fd, void *buf, size_t nbyte)
2592 # else
2593 # ifndef LINT
2594 int read(fd, buf, nbyte)
2595 # else
2596 int GC_read(fd, buf, nbyte)
2597 # endif
2598 int fd;
2599 char *buf;
2600 int nbyte;
2601 # endif
2603 int result;
2605 GC_begin_syscall();
2606 GC_unprotect_range(buf, (word)nbyte);
2607 # if defined(IRIX5) || defined(GC_LINUX_THREADS)
2608 /* Indirect system call may not always be easily available. */
2609 /* We could call _read, but that would interfere with the */
2610 /* libpthread interception of read. */
2611 /* On Linux, we have to be careful with the linuxthreads */
2612 /* read interception. */
2614 struct iovec iov;
2616 iov.iov_base = buf;
2617 iov.iov_len = nbyte;
2618 result = readv(fd, &iov, 1);
2620 # else
2621 # if defined(HURD)
2622 result = __read(fd, buf, nbyte);
2623 # else
2624 /* The two zero args at the end of this list are because one
2625 IA-64 syscall() implementation actually requires six args
2626 to be passed, even though they aren't always used. */
2627 result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
2628 # endif /* !HURD */
2629 # endif
2630 GC_end_syscall();
2631 return(result);
2633 #endif /* !MSWIN32 && !MSWINCE && !GC_LINUX_THREADS */
2635 #ifdef GC_USE_LD_WRAP
2636 /* We use the GNU ld call wrapping facility. */
2637 /* This requires that the linker be invoked with "--wrap read". */
2638 /* This can be done by passing -Wl,"--wrap read" to gcc. */
2639 /* I'm not sure that this actually wraps whatever version of read */
2640 /* is called by stdio. That code also mentions __read. */
2641 # include <unistd.h>
/* GNU ld --wrap variant of the read() interception: unprotect the     */
/* buffer under the GC lock, then delegate to the real read.           */
2642 ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
2644 int result;
2646 GC_begin_syscall();
2647 GC_unprotect_range(buf, (word)nbyte);
2648 result = __real_read(fd, buf, nbyte);
2649 GC_end_syscall();
2650 return(result);
2653 /* We should probably also do this for __read, or whatever stdio */
2654 /* actually calls. */
2655 #endif
2657 /*ARGSUSED*/
/* MPROTECT_VDB keeps no per-page history, so every page must be */
/* reported as possibly ever dirty.                              */
2658 GC_bool GC_page_was_ever_dirty(h)
2659 struct hblk *h;
2661 return(TRUE);
2664 /* Reset the n pages starting at h to "was never dirty" status. */
2665 /*ARGSUSED*/
/* Intentionally a no-op here; only the PROC_VDB implementation */
/* maintains a fresh-page cache.                                */
2666 void GC_is_fresh(h, n)
2667 struct hblk *h;
2668 word n;
2672 # else /* !MPROTECT_VDB */
2674 # ifdef GC_USE_LD_WRAP
/* Without MPROTECT_VDB there is nothing to unprotect: the ld-wrapped */
/* read is a plain pass-through to the real one.                      */
2675 ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
2676 { return __real_read(fd, buf, nbyte); }
2677 # endif
2679 # endif /* MPROTECT_VDB */
2681 # ifdef PROC_VDB
2684 * See DEFAULT_VDB for interface descriptions.
2688 * This implementaion assumes a Solaris 2.X like /proc pseudo-file-system
2689 * from which we can read page modified bits. This facility is far from
2690 * optimal (e.g. we would like to get the info for only some of the
2691 * address space), but it avoids intercepting system calls.
2694 #include <errno.h>
2695 #include <sys/types.h>
2696 #include <sys/signal.h>
2697 #include <sys/fault.h>
2698 #include <sys/syscall.h>
2699 #include <sys/procfs.h>
2700 #include <sys/stat.h>
2702 #define INITIAL_BUF_SZ 4096
2703 word GC_proc_buf_size = INITIAL_BUF_SZ;
2704 char *GC_proc_buf;
2706 #ifdef GC_SOLARIS_THREADS
2707 /* We don't have exact sp values for threads. So we count on */
2708 /* occasionally declaring stack pages to be fresh. Thus we */
2709 /* need a real implementation of GC_is_fresh. We can't clear */
2710 /* entries in GC_written_pages, since that would declare all */
2711 /* pages with the given hash address to be fresh. */
2712 # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
2713 struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
2714 /* Collisions are dropped. */
2716 # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
2717 # define ADD_FRESH_PAGE(h) \
2718 GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
2719 # define PAGE_IS_FRESH(h) \
2720 (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
2721 #endif
2723 /* Add all pages in pht2 to pht1 */
2724 void GC_or_pages(pht1, pht2)
2725 page_hash_table pht1, pht2;
2727 register int i;
2729 for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
2732 int GC_proc_fd;
/* GC_dirty_init (PROC_VDB version): open the Solaris-style /proc      */
/* page-data descriptor for this process and allocate the read buffer  */
/* (plus the fresh-page cache under GC_SOLARIS_THREADS).               */
2734 void GC_dirty_init()
2736 int fd;
2737 char buf[30];
2739 GC_dirty_maintained = TRUE;
/* If anything was allocated before this point, its pages may have */
/* been written without being tracked — mark everything written.   */
2740 if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
2741 register int i;
2743 for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
2744 # ifdef PRINTSTATS
2745 GC_printf1("Allocated words:%lu:all pages may have been written\n",
2746 (unsigned long)
2747 (GC_words_allocd + GC_words_allocd_before_gc));
2748 # endif
2750 sprintf(buf, "/proc/%d", getpid());
2751 fd = open(buf, O_RDONLY);
2752 if (fd < 0) {
2753 ABORT("/proc open failed");
/* PIOCOPENPD yields a separate descriptor for page data; the     */
/* original fd is no longer needed.                               */
2755 GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
2756 close(fd);
2757 if (GC_proc_fd < 0) {
2758 ABORT("/proc ioctl failed");
2760 GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
2761 # ifdef GC_SOLARIS_THREADS
2762 GC_fresh_pages = (struct hblk **)
2763 GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
2764 if (GC_fresh_pages == 0) {
2765 GC_err_printf0("No space for fresh pages\n");
2766 EXIT();
2768 BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
2769 # endif
2772 /* Ignore write hints. They don't help us here. */
2773 /*ARGSUSED*/
/* No-op under PROC_VDB: the kernel tracks modified pages for us, so */
/* there is no protection to remove before a write.                  */
2774 void GC_write_hint(h)
2775 struct hblk *h;
2779 #ifdef GC_SOLARIS_THREADS
2780 # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
2781 #else
2782 # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
2783 #endif
/* GC_read_dirty (PROC_VDB version): read per-page modified bits from  */
/* the /proc page-data descriptor into GC_grungy_pages, growing the    */
/* read buffer on demand, and fold the result into GC_written_pages.   */
/* NOTE(review): this scrape dropped brace/blank lines (embedded line  */
/* numbers skip, e.g. 2804, 2812, 2823-2825, 2858-2860); restore them  */
/* from upstream before compiling.                                     */
2785 void GC_read_dirty()
2787 unsigned long ps, np;
2788 int nmaps;
2789 ptr_t vaddr;
2790 struct prasmap * map;
2791 char * bufp;
2792 ptr_t current_addr, limit;
2793 int i;
2794 int dummy;
2796 BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
2798 bufp = GC_proc_buf;
2799 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2800 # ifdef PRINTSTATS
2801 GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
2802 GC_proc_buf_size);
2803 # endif
2805 /* Retry with larger buffer. */
2806 word new_size = 2 * GC_proc_buf_size;
2807 char * new_buf = GC_scratch_alloc(new_size);
2809 if (new_buf != 0) {
2810 GC_proc_buf = bufp = new_buf;
2811 GC_proc_buf_size = new_size;
/* Still too small even after doubling: give up and conservatively */
/* mark everything dirty/written.                                  */
2813 if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
2814 WARN("Insufficient space for /proc read\n", 0);
2815 /* Punt: */
2816 memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
2817 memset(GC_written_pages, 0xff, sizeof(page_hash_table));
2818 # ifdef GC_SOLARIS_THREADS
2819 BZERO(GC_fresh_pages,
2820 MAX_FRESH_PAGES * sizeof (struct hblk *));
2821 # endif
2822 return;
2826 /* Copy dirty bits into GC_grungy_pages */
2827 nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
2828 /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
2829 nmaps, PG_REFERENCED, PG_MODIFIED); */
2830 bufp = bufp + sizeof(struct prpageheader);
/* One prasmap per mapping, followed by one status byte per page. */
2831 for (i = 0; i < nmaps; i++) {
2832 map = (struct prasmap *)bufp;
2833 vaddr = (ptr_t)(map -> pr_vaddr);
2834 ps = map -> pr_pagesize;
2835 np = map -> pr_npage;
2836 /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
2837 limit = vaddr + ps * np;
2838 bufp += sizeof (struct prasmap);
2839 for (current_addr = vaddr;
2840 current_addr < limit; current_addr += ps){
2841 if ((*bufp++) & PG_MODIFIED) {
2842 register struct hblk * h = (struct hblk *) current_addr;
/* A VM page may span several heap blocks; mark them all. */
2844 while ((ptr_t)h < current_addr + ps) {
2845 register word index = PHT_HASH(h);
2847 set_pht_entry_from_index(GC_grungy_pages, index);
2848 # ifdef GC_SOLARIS_THREADS
/* A modified page can no longer be "fresh"; evict it. */
2850 register int slot = FRESH_PAGE_SLOT(h);
2852 if (GC_fresh_pages[slot] == h) {
2853 GC_fresh_pages[slot] = 0;
2856 # endif
2857 h++;
/* Re-align bufp to a long boundary for the next prasmap. */
2861 bufp += sizeof(long) - 1;
2862 bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
2864 /* Update GC_written_pages. */
2865 GC_or_pages(GC_written_pages, GC_grungy_pages);
2866 # ifdef GC_SOLARIS_THREADS
2867 /* Make sure that old stacks are considered completely clean */
2868 /* unless written again. */
2869 GC_old_stacks_are_fresh();
2870 # endif
2873 #undef READ
/* GC_page_was_dirty (PROC_VDB): true if h's page was modified since   */
/* the last GC_read_dirty, unless it has since been declared fresh.    */
2875 GC_bool GC_page_was_dirty(h)
2876 struct hblk *h;
2878 register word index = PHT_HASH(h);
2879 register GC_bool result;
2881 result = get_pht_entry_from_index(GC_grungy_pages, index);
2882 # ifdef GC_SOLARIS_THREADS
2883 if (result && PAGE_IS_FRESH(h)) result = FALSE;
2884 /* This happens only if page was declared fresh since */
2885 /* the read_dirty call, e.g. because it's in an unused */
2886 /* thread stack. It's OK to treat it as clean, in */
2887 /* that case. And it's consistent with */
2888 /* GC_page_was_ever_dirty. */
2889 # endif
2890 return(result);
/* Same as above, but against the cumulative GC_written_pages set. */
2893 GC_bool GC_page_was_ever_dirty(h)
2894 struct hblk *h;
2896 register word index = PHT_HASH(h);
2897 register GC_bool result;
2899 result = get_pht_entry_from_index(GC_written_pages, index);
2900 # ifdef GC_SOLARIS_THREADS
2901 if (result && PAGE_IS_FRESH(h)) result = FALSE;
2902 # endif
2903 return(result);
2906 /* Caller holds allocation lock. */
/* GC_is_fresh: record the n blocks starting at h in the direct-mapped */
/* fresh-page cache (collisions simply overwrite — see ADD_FRESH_PAGE).*/
2907 void GC_is_fresh(h, n)
2908 struct hblk *h;
2909 word n;
2912 register word index;
2914 # ifdef GC_SOLARIS_THREADS
2915 register word i;
2917 if (GC_fresh_pages != 0) {
2918 for (i = 0; i < n; i++) {
2919 ADD_FRESH_PAGE(h + i);
2922 # endif
2925 # endif /* PROC_VDB */
2928 # ifdef PCR_VDB
2930 # include "vd/PCR_VD.h"
2932 # define NPAGES (32*1024) /* 128 MB */
2934 PCR_VD_DB GC_grungy_bits[NPAGES];
2936 ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
2937 /* HBLKSIZE aligned. */
/* PCR_VDB implementation: dirty bits come from the PCR virtual-dirty  */
/* facility over a fixed NPAGES window starting at GC_vd_base.         */
2939 void GC_dirty_init()
2941 GC_dirty_maintained = TRUE;
2942 /* For the time being, we assume the heap generally grows up */
2943 GC_vd_base = GC_heap_sects[0].hs_start;
2944 if (GC_vd_base == 0) {
2945 ABORT("Bad initial heap segment");
2947 if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
2948 != PCR_ERes_okay) {
2949 ABORT("dirty bit initialization failed");
/* Snapshot-and-clear the PCR dirty bits into GC_grungy_bits. */
2953 void GC_read_dirty()
2955 /* lazily enable dirty bits on newly added heap sects */
2957 static int onhs = 0;
2958 int nhs = GC_n_heap_sects;
2959 for( ; onhs < nhs; onhs++ ) {
2960 PCR_VD_WriteProtectEnable(
2961 GC_heap_sects[onhs].hs_start,
2962 GC_heap_sects[onhs].hs_bytes );
2967 if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
2968 != PCR_ERes_okay) {
2969 ABORT("dirty bit read failed");
/* Anything outside the tracked window is conservatively dirty. */
2973 GC_bool GC_page_was_dirty(h)
2974 struct hblk *h;
2976 if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
2977 return(TRUE);
2979 return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
2982 /*ARGSUSED*/
/* Toggle protection to pre-dirty the block about to be written. */
2983 void GC_write_hint(h)
2984 struct hblk *h;
2986 PCR_VD_WriteProtectDisable(h, HBLKSIZE);
2987 PCR_VD_WriteProtectEnable(h, HBLKSIZE);
2990 # endif /* PCR_VDB */
2993 * Call stack save code for debugging.
2994 * Should probably be in mach_dep.c, but that requires reorganization.
2997 /* I suspect the following works for most X86 *nix variants, so */
2998 /* long as the frame pointer is explicitly stored. In the case of gcc, */
2999 /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
3000 #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
/* Hand-rolled x86 stack-frame layout used by GC_save_callers when    */
/* walking saved frame pointers (valid only without                   */
/* -fomit-frame-pointer — see comment above).                         */
3001 struct frame {
3002 struct frame *fr_savfp;
3003 long fr_savpc;
3004 long fr_arg[NARGS]; /* All the arguments go here. */
3006 #endif
3008 #if defined(SPARC)
3009 # if defined(LINUX)
/* SPARC register-window save-area layout for Linux, mirroring the    */
/* <sys/frame.h> definition used on other SPARC systems below.        */
3010 struct frame {
3011 long fr_local[8];
3012 long fr_arg[6];
3013 struct frame *fr_savfp;
3014 long fr_savpc;
3015 # ifndef __arch64__
/* Struct-return pointer slot exists only in the 32-bit ABI. */
3016 char *fr_stret;
3017 # endif
3018 long fr_argd[6];
3019 long fr_argx[0];
3021 # else
3022 # if defined(SUNOS4)
3023 # include <machine/frame.h>
3024 # else
3025 # if defined (DRSNX)
3026 # include <sys/sparc/frame.h>
3027 # else
3028 # if defined(OPENBSD) || defined(NETBSD)
3029 # include <frame.h>
3030 # else
3031 # include <sys/frame.h>
3032 # endif
3033 # endif
3034 # endif
3035 # endif
3036 # if NARGS > 6
3037 --> We only know how to to get the first 6 arguments
3038 # endif
3039 #endif /* SPARC */
3041 #ifdef SAVE_CALL_CHAIN
3042 /* Fill in the pc and argument information for up to NFRAMES of my */
3043 /* callers. Ignore my frame and my callers frame. */
3045 #if (defined(OPENBSD) || defined(NETBSD)) && defined(SPARC)
3046 # define FR_SAVFP fr_fp
3047 # define FR_SAVPC fr_pc
3048 #else
3049 # define FR_SAVFP fr_savfp
3050 # define FR_SAVPC fr_savpc
3051 #endif
3053 #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
3054 # define BIAS 2047
3055 #else
3056 # define BIAS 0
3057 #endif
/* GC_save_callers: walk saved frame pointers and record up to NFRAMES */
/* caller PCs (and up to NARGS arguments, stored bit-complemented) in  */
/* info[].  Stops at GC_stackbottom or when NFRAMES is reached.        */
3059 void GC_save_callers (info)
3060 struct callinfo info[NFRAMES];
3062 struct frame *frame;
3063 struct frame *fp;
3064 int nframes = 0;
3065 # ifdef I386
3066 /* We assume this is turned on only with gcc as the compiler. */
3067 asm("movl %%ebp,%0" : "=r"(frame));
3068 fp = frame;
3069 # else
3070 word GC_save_regs_in_stack();
/* On SPARC, flush register windows to the stack first; BIAS (2047 on */
/* 64-bit SPARC, else 0) corrects the stack-pointer bias.             */
3072 frame = (struct frame *) GC_save_regs_in_stack ();
3073 fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
3074 #endif
3076 for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
3077 && (nframes < NFRAMES));
3078 fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
3079 register int i;
3081 info[nframes].ci_pc = fp->FR_SAVPC;
3082 # if NARGS > 0
3083 for (i = 0; i < NARGS; i++) {
/* Arguments are stored complemented so they don't look like */
/* pointers to the collector.                                */
3084 info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
3086 # endif /* NARGS > 0 */
/* Zero-terminate the list when it is not full. */
3088 if (nframes < NFRAMES) info[nframes].ci_pc = 0;
3091 #endif /* SAVE_CALL_CHAIN */
3093 #if defined(LINUX) && defined(__ELF__) && \
3094 (!defined(SMALL_CONFIG) || defined(USE_PROC_FOR_LIBRARIES))
3095 #ifdef GC_USE_LD_WRAP
3096 # define READ __real_read
3097 #else
3098 # define READ read
3099 #endif
/* Repeatedly perform a read call until the buffer is filled or        */
/* we encounter EOF.  Returns the total number of bytes read, or the   */
/* (negative) result of the first failing READ call.                   */
ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
    ssize_t num_read = 0;
    ssize_t result;

    /* Cast fixes the signed/unsigned comparison between ssize_t and  */
    /* size_t: num_read is never negative at this point, so the       */
    /* conversion is value-preserving.                                 */
    while ((size_t)num_read < count) {
	result = READ(fd, buf + num_read, count - num_read);
	if (result < 0) return result;
	if (result == 0) break;         /* EOF: return a short count.  */
	num_read += result;
    }
    return num_read;
}
3117 #endif /* LINUX && ... */
3120 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
3122 /* Dump /proc/self/maps to GC_stderr, to enable looking up names for
3123 addresses in FIND_LEAK output. */
/* Dump /proc/self/maps to GC_stderr, so that addresses reported by    */
/* FIND_LEAK output can be matched to mapped object files by hand.     */
/* Aborts if /proc/self/maps cannot be opened or read.                 */
void GC_print_address_map()
{
    int f;
    int result;
    char maps_temp[32768];

    GC_err_printf0("---------- Begin address map ----------\n");
    f = open("/proc/self/maps", O_RDONLY);
    if (-1 == f) ABORT("Couldn't open /proc/self/maps");
    do {
        result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
        if (result <= 0) ABORT("Couldn't read /proc/self/maps");
        GC_err_write(maps_temp, result);
        /* A full buffer may mean more data follows; keep reading. */
    } while (result == sizeof(maps_temp));
    /* Bug fix: the descriptor was previously leaked on every call. */
    close(f);
    GC_err_printf0("---------- End address map ----------\n");
}
3142 #endif