/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1997 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Original author: Bill Janssen
 * Heavily modified by Hans Boehm and others
 */

/*
 * This is incredibly OS specific code for tracking down data sections in
 * dynamic libraries.  There appears to be no way of doing this quickly
 * without groveling through undocumented data structures.  We would argue
 * that this is a bug in the design of the dlopen interface.  THIS CODE
 * MAY BREAK IN FUTURE OS RELEASES.  If this matters to you, don't hesitate
 * to let your vendor know ...
 *
 * None of this is safe with dlclose and incremental collection.
 * But then not much of anything is safe in the presence of dlclose.
 */
#if defined(__linux__) && !defined(_GNU_SOURCE)
    /* Can't test LINUX, since this must be defined before the other includes. */
#   define _GNU_SOURCE
#endif

#if !defined(MACOS) && !defined(_WIN32_WCE)
#   include <sys/types.h>
#endif

#include "private/gc_priv.h"
/* BTL: avoid circular redefinition of dlopen if GC_SOLARIS_THREADS defined */
# if (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS)) \
      && defined(dlopen) && !defined(GC_USE_LD_WRAP)
    /* To support threads in Solaris, gc.h interposes on dlopen by       */
    /* defining "dlopen" to be "GC_dlopen", which is implemented below.  */
    /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the   */
    /* real system dlopen() in their implementation.  We first remove    */
    /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
#   undef dlopen
#   define GC_must_restore_redefined_dlopen
# else
#   undef GC_must_restore_redefined_dlopen
# endif
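    /* For illustration only (not from the original source): the interposition */
    /* described above amounts, roughly, to gc.h containing something like     */
    /*     # define dlopen GC_dlopen                                            */
    /* so that client calls to dlopen() reach GC_dlopen(), which synchronizes   */
    /* with the collector before invoking the real dlopen().                    */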
#if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE)) \
    && !defined(PCR)
#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
    !defined(MSWIN32) && !defined(MSWINCE) && \
    !(defined(ALPHA) && defined(OSF1)) && \
    !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
    !defined(RS6000) && !defined(SCO_ELF) && \
    !(defined(FREEBSD) && defined(__ELF__)) && \
    !(defined(NETBSD) && defined(__ELF__)) && !defined(HURD)
 --> We only know how to find data segments of dynamic libraries for the
 --> above.  Additional SVR4 variants might not be too hard to add.
#endif
#include <stdio.h>
#ifdef SUNOS5DL
#   include <sys/elf.h>
#   include <dlfcn.h>
#   include <link.h>
#endif
#ifdef SUNOS4
#   include <dlfcn.h>
#   include <link.h>
#   include <a.out.h>
  /* struct link_map field overrides */
#   define l_next    lm_next
#   define l_addr    lm_addr
#   define l_name    lm_name
#endif
#if defined(SUNOS5DL) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    Elf32_Dyn _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern Elf32_Dyn _DYNAMIC;
    Elf32_Dyn *dp;
    struct r_debug *r;
    static struct link_map * cachedResult = 0;
    static Elf32_Dyn *dynStructureAddr = 0;
                        /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */

#   ifdef SUNOS53_SHARED_LIB
        /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set  */
        /* up properly in dynamically linked .so's.  This means we have  */
        /* to use its value in the set of original object files loaded   */
        /* at program startup.                                           */
        if( dynStructureAddr == 0 ) {
            void* startupSyms = dlopen(0, RTLD_LAZY);
            dynStructureAddr = (Elf32_Dyn*)dlsym(startupSyms, "_DYNAMIC");
        }
#   else
        dynStructureAddr = &_DYNAMIC;
#   endif

    if( dynStructureAddr == 0) {
        return(0);
    }

    if( cachedResult == 0 ) {
        int tag;
        for( dp = ((Elf32_Dyn *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}

#endif /* SUNOS5DL ... */
/* BTL: added to fix circular dlopen definition if GC_SOLARIS_THREADS defined */
# if defined(GC_must_restore_redefined_dlopen)
#   define dlopen GC_dlopen
# endif
#if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    struct link_dynamic _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern struct link_dynamic _DYNAMIC;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    return(_DYNAMIC.ld_un.ld_1->ld_loaded);
}

/* Return the address of the ld.so allocated common symbol     */
/* with the least address, or 0 if none.                       */
static ptr_t GC_first_common()
{
    ptr_t result = 0;
    extern struct link_dynamic _DYNAMIC;
    struct rtc_symb * curr_symbol;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    curr_symbol = _DYNAMIC.ldd -> ldd_cp;
    for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
        if (result == 0
            || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
            result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
        }
    }
    return(result);
}

#endif  /* SUNOS4 ... */
# if defined(SUNOS4) || defined(SUNOS5DL)
/* Add dynamic library data sections to the root set.          */
# if !defined(PCR) && !defined(GC_SOLARIS_THREADS) && defined(THREADS)
#   ifndef SRC_M3
        --> fix mutual exclusion with dlopen
#   endif  /* We assume M3 programs don't call dlopen for now */
# endif

# ifndef USE_PROC_FOR_LIBRARIES
void GC_register_dynamic_libraries()
{
  struct link_map *lm;

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
#     ifdef SUNOS4
        struct exec *e;

        e = (struct exec *) lm->lm_addr;
        GC_add_roots_inner(
                    ((char *) (N_DATOFF(*e) + lm->lm_addr)),
                    ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
                    TRUE);
#     endif
#     ifdef SUNOS5DL
        Elf32_Ehdr * e;
        Elf32_Phdr * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (Elf32_Ehdr *) lm->l_addr;
        p = ((Elf32_Phdr *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
            switch( p->p_type ) {
              case PT_LOAD:
                {
                    if( !(p->p_flags & PF_W) ) break;
                    start = ((char *)(p->p_vaddr)) + offset;
                    GC_add_roots_inner(
                        start,
                        start + p->p_memsz,
                        TRUE
                    );
                }
                break;
              default:
                break;
            }
        }
#     endif
    }
#   ifdef SUNOS4
    {
        static ptr_t common_start = 0;
        ptr_t common_end;
        extern ptr_t GC_find_limit();

        if (common_start == 0) common_start = GC_first_common();
        if (common_start != 0) {
            common_end = GC_find_limit(common_start, TRUE);
            GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
        }
    }
#   endif
}

# endif /* !USE_PROC ... */
# endif /* SUNOS */
#if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF) || \
    (defined(FREEBSD) && defined(__ELF__)) || \
    (defined(NETBSD) && defined(__ELF__)) || defined(HURD)

#ifdef USE_PROC_FOR_LIBRARIES

#include <string.h>

#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#define MAPS_BUF_SIZE (32*1024)

extern ssize_t GC_repeat_read(int fd, char *buf, size_t count);
        /* Repeatedly read until the buffer is filled, or EOF is encountered. */
        /* Defined in os_dep.c.                                               */
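        /* For illustration only (this is not the os_dep.c implementation,   */
        /* just a sketch of the semantics described above): keep calling     */
        /* read() until count bytes have arrived or EOF is reached.          */
        /*                                                                    */
        /*   ssize_t GC_repeat_read(int fd, char *buf, size_t count)         */
        /*   {                                                                */
        /*       ssize_t num_read = 0, result;                                */
        /*       while (num_read < (ssize_t)count) {                          */
        /*           result = read(fd, buf + num_read, count - num_read);     */
        /*           if (result < 0) return result;                           */
        /*           if (result == 0) break;                                  */
        /*           num_read += result;                                      */
        /*       }                                                            */
        /*       return num_read;                                             */
        /*   }                                                                */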
static char *parse_map_entry(char *buf_ptr, word *start, word *end,
                             char *prot_buf, unsigned int *maj_dev);

void GC_register_dynamic_libraries()
{
    int f;
    int result;
    char prot_buf[5];
    int maps_size;
    char maps_temp[32768];
    char *maps_buf;
    char *buf_ptr;
    int count;
    word start, end;
    unsigned int maj_dev, min_dev;
    word least_ha, greatest_ha;
    unsigned i;
    word datastart = (word)(DATASTART);

    /* Read /proc/self/maps.                                        */
    /* Note that we may not allocate, and thus can't use stdio.     */
    f = open("/proc/self/maps", O_RDONLY);
    if (-1 == f) ABORT("Couldn't open /proc/self/maps");
    /* stat() doesn't work for /proc/self/maps, so we have to       */
    /* read it to find out how large it is...                       */
    maps_size = 0;
    do {
        result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
        if (result <= 0) ABORT("Couldn't read /proc/self/maps");
        maps_size += result;
    } while (result == sizeof(maps_temp));

    if (maps_size > sizeof(maps_temp)) {
        /* If larger than our buffer, close and re-read it. */
        close(f);
        f = open("/proc/self/maps", O_RDONLY);
        if (-1 == f) ABORT("Couldn't open /proc/self/maps");
        maps_buf = alloca(maps_size);
        if (NULL == maps_buf) ABORT("/proc/self/maps alloca failed");
        result = GC_repeat_read(f, maps_buf, maps_size);
        if (result <= 0) ABORT("Couldn't read /proc/self/maps");
    } else {
        /* Otherwise use the fixed size buffer */
        maps_buf = maps_temp;
    }

    close(f);
    maps_buf[result] = '\0';
    buf_ptr = maps_buf;
    /* Compute heap bounds.  Should be done by add_to_heap?     */
    least_ha = (word)(-1);
    greatest_ha = 0;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        word sect_start = (word)GC_heap_sects[i].hs_start;
        word sect_end = sect_start + GC_heap_sects[i].hs_bytes;
        if (sect_start < least_ha) least_ha = sect_start;
        if (sect_end > greatest_ha) greatest_ha = sect_end;
    }
    if (greatest_ha < (word)GC_scratch_last_end_ptr)
        greatest_ha = (word)GC_scratch_last_end_ptr;

    for (;;) {
        buf_ptr = parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
        if (buf_ptr == NULL) return;

        if (prot_buf[1] == 'w') {
            /* This is a writable mapping.  Add it to          */
            /* the root set unless it is already otherwise     */
            /* accounted for.                                  */
            if (start <= (word)GC_stackbottom && end >= (word)GC_stackbottom) {
                /* Stack mapping; discard       */
                continue;
            }
            if (start <= datastart && end > datastart && maj_dev != 0) {
                /* Main data segment; discard   */
                continue;
            }
#           ifdef THREADS
              if (GC_segment_is_thread_stack(start, end)) continue;
#           endif
            /* The rest of this assumes that there is no mapping        */
            /* spanning the beginning of the data segment, or extending */
            /* beyond the entire heap at both ends.                     */
            /* Empirically these assumptions hold.                      */

            if (start < (word)DATAEND && end > (word)DATAEND) {
                /* Rld may use space at the end of the main data        */
                /* segment.  Thus we add that in.                       */
                start = (word)DATAEND;
            }
            if (start < least_ha && end > least_ha) {
                end = least_ha;
            }
            if (start < greatest_ha && end > greatest_ha) {
                start = greatest_ha;
            }
            if (start >= least_ha && end <= greatest_ha) continue;
            GC_add_roots_inner((char *)start, (char *)end, TRUE);
        }
    }
}
//
//  parse_map_entry parses an entry from /proc/self/maps so we can
//  locate all writable data segments that belong to shared libraries.
//  The format of one of these entries and the fields we care about
//  is as follows:
//  XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537     name of mapping...\n
//  ^^^^^^^^ ^^^^^^^^ ^^^^          ^^
//  start    end      prot          maj_dev
//  0        9        18            32
//
//  The parser is called with a pointer to the entry and the return value
//  is either NULL or is advanced to the next entry (the byte after the
//  trailing '\n').
//
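//  For illustration only (not from the original source), a concrete
//  (hypothetical) entry such as
//  40017000-40019000 rw-p 00001000 03:01 12345      /lib/libexample.so
//  would be reported by parse_map_entry as start == 0x40017000,
//  end == 0x40019000, prot == "rw-p" and maj_dev == 3.
//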
#define OFFSET_MAP_START 0
#define OFFSET_MAP_END 9
#define OFFSET_MAP_PROT 18
#define OFFSET_MAP_MAJDEV 32

static char *parse_map_entry(char *buf_ptr, word *start, word *end,
                             char *prot_buf, unsigned int *maj_dev)
{
    int i;
    unsigned int val;
    char *tok;

    if (buf_ptr == NULL || *buf_ptr == '\0') {
        return NULL;
    }

    memcpy(prot_buf, buf_ptr+OFFSET_MAP_PROT, 4); // do the protections first
    prot_buf[4] = '\0';

    if (prot_buf[1] == 'w') { // we can skip all of this if it's not writable

        tok = buf_ptr;
        buf_ptr[OFFSET_MAP_START+8] = '\0';
        *start = strtoul(tok, NULL, 16);

        tok = buf_ptr+OFFSET_MAP_END;
        buf_ptr[OFFSET_MAP_END+8] = '\0';
        *end = strtoul(tok, NULL, 16);

        buf_ptr += OFFSET_MAP_MAJDEV;
        tok = buf_ptr;
        while (*buf_ptr != ':') buf_ptr++;
        *buf_ptr++ = '\0';
        *maj_dev = strtoul(tok, NULL, 16);
    }

    while (*buf_ptr && *buf_ptr++ != '\n');

    return buf_ptr;
}

#endif /* USE_PROC_FOR_LIBRARIES */
#if !defined(USE_PROC_FOR_LIBRARIES)
/* The following is the preferred way to walk dynamic libraries        */
/* for glibc 2.2.4+.  Unfortunately, it doesn't work for older         */
/* versions.  Thanks to Jakub Jelinek for most of the code.            */

#include <stddef.h>
#include <elf.h>
#include <link.h>

# if defined(LINUX) /* Are others OK here, too? */ \
     && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 2) \
         || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 2 && defined(DT_CONFIG)))

/* We have the header files for a glibc that includes dl_iterate_phdr. */
/* It may still not be available in the library on the target system.  */
/* Thus we also treat it as a weak symbol.                              */
#define HAVE_DL_ITERATE_PHDR
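/* For reference (not part of the original source): glibc declares     */
/* dl_iterate_phdr in <link.h> roughly as                              */
/*   int dl_iterate_phdr(int (*callback)(struct dl_phdr_info *info,    */
/*                                       size_t size, void *data),     */
/*                       void *data);                                  */
/* The callback below matches that shape, written with K&R-style       */
/* parameter declarations.                                             */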
static int GC_register_dynlib_callback(info, size, ptr)
     struct dl_phdr_info * info;
     size_t size;
     void * ptr;
{
  const ElfW(Phdr) * p;
  char * start;
  register int i;

  /* Make sure struct dl_phdr_info is at least as big as we need.  */
  if (size < offsetof (struct dl_phdr_info, dlpi_phnum)
             + sizeof (info->dlpi_phnum))
    return -1;

  /* Skip the first object - it is the main program.  */
  if (*(int *)ptr == 0)
    {
      *(int *)ptr = 1;
      return 0;
    }

  p = info->dlpi_phdr;
  for( i = 0; i < (int)(info->dlpi_phnum); ((i++),(p++)) ) {
    switch( p->p_type ) {
      case PT_LOAD:
        {
          if( !(p->p_flags & PF_W) ) break;
          start = ((char *)(p->p_vaddr)) + info->dlpi_addr;
          GC_add_roots_inner(start, start + p->p_memsz, TRUE);
        }
        break;
      default:
        break;
    }
  }

  return 0;
}

/* Return TRUE if we succeed, FALSE if dl_iterate_phdr wasn't there. */

#pragma weak dl_iterate_phdr

GC_bool GC_register_dynamic_libraries_dl_iterate_phdr()
{
  int tmp = 0;

  if (dl_iterate_phdr) {
    dl_iterate_phdr(GC_register_dynlib_callback, &tmp);
    return TRUE;
  } else {
    return FALSE;
  }
}
# else /* !LINUX || version(glibc) < 2.2.4 */

/* Dynamic loading code for Linux running ELF.  Somewhat tested on
 * Linux/x86, untested but hopefully working on Linux/Alpha.
 * This code was derived from the Solaris/ELF support.  Thanks to
 * whatever kind soul wrote that.  - Patrick Bridges */

/* This doesn't necessarily work in all cases, e.g. with preloaded
 * dynamic libraries.                                           */

#if defined(NETBSD)
#  include <sys/exec_elf.h>
#else
#  include <elf.h>
#endif
#include <link.h>

# endif

/* Newer versions of Linux/Alpha and Linux/x86 define this macro.  We
 * define it for those older versions that don't.  */
#  ifndef ElfW
#    if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
#      define ElfW(type) Elf32_##type
#    else
#      define ElfW(type) Elf64_##type
#    endif
#  endif
static struct link_map *
GC_FirstDLOpenedLinkMap()
{
#   ifdef __GNUC__
#     pragma weak _DYNAMIC
#   endif
    extern ElfW(Dyn) _DYNAMIC[];
    ElfW(Dyn) *dp;
    struct r_debug *r;
    static struct link_map *cachedResult = 0;

    if( _DYNAMIC == 0) {
        return(0);
    }
    if( cachedResult == 0 ) {
        int tag;
        for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}
void GC_register_dynamic_libraries()
{
  struct link_map *lm;

# ifdef HAVE_DL_ITERATE_PHDR
    if (GC_register_dynamic_libraries_dl_iterate_phdr()) {
        return;
    }
# endif

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
        ElfW(Ehdr) * e;
        ElfW(Phdr) * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (ElfW(Ehdr) *) lm->l_addr;
        p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
            switch( p->p_type ) {
              case PT_LOAD:
                {
                    if( !(p->p_flags & PF_W) ) break;
                    start = ((char *)(p->p_vaddr)) + offset;
                    GC_add_roots_inner(start, start + p->p_memsz, TRUE);
                }
                break;
              default:
                break;
            }
        }
    }
}

#endif /* !USE_PROC_FOR_LIBRARIES */

#endif /* LINUX */
#if defined(IRIX5) || (defined(USE_PROC_FOR_LIBRARIES) && !defined(LINUX))

#include <sys/procfs.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <elf.h>
#include <errno.h>
#include <signal.h>  /* Only for the following test. */
#ifndef _sigargs
# define IRIX6
#endif

extern void * GC_roots_present();
        /* The type is a lie, since the real type doesn't make sense here, */
        /* and we only test for NULL.                                       */

/* We use /proc to track down all parts of the address space that are  */
/* mapped by the process, and throw out regions we know we shouldn't   */
/* worry about.  This may also work under other SVR4 variants.         */
void GC_register_dynamic_libraries()
{
    static int fd = -1;
    char buf[30];
    static prmap_t * addr_map = 0;
    static int current_sz = 0;  /* Number of records currently in addr_map */
    static int needed_sz;       /* Required size of addr_map               */
    register int i;
    register long flags;
    register ptr_t start;
    register ptr_t limit;
    ptr_t heap_start = (ptr_t)HEAP_START;
    ptr_t heap_end = heap_start;

#   ifdef SUNOS5DL
#     define MA_PHYS 0
#   endif /* SUNOS5DL */

    if (fd < 0) {
        sprintf(buf, "/proc/%d", getpid());
            /* The above generates a lint complaint, since pid_t varies. */
            /* It's unclear how to improve this.                         */
        fd = open(buf, O_RDONLY);
        if (fd < 0) {
            ABORT("/proc open failed");
        }
    }
    if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
        GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
        ABORT("/proc PIOCNMAP ioctl failed");
    }
    if (needed_sz >= current_sz) {
        current_sz = needed_sz * 2 + 1;
                        /* Expansion, plus room for 0 record */
        addr_map = (prmap_t *)GC_scratch_alloc((word)
                                                (current_sz * sizeof(prmap_t)));
    }
    if (ioctl(fd, PIOCMAP, addr_map) < 0) {
        GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
                        fd, errno, needed_sz, addr_map);
        ABORT("/proc PIOCMAP ioctl failed");
    }
    if (GC_n_heap_sects > 0) {
        heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
                        + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
        if (heap_end < GC_scratch_last_end_ptr)
            heap_end = GC_scratch_last_end_ptr;
    }
    for (i = 0; i < needed_sz; i++) {
        flags = addr_map[i].pr_mflags;
        if ((flags & (MA_BREAK | MA_STACK | MA_PHYS)) != 0) goto irrelevant;
        if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
            goto irrelevant;
          /* The latter test is empirically useless in very old Irix      */
          /* versions.  Other than the main data and stack segments,      */
          /* everything appears to be mapped readable, writable,          */
          /* executable, and shared(!!).  This makes no sense to me. - HB */
        start = (ptr_t)(addr_map[i].pr_vaddr);
        if (GC_roots_present(start)) goto irrelevant;
        if (start < heap_end && start >= heap_start)
            goto irrelevant;
#       ifdef MMAP_STACKS
          if (GC_is_thread_stack(start)) goto irrelevant;
#       endif /* MMAP_STACKS */

        limit = start + addr_map[i].pr_size;
          /* The following seemed to be necessary for very old versions */
          /* of Irix, but it has been reported to discard relevant      */
          /* segments under Irix 6.5.                                   */
#       ifndef IRIX6
          if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
              /* Discard text segments, i.e. 0-offset mappings against */
              /* executable files which appear to have ELF headers.    */
              caddr_t arg;
              int obj;
#             define MAP_IRR_SZ 10
              static ptr_t map_irr[MAP_IRR_SZ];
                                /* Known irrelevant map entries */
              static int n_irr = 0;
              struct stat buf;
              register int i;

              for (i = 0; i < n_irr; i++) {
                  if (map_irr[i] == start) goto irrelevant;
              }
              arg = (caddr_t)start;
              obj = ioctl(fd, PIOCOPENM, &arg);
              if (obj >= 0) {
                  fstat(obj, &buf);
                  close(obj);
                  if ((buf.st_mode & 0111) != 0) {
                      if (n_irr < MAP_IRR_SZ) {
                          map_irr[n_irr++] = start;
                      }
                      goto irrelevant;
                  }
              }
          }
#       endif /* !IRIX6 */
        GC_add_roots_inner(start, limit, TRUE);
      irrelevant: ;
    }
    /* Don't keep a cached descriptor, for now.  Some kernels don't like us */
    /* to keep a /proc file descriptor around during kill -9.               */
    if (close(fd) < 0) ABORT("Couldn't close /proc file");
    fd = -1;
}

# endif /* USE_PROC || IRIX5 */
# if defined(MSWIN32) || defined(MSWINCE)

# define WIN32_LEAN_AND_MEAN
# define NOSERVICE
# include <windows.h>
# include <stdlib.h>

  /* We traverse the entire address space and register all segments    */
  /* that could possibly have been written to.                         */

  extern GC_bool GC_is_heap_base (ptr_t p);

# ifdef GC_WIN32_THREADS
    extern void GC_get_next_stack(char *start, char **lo, char **hi);
    void GC_cond_add_roots(char *base, char * limit)
    {
      char * curr_base = base;
      char * next_stack_lo;
      char * next_stack_hi;

      if (base == limit) return;
      for(;;) {
          GC_get_next_stack(curr_base, &next_stack_lo, &next_stack_hi);
          if (next_stack_lo >= limit) break;
          GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
          curr_base = next_stack_hi;
      }
      if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
    }
# else
    void GC_cond_add_roots(char *base, char * limit)
    {
      char dummy;
      char * stack_top
          = (char *) ((word)(&dummy) & ~(GC_sysinfo.dwAllocationGranularity-1));
      if (base == limit) return;
      if (limit > stack_top && base < GC_stackbottom) {
          /* Part of the stack; ignore it. */
          return;
      }
      GC_add_roots_inner(base, limit, TRUE);
    }
# endif

# ifndef MSWINCE
  extern GC_bool GC_win32s;
# endif

  void GC_register_dynamic_libraries()
  {
    MEMORY_BASIC_INFORMATION buf;
    DWORD result;
    DWORD protect;
    LPVOID p;
    char * base;
    char * limit, * new_limit;

#   ifdef MSWIN32
      if (GC_win32s) return;
#   endif
    base = limit = p = GC_sysinfo.lpMinimumApplicationAddress;
#   if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
    /* Only the first 32 MB of address space belongs to the current process */
    while (p < (LPVOID)0x02000000) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result == 0) {
            /* Page is free; advance to the next possible allocation base */
            new_limit = (char *)
                (((DWORD) p + GC_sysinfo.dwAllocationGranularity)
                 & ~(GC_sysinfo.dwAllocationGranularity-1));
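            /* For illustration only (not from the original source): with  */
            /* an allocation granularity of 0x10000, p == 0x00012345 is    */
            /* advanced to 0x00020000, the next allocation boundary.       */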
        } else
#   else
    while (p < GC_sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
#   endif
        {
            if (result != sizeof(buf)) {
                ABORT("Weird VirtualQuery result");
            }
            new_limit = (char *)p + buf.RegionSize;
            protect = buf.Protect;
            if (buf.State == MEM_COMMIT
                && (protect == PAGE_EXECUTE_READWRITE
                    || protect == PAGE_READWRITE)
                && !GC_is_heap_base(buf.AllocationBase)) {
                if ((char *)p != limit) {
                    GC_cond_add_roots(base, limit);
                    base = p;
                }
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
    }
    GC_cond_add_roots(base, limit);
  }

#endif /* MSWIN32 || MSWINCE */
#if defined(ALPHA) && defined(OSF1)

#include <loader.h>

void GC_register_dynamic_libraries()
{
  int status;
  ldr_process_t mypid;

  /* module */
    ldr_module_t moduleid = LDR_NULL_MODULE;
    ldr_module_info_t moduleinfo;
    size_t moduleinfosize = sizeof(moduleinfo);
    size_t modulereturnsize;

  /* region */
    ldr_region_t region;
    ldr_region_info_t regioninfo;
    size_t regioninfosize = sizeof(regioninfo);
    size_t regionreturnsize;

  /* Obtain the id of this process */
    mypid = ldr_my_process();

  /* For each module */
    while (TRUE) {

      /* Get the next (first) module */
        status = ldr_next_module(mypid, &moduleid);

      /* Any more modules? */
        if (moduleid == LDR_NULL_MODULE)
            break;    /* No more modules */

      /* Check status AFTER checking moduleid because         */
      /* of a bug in the non-shared ldr_next_module stub.     */
        if (status != 0 ) {
            GC_printf1("dynamic_load: status = %ld\n", (long)status);
            {
                extern char *sys_errlist[];
                extern int sys_nerr;
                extern int errno;
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long)errno);
                }
            }
            ABORT("ldr_next_module failed");
        }

      /* Get the module information */
        status = ldr_inq_module(mypid, moduleid, &moduleinfo,
                                moduleinfosize, &modulereturnsize);
        if (status != 0 )
            ABORT("ldr_inq_module failed");

      /* Is this module the main program (i.e. the nonshared portion)? */
        if (moduleinfo.lmi_flags & LDR_MAIN)
            continue;    /* skip the main module */

#     ifdef VERBOSE
        GC_printf("---Module---\n");
        GC_printf("Module ID            = %16ld\n", moduleinfo.lmi_modid);
        GC_printf("Count of regions     = %16d\n", moduleinfo.lmi_nregion);
        GC_printf("Flags for module     = %16lx\n", moduleinfo.lmi_flags);
        GC_printf("Pathname of module   = \"%s\"\n", moduleinfo.lmi_name);
#     endif

      /* For each region in this module */
        for (region = 0; region < moduleinfo.lmi_nregion; region++) {

          /* Get the region information */
            status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
                                    regioninfosize, &regionreturnsize);
            if (status != 0 )
                ABORT("ldr_inq_region failed");

          /* Only process writable (data) regions */
            if (! (regioninfo.lri_prot & LDR_W))
                continue;

#         ifdef VERBOSE
            GC_printf("--- Region ---\n");
            GC_printf("Region number    = %16ld\n",
                      regioninfo.lri_region_no);
            GC_printf("Protection flags = %016x\n",  regioninfo.lri_prot);
            GC_printf("Virtual address  = %16p\n",   regioninfo.lri_vaddr);
            GC_printf("Mapped address   = %16p\n",   regioninfo.lri_mapaddr);
            GC_printf("Region size      = %16ld\n",  regioninfo.lri_size);
            GC_printf("Region name      = \"%s\"\n", regioninfo.lri_name);
#         endif

          /* Register the region as a garbage collection root */
            GC_add_roots_inner(
                (char *)regioninfo.lri_mapaddr,
                (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
                TRUE);
        }
    }
}
#endif
#if defined(HPUX)

#include <errno.h>
#include <dl.h>

extern int errno;
extern char *sys_errlist[];
extern int sys_nerr;

void GC_register_dynamic_libraries()
{
  int status;
  int index = 1; /* Ordinal position in shared library search list */
  struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */

  /* For each dynamic library loaded */
    while (TRUE) {

      /* Get info about the next shared library */
        status = shl_get(index, &shl_desc);

      /* Check if this is the end of the list or if some error occurred */
        if (status != 0) {
#         ifdef GC_HPUX_THREADS
            /* I've seen errno values of 0.  The man page is not clear  */
            /* as to whether errno should get set on a -1 return.       */
            break;
#         else
            if (errno == EINVAL) {
                break; /* Moved past end of shared library list --> finished */
            } else {
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long) errno);
                }
                ABORT("shl_get failed");
            }
#         endif
        }

#     ifdef VERBOSE
        GC_printf0("---Shared library---\n");
        GC_printf1("\tfilename        = \"%s\"\n", shl_desc->filename);
        GC_printf1("\tindex           = %d\n", index);
        GC_printf1("\thandle          = %08x\n",
                   (unsigned long) shl_desc->handle);
        GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
        GC_printf1("\ttext seg. end   = %08x\n", shl_desc->tend);
        GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
        GC_printf1("\tdata seg. end   = %08x\n", shl_desc->dend);
        GC_printf1("\tref. count      = %lu\n", shl_desc->ref_count);
#     endif

      /* Register the shared library's data segment as a garbage collection root */
        GC_add_roots_inner((char *) shl_desc->dstart,
                           (char *) shl_desc->dend, TRUE);

        index++;
    }
}
#endif /* HPUX */
#ifdef RS6000
#pragma alloca
#include <sys/ldr.h>
#include <sys/errno.h>
void GC_register_dynamic_libraries()
{
        int len;
        char *ldibuf;
        int ldibuflen;
        struct ld_info *ldi;

        ldibuf = alloca(ldibuflen = 8192);

        while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
                if (errno != ENOMEM) {
                        ABORT("loadquery failed");
                }
                ldibuf = alloca(ldibuflen *= 2);
        }

        ldi = (struct ld_info *)ldibuf;
        while (ldi) {
                len = ldi->ldinfo_next;
                GC_add_roots_inner(
                                ldi->ldinfo_dataorg,
                                (unsigned long)ldi->ldinfo_dataorg
                                + ldi->ldinfo_datasize,
                                TRUE);
                ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
        }
}
#endif /* RS6000 */
#else /* !DYNAMIC_LOADING */

#ifdef PCR

#   include "il/PCR_IL.h"
#   include "th/PCR_ThCtl.h"
#   include "mm/PCR_MM.h"

void GC_register_dynamic_libraries()
{
    /* Add new static data areas of dynamically loaded modules. */
    {
        PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
        PCR_IL_LoadedSegment * q;

        /* Skip uncommitted files */
        while (p != NIL && !(p -> lf_commitPoint)) {
            /* The loading of this file has not yet been committed.    */
            /* Hence its description could be inconsistent.            */
            /* Furthermore, it hasn't yet been run.  Hence its data    */
            /* segments can't possibly reference heap allocated        */
            /* objects.                                                 */
            p = p -> lf_prev;
        }
        for (; p != NIL; p = p -> lf_prev) {
            for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
                if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
                    == PCR_IL_SegFlags_Traced_on) {
                    GC_add_roots_inner
                        ((char *)(q -> ls_addr),
                         (char *)(q -> ls_addr) + q -> ls_bytes,
                         TRUE);
                }
            }
        }
    }
}

#else /* !PCR */

void GC_register_dynamic_libraries(){}

int GC_no_dynamic_loading;

#endif /* !PCR */
#endif /* !DYNAMIC_LOADING */