/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1997 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Original author: Bill Janssen
 * Heavily modified by Hans Boehm and others
 */
/*
 * This is incredibly OS specific code for tracking down data sections in
 * dynamic libraries.  There appears to be no way of doing this quickly
 * without groveling through undocumented data structures.  We would argue
 * that this is a bug in the design of the dlopen interface.  THIS CODE
 * MAY BREAK IN FUTURE OS RELEASES.  If this matters to you, don't hesitate
 * to let your vendor know ...
 *
 * None of this is safe with dlclose and incremental collection.
 * But then not much of anything is safe in the presence of dlclose.
 */
#if defined(__linux__) && !defined(_GNU_SOURCE)
    /* Can't test LINUX, since this must be defined before other includes. */
#   define _GNU_SOURCE
#endif
#if !defined(MACOS) && !defined(_WIN32_WCE)
#  include <sys/types.h>
#endif
#include "private/gc_priv.h"
/* BTL: avoid circular redefinition of dlopen if GC_SOLARIS_THREADS defined */
# if (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS)) \
     && defined(dlopen) && !defined(GC_USE_LD_WRAP)
    /* To support threads in Solaris, gc.h interposes on dlopen by       */
    /* defining "dlopen" to be "GC_dlopen", which is implemented below.  */
    /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the   */
    /* real system dlopen() in their implementation.  We first remove    */
    /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
#   undef dlopen
#   define GC_must_restore_redefined_dlopen
# else
#   undef GC_must_restore_redefined_dlopen
# endif
#if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE)) \
    && !defined(PCR)
#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
    !defined(MSWIN32) && !defined(MSWINCE) && \
    !(defined(ALPHA) && defined(OSF1)) && \
    !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
    !defined(RS6000) && !defined(SCO_ELF) && \
    !(defined(FREEBSD) && defined(__ELF__)) && \
    !(defined(NETBSD) && defined(__ELF__)) && !defined(HURD)
 --> We only know how to find data segments of dynamic libraries for the
 --> above.  Additional SVR4 variants might not be too hard to add.
#endif
  /* struct link_map field overrides */
#  define l_next   lm_next
#  define l_addr   lm_addr
#  define l_name   lm_name
/* Newer versions of GNU/Linux define this macro.  We
 * define it similarly for any ELF systems that don't.  */
# ifndef ElfW
#   if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
#     define ElfW(type) Elf32_##type
#   else
#     define ElfW(type) Elf64_##type
#   endif
# endif
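/* For example, ElfW(Dyn) expands to Elf32_Dyn for 32-bit ELF targets */
/* and to Elf64_Dyn when ELF_CLASS indicates a 64-bit ELF target.     */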
#if defined(SUNOS5DL) && !defined(USE_PROC_FOR_LIBRARIES)

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern ElfW(Dyn) _DYNAMIC;
    ElfW(Dyn) *dp;
    static struct link_map * cachedResult = 0;
    static ElfW(Dyn) *dynStructureAddr = 0;
                /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */

#   ifdef SUNOS53_SHARED_LIB
        /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
        /* up properly in dynamically linked .so's.  This means we have */
        /* to use its value in the set of original object files loaded  */
        /* at program startup.                                          */
        if( dynStructureAddr == 0 ) {
            void* startupSyms = dlopen(0, RTLD_LAZY);
            dynStructureAddr = (ElfW(Dyn)*)dlsym(startupSyms, "_DYNAMIC");
        }
#   else
        dynStructureAddr = &_DYNAMIC;
#   endif

    if( dynStructureAddr == 0 ) {
        return(0);
    }

    if( cachedResult == 0 ) {
        int tag;
        for( dp = ((ElfW(Dyn) *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }

    return cachedResult;
}

#endif /* SUNOS5DL ... */
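/* Both GC_FirstDLOpenedLinkMap() implementations in this file rely on  */
/* the same trick: the dynamic linker records a pointer to its r_debug  */
/* structure in the DT_DEBUG entry of _DYNAMIC, and r_debug.r_map is    */
/* the head of the link_map chain.  The head entry describes the main   */
/* executable, so its l_next field is the first shared object.          */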
/* BTL: added to fix circular dlopen definition if GC_SOLARIS_THREADS defined */
# if defined(GC_must_restore_redefined_dlopen)
#   define dlopen GC_dlopen
# endif
#if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    struct link_dynamic _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern struct link_dynamic _DYNAMIC;

    if( &_DYNAMIC == 0 ) {
        return(0);
    }
    return(_DYNAMIC.ld_un.ld_1->ld_loaded);
}
/* Return the address of the ld.so allocated common symbol */
/* with the least address, or 0 if none.                   */
static ptr_t GC_first_common()
{
    extern struct link_dynamic _DYNAMIC;
    ptr_t result = 0;
    struct rtc_symb * curr_symbol;

    if( &_DYNAMIC == 0 ) {
        return(0);
    }
    curr_symbol = _DYNAMIC.ldd -> ldd_cp;
    for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
        if (result == 0
            || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
            result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
        }
    }
    return(result);
}

#endif /* SUNOS4 ... */
# if defined(SUNOS4) || defined(SUNOS5DL)
/* Add dynamic library data sections to the root set. */
# if !defined(PCR) && !defined(GC_SOLARIS_THREADS) && defined(THREADS)
#   ifndef SRC_M3
        --> fix mutual exclusion with dlopen
#   endif  /* We assume M3 programs don't call dlopen for now */
# endif
# ifndef USE_PROC_FOR_LIBRARIES
void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
#     ifdef SUNOS4
        struct exec *e;

        e = (struct exec *) lm->lm_addr;
        GC_add_roots_inner(
                    ((char *) (N_DATOFF(*e) + lm->lm_addr)),
                    ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
                    TRUE);
#     endif
#     ifdef SUNOS5DL
        ElfW(Ehdr) * e;
        ElfW(Phdr) * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (ElfW(Ehdr) *) lm->l_addr;
        p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(start, start + p->p_memsz, TRUE);
              }
              break;
            default:
              break;
          }
        }
#     endif
    }
#   ifdef SUNOS4
      {
        static ptr_t common_start = 0;
        ptr_t common_end;
        extern ptr_t GC_find_limit();

        if (common_start == 0) common_start = GC_first_common();
        if (common_start != 0) {
            common_end = GC_find_limit(common_start, TRUE);
            GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
        }
      }
#   endif
}

# endif /* !USE_PROC ... */
#if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF) || \
    (defined(FREEBSD) && defined(__ELF__)) || \
    (defined(NETBSD) && defined(__ELF__)) || defined(HURD)

#ifdef USE_PROC_FOR_LIBRARIES

#include <sys/stat.h>

#define MAPS_BUF_SIZE (32*1024)
extern ssize_t GC_repeat_read(int fd, char *buf, size_t count);
        /* Repeatedly read until buffer is filled, or EOF is encountered */
        /* Defined in os_dep.c.                                          */
static char *parse_map_entry(char *buf_ptr, word *start, word *end,
                             char *prot_buf, unsigned int *maj_dev);
void GC_register_dynamic_libraries()
{
    int f;
    int result;
    int maps_size;
    char prot_buf[5];
    char maps_temp[32768];
    char *maps_buf;
    char *buf_ptr;
    word start, end;
    unsigned int maj_dev, min_dev;
    word least_ha, greatest_ha;
    unsigned i;
    word datastart = (word)(DATASTART);

    /* Read /proc/self/maps */
        /* Note that we may not allocate, and thus can't use stdio. */
        f = open("/proc/self/maps", O_RDONLY);
        if (-1 == f) ABORT("Couldn't open /proc/self/maps");
        /* stat() doesn't work for /proc/self/maps, so we have to
           read it to find out how large it is... */
        maps_size = 0;
        do {
            result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
            if (result <= 0) ABORT("Couldn't read /proc/self/maps");
            maps_size += result;
        } while (result == sizeof(maps_temp));

        if (maps_size > sizeof(maps_temp)) {
            /* If larger than our buffer, close and re-read it. */
            close(f);
            f = open("/proc/self/maps", O_RDONLY);
            if (-1 == f) ABORT("Couldn't open /proc/self/maps");
            maps_buf = alloca(maps_size);
            if (NULL == maps_buf) ABORT("/proc/self/maps alloca failed");
            result = GC_repeat_read(f, maps_buf, maps_size);
            if (result <= 0) ABORT("Couldn't read /proc/self/maps");
        } else {
            /* Otherwise use the fixed size buffer */
            maps_buf = maps_temp;
        }

        close(f);
        maps_buf[result] = '\0';
        buf_ptr = maps_buf;
    /* Compute heap bounds.  Should be done by add_to_heap? */
        least_ha = (word)(-1);
        greatest_ha = 0;
        for (i = 0; i < GC_n_heap_sects; ++i) {
            word sect_start = (word)GC_heap_sects[i].hs_start;
            word sect_end = sect_start + GC_heap_sects[i].hs_bytes;
            if (sect_start < least_ha) least_ha = sect_start;
            if (sect_end > greatest_ha) greatest_ha = sect_end;
        }
        if (greatest_ha < (word)GC_scratch_last_end_ptr)
            greatest_ha = (word)GC_scratch_last_end_ptr;
    for (;;) {
        buf_ptr = parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
        if (buf_ptr == NULL) return;

        if (prot_buf[1] == 'w') {
            /* This is a writable mapping.  Add it to      */
            /* the root set unless it is already otherwise */
            /* accounted for.                              */
            if (start <= (word)GC_stackbottom && end >= (word)GC_stackbottom) {
                /* Stack mapping; discard */
                continue;
            }
            if (start <= datastart && end > datastart && maj_dev != 0) {
                /* Main data segment; discard */
                continue;
            }
#           ifdef THREADS
              if (GC_segment_is_thread_stack(start, end)) continue;
#           endif
            /* The rest of this assumes that there is no mapping         */
            /* spanning the beginning of the data segment, or extending  */
            /* beyond the entire heap at both ends.                      */
            /* Empirically these assumptions hold.                       */

            if (start < (word)DATAEND && end > (word)DATAEND) {
                /* Rld may use space at the end of the main data */
                /* segment.  Thus we add that in.                */
                start = (word)DATAEND;
            }
            if (start < least_ha && end > least_ha) {
                end = least_ha;
            }
            if (start < greatest_ha && end > greatest_ha) {
                start = greatest_ha;
            }
            if (start >= least_ha && end <= greatest_ha) continue;
            GC_add_roots_inner((char *)start, (char *)end, TRUE);
        }
    }
}
//
//  parse_map_entry parses an entry from /proc/self/maps so we can
//  locate all writable data segments that belong to shared libraries.
//  The format of one of these entries and the fields we care about
//  is as follows:
//  XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537     name of mapping...\n
//  ^^^^^^^^ ^^^^^^^^ ^^^^          ^^
//  start    end      prot          maj_dev
//
//  The parser is called with a pointer to the entry and the return value
//  is either NULL or is advanced to the next entry (the byte after the
//  trailing '\n').
//

#define OFFSET_MAP_START 0
#define OFFSET_MAP_END 9
#define OFFSET_MAP_PROT 18
#define OFFSET_MAP_MAJDEV 32
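// The OFFSET_* constants are just the fixed column positions of those
// fields in such a line: the start address begins at column 0, the end
// address at column 9, the protection string at column 18, and the major
// device number at column 32.  This assumes the 8-hex-digit addresses
// produced by 32-bit Linux kernels of this vintage.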
static char *parse_map_entry(char *buf_ptr, word *start, word *end,
                             char *prot_buf, unsigned int *maj_dev)
{
    char *tok;

    if (buf_ptr == NULL || *buf_ptr == '\0') {
        return NULL;
    }

    memcpy(prot_buf, buf_ptr+OFFSET_MAP_PROT, 4); // do the protections first
    prot_buf[4] = '\0';

    if (prot_buf[1] == 'w') {  // we can skip all of this if it's not writable

        tok = buf_ptr;
        buf_ptr[OFFSET_MAP_START+8] = '\0';
        *start = strtoul(tok, NULL, 16);

        tok = buf_ptr+OFFSET_MAP_END;
        buf_ptr[OFFSET_MAP_END+8] = '\0';
        *end = strtoul(tok, NULL, 16);

        buf_ptr += OFFSET_MAP_MAJDEV;
        tok = buf_ptr;
        while (*buf_ptr != ':') buf_ptr++;
        *buf_ptr++ = '\0';
        *maj_dev = strtoul(tok, NULL, 16);
    }

    while (*buf_ptr && *buf_ptr++ != '\n');

    return buf_ptr;
}
#endif /* USE_PROC_FOR_LIBRARIES */
#if !defined(USE_PROC_FOR_LIBRARIES)
/* The following is the preferred way to walk dynamic libraries     */
/* for glibc 2.2.4+.  Unfortunately, it doesn't work for older      */
/* versions.  Thanks to Jakub Jelinek for most of the code.         */

# if defined(LINUX) /* Are others OK here, too? */ \
     && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 2) \
         || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 2 && defined(DT_CONFIG)))

/* We have the header files for a glibc that includes dl_iterate_phdr. */
/* It may still not be available in the library on the target system. */
/* Thus we also treat it as a weak symbol.                             */
#define HAVE_DL_ITERATE_PHDR
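/* GC_register_dynlib_callback is invoked by dl_iterate_phdr once per */
/* loaded object, including the main program.  For every object other */
/* than the main one it registers each writable PT_LOAD segment,      */
/* relocated by dlpi_addr, as a root range.                           */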
static int GC_register_dynlib_callback(info, size, ptr)
     struct dl_phdr_info * info;
     size_t size;
     void * ptr;
{
  const ElfW(Phdr) * p;
  char * start;
  register int i;

  /* Make sure struct dl_phdr_info is at least as big as we need.  */
  if (size < offsetof (struct dl_phdr_info, dlpi_phnum)
      + sizeof (info->dlpi_phnum))
    return -1;

  /* Skip the first object - it is the main program.  */
  if (*(int *)ptr == 0)
    {
      *(int *)ptr = 1;
      return 0;
    }

  p = info->dlpi_phdr;
  for( i = 0; i < (int)(info->dlpi_phnum); ((i++),(p++)) ) {
    switch( p->p_type ) {
      case PT_LOAD:
        {
          if( !(p->p_flags & PF_W) ) break;
          start = ((char *)(p->p_vaddr)) + info->dlpi_addr;
          GC_add_roots_inner(start, start + p->p_memsz, TRUE);
        }
        break;
      default:
        break;
    }
  }

  return 0;
}
/* Return TRUE if we succeed, FALSE if dl_iterate_phdr wasn't there. */

#pragma weak dl_iterate_phdr

GC_bool GC_register_dynamic_libraries_dl_iterate_phdr()
{
  if (dl_iterate_phdr) {
    int tmp = 0;

    dl_iterate_phdr(GC_register_dynlib_callback, &tmp);
    return TRUE;
  } else {
    return FALSE;
  }
}
# else /* !LINUX || version(glibc) < 2.2.4 */

/* Dynamic loading code for Linux running ELF.  Somewhat tested on
 * Linux/x86, untested but hopefully should work on Linux/Alpha.
 * This code was derived from the Solaris/ELF support.  Thanks to
 * whatever kind soul wrote that.  - Patrick Bridges */

/* This doesn't necessarily work in all cases, e.g. with preloaded
 * dynamic libraries.                                              */

#if defined(NETBSD)
#  include <sys/exec_elf.h>
#endif

# endif /* !LINUX || version(glibc) < 2.2.4 */
static struct link_map *
GC_FirstDLOpenedLinkMap()
{
#   ifdef __GNUC__
#     pragma weak _DYNAMIC
#   endif
    extern ElfW(Dyn) _DYNAMIC[];
    ElfW(Dyn) *dp;
    static struct link_map *cachedResult = 0;

    if( _DYNAMIC == 0 ) {
        return(0);
    }
    if( cachedResult == 0 ) {
        int tag;
        for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}
void GC_register_dynamic_libraries()
{
  struct link_map *lm;

# ifdef HAVE_DL_ITERATE_PHDR
  if (GC_register_dynamic_libraries_dl_iterate_phdr()) {
      return;
  }
# endif
  lm = GC_FirstDLOpenedLinkMap();
  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
        ElfW(Ehdr) * e;
        ElfW(Phdr) * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (ElfW(Ehdr) *) lm->l_addr;
        p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(start, start + p->p_memsz, TRUE);
              }
              break;
            default:
              break;
          }
        }
    }
}
#endif /* !USE_PROC_FOR_LIBRARIES */

#endif /* LINUX */
#if defined(IRIX5) || (defined(USE_PROC_FOR_LIBRARIES) && !defined(LINUX))

#include <sys/procfs.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <elf.h>
#include <errno.h>
#include <signal.h>  /* Only for the following test. */
#ifndef _sigargs
# define IRIX6
#endif

extern void * GC_roots_present();
        /* The type is a lie, since the real type doesn't make sense here, */
        /* and we only test for NULL.                                      */

/* We use /proc to track down all parts of the address space that are */
/* mapped by the process, and throw out regions we know we shouldn't  */
/* worry about.  This may also work under other SVR4 variants.        */
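/* Unlike the Linux code above, which parses the textual /proc/self/maps, */
/* this relies on the older ioctl-based SVR4 /proc interface: PIOCNMAP    */
/* yields the number of mappings and PIOCMAP fills an array of prmap_t    */
/* entries describing them.                                               */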
void GC_register_dynamic_libraries()
{
    static int fd = -1;
    char buf[30];
    static prmap_t * addr_map = 0;
    static int current_sz = 0;  /* Number of records currently in addr_map */
    static int needed_sz;       /* Required size of addr_map               */
    register int i;
    register long flags;
    register ptr_t start;
    register ptr_t limit;
    ptr_t heap_start = (ptr_t)HEAP_START;
    ptr_t heap_end = heap_start;

#   ifdef SUNOS5DL
#     define MA_PHYS 0
#   endif /* SUNOS5DL */
    if (fd < 0) {
      sprintf(buf, "/proc/%d", getpid());
        /* The above generates a lint complaint, since pid_t varies. */
        /* It's unclear how to improve this.                         */
      fd = open(buf, O_RDONLY);
      if (fd < 0) {
        ABORT("/proc open failed");
      }
    }
    if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
        GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
        ABORT("/proc PIOCNMAP ioctl failed");
    }
    if (needed_sz >= current_sz) {
        current_sz = needed_sz * 2 + 1;
                        /* Expansion, plus room for 0 record */
        addr_map = (prmap_t *)GC_scratch_alloc((word)
                                               (current_sz * sizeof(prmap_t)));
    }
    if (ioctl(fd, PIOCMAP, addr_map) < 0) {
        GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
                       fd, errno, needed_sz, addr_map);
        ABORT("/proc PIOCMAP ioctl failed");
    }
    if (GC_n_heap_sects > 0) {
        heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
                       + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
        if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
    }
    for (i = 0; i < needed_sz; i++) {
        flags = addr_map[i].pr_mflags;
        if ((flags & (MA_BREAK | MA_STACK | MA_PHYS)) != 0) goto irrelevant;
        if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
            goto irrelevant;
          /* The latter test is empirically useless in very old Irix       */
          /* versions.  Other than the main data and stack segments,       */
          /* everything appears to be mapped readable, writable,           */
          /* executable, and shared(!!).  This makes no sense to me. - HB  */
        start = (ptr_t)(addr_map[i].pr_vaddr);
        if (GC_roots_present(start)) goto irrelevant;
        if (start < heap_end && start >= heap_start)
            goto irrelevant;
#       ifdef MMAP_STACKS
          if (GC_is_thread_stack(start)) goto irrelevant;
#       endif /* MMAP_STACKS */
        limit = start + addr_map[i].pr_size;
        /* The following seemed to be necessary for very old versions */
        /* of Irix, but it has been reported to discard relevant      */
        /* segments under Irix 6.5.                                   */
#       ifndef IRIX6
          if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
            /* Discard text segments, i.e. 0-offset mappings against */
            /* executable files which appear to have ELF headers.    */
            caddr_t arg;
            int obj;
#           define MAP_IRR_SZ 10
            static ptr_t map_irr[MAP_IRR_SZ];
                                /* Known irrelevant map entries */
            static int n_irr = 0;
            struct stat buf;
            register int i;

            for (i = 0; i < n_irr; i++) {
                if (map_irr[i] == start) goto irrelevant;
            }
            arg = (caddr_t)start;
            obj = ioctl(fd, PIOCOPENM, &arg);
            if (obj >= 0) {
                fstat(obj, &buf);
                close(obj);
                if ((buf.st_mode & 0111) != 0) {
                    if (n_irr < MAP_IRR_SZ) {
                        map_irr[n_irr++] = start;
                    }
                    goto irrelevant;
                }
            }
          }
#       endif /* !IRIX6 */
        GC_add_roots_inner(start, limit, TRUE);
      irrelevant: ;
    }
    /* Don't keep cached descriptor, for now.  Some kernels don't like us */
    /* to keep a /proc file descriptor around during kill -9.             */
        if (close(fd) < 0) ABORT("Couldn't close /proc file");
        fd = -1;
}

# endif /* USE_PROC || IRIX5 */
# if defined(MSWIN32) || defined(MSWINCE)

# define WIN32_LEAN_AND_MEAN
# include <windows.h>

  /* We traverse the entire address space and register all segments */
  /* that could possibly have been written to.                      */

  extern GC_bool GC_is_heap_base (ptr_t p);

# ifdef GC_WIN32_THREADS
  extern void GC_get_next_stack(char *start, char **lo, char **hi);
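  /* GC_cond_add_roots(base, limit) adds the region to the root set,   */
  /* first excluding any thread stacks that GC_get_next_stack reports  */
  /* inside it (the single-threaded variant below excludes only the    */
  /* part that overlaps the main stack).                               */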
  void GC_cond_add_roots(char *base, char * limit)
  {
    char * curr_base = base;
    char * next_stack_lo;
    char * next_stack_hi;

    if (base == limit) return;
    for(;;) {
        GC_get_next_stack(curr_base, &next_stack_lo, &next_stack_hi);
        if (next_stack_lo >= limit) break;
        GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
        curr_base = next_stack_hi;
    }
    if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
  }
# else
  void GC_cond_add_roots(char *base, char * limit)
  {
    char dummy;
    char * stack_top
       = (char *) ((word)(&dummy) & ~(GC_sysinfo.dwAllocationGranularity-1));

    if (base == limit) return;
    if (limit > stack_top && base < GC_stackbottom) {
        /* Part of the stack; ignore it. */
        return;
    }
    GC_add_roots_inner(base, limit, TRUE);
  }
# endif
# ifdef MSWIN32
    extern GC_bool GC_win32s;
# endif
  void GC_register_dynamic_libraries()
  {
    MEMORY_BASIC_INFORMATION buf;
    DWORD result;
    DWORD protect;
    LPVOID p;
    char * base;
    char * limit, * new_limit;

#   ifdef MSWIN32
      if (GC_win32s) return;
#   endif
    base = limit = p = GC_sysinfo.lpMinimumApplicationAddress;
#   if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
    /* Only the first 32 MB of address space belongs to the current process */
    while (p < (LPVOID)0x02000000) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result == 0) {
            /* Page is free; advance to the next possible allocation base */
            new_limit = (char *)
                (((DWORD) p + GC_sysinfo.dwAllocationGranularity)
                 & ~(GC_sysinfo.dwAllocationGranularity-1));
        } else
#   else
    while (p < GC_sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
#   endif
        {
            if (result != sizeof(buf)) {
                ABORT("Weird VirtualQuery result");
            }
            new_limit = (char *)p + buf.RegionSize;
            protect = buf.Protect;
            if (buf.State == MEM_COMMIT
                && (protect == PAGE_EXECUTE_READWRITE
                    || protect == PAGE_READWRITE)
                && !GC_is_heap_base(buf.AllocationBase)) {
                if ((char *)p != limit) {
                    GC_cond_add_roots(base, limit);
                    base = (char *)p;
                }
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
    }
    GC_cond_add_roots(base, limit);
  }

#endif /* MSWIN32 || MSWINCE */
#if defined(ALPHA) && defined(OSF1)

#include <loader.h>

void GC_register_dynamic_libraries()
{
  int status;
  ldr_process_t mypid;

  /* module */
    ldr_module_t moduleid = LDR_NULL_MODULE;
    ldr_module_info_t moduleinfo;
    size_t moduleinfosize = sizeof(moduleinfo);
    size_t modulereturnsize;

  /* region */
    ldr_region_t region;
    ldr_region_info_t regioninfo;
    size_t regioninfosize = sizeof(regioninfo);
    size_t regionreturnsize;
  /* Obtain id of this process */
    mypid = ldr_my_process();

  /* For each module */
    while (TRUE) {

      /* Get the next (first) module */
        status = ldr_next_module(mypid, &moduleid);
      /* Any more modules? */
        if (moduleid == LDR_NULL_MODULE)
            break;    /* No more modules */

      /* Check status AFTER checking moduleid because       */
      /* of a bug in the non-shared ldr_next_module stub.   */
        if (status != 0) {
            GC_printf1("dynamic_load: status = %ld\n", (long)status);
            {
                extern char *sys_errlist[];
                extern int sys_nerr;
                extern int errno;
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long)errno);
                }
            }
            ABORT("ldr_next_module failed");
        }
      /* Get the module information */
        status = ldr_inq_module(mypid, moduleid, &moduleinfo,
                                moduleinfosize, &modulereturnsize);
        if (status != 0)
            ABORT("ldr_inq_module failed");

      /* is module for the main program (i.e. nonshared portion)? */
        if (moduleinfo.lmi_flags & LDR_MAIN)
            continue;    /* skip the main module */
#     ifdef VERBOSE
        GC_printf("---Module---\n");
        GC_printf("Module ID            = %16ld\n", moduleinfo.lmi_modid);
        GC_printf("Count of regions     = %16d\n", moduleinfo.lmi_nregion);
        GC_printf("flags for module     = %16lx\n", moduleinfo.lmi_flags);
        GC_printf("pathname of module   = \"%s\"\n", moduleinfo.lmi_name);
#     endif
      /* For each region in this module */
        for (region = 0; region < moduleinfo.lmi_nregion; region++) {

          /* Get the region information */
            status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
                                    regioninfosize, &regionreturnsize);
            if (status != 0)
                ABORT("ldr_inq_region failed");
          /* only process writable (data) regions */
            if (! (regioninfo.lri_prot & LDR_W))
                continue;

#         ifdef VERBOSE
            GC_printf("--- Region ---\n");
            GC_printf("Region number    = %16ld\n", regioninfo.lri_region_no);
            GC_printf("Protection flags = %016x\n", regioninfo.lri_prot);
            GC_printf("Virtual address  = %16p\n", regioninfo.lri_vaddr);
            GC_printf("Mapped address   = %16p\n", regioninfo.lri_mapaddr);
            GC_printf("Region size      = %16ld\n", regioninfo.lri_size);
            GC_printf("Region name      = \"%s\"\n", regioninfo.lri_name);
#         endif
          /* register region as a garbage collection root */
            GC_add_roots_inner(
                (char *)regioninfo.lri_mapaddr,
                (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
                TRUE);
        }
    }
}

#endif /* ALPHA && OSF1 */
#if defined(HPUX)

#include <errno.h>
#include <dl.h>

extern int errno;
extern char *sys_errlist[];
extern int sys_nerr;

void GC_register_dynamic_libraries()
{
  int status;
  int index = 1; /* Ordinal position in shared library search list */
  struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */
  /* For each dynamic library loaded */
    while (TRUE) {

      /* Get info about next shared library */
        status = shl_get(index, &shl_desc);

      /* Check if this is the end of the list or if some error occurred */
        if (status != 0) {
#        ifdef GC_HPUX_THREADS
           /* I've seen errno values of 0.  The man page is not clear */
           /* as to whether errno should get set on a -1 return.      */
           break;
#        else
          if (errno == EINVAL) {
              break; /* Moved past end of shared library list --> finished */
          } else {
              if (errno <= sys_nerr) {
                  GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
              } else {
                  GC_printf1("dynamic_load: %d\n", (long) errno);
              }
              ABORT("shl_get failed");
          }
#        endif
        }
#     ifdef VERBOSE
        GC_printf0("---Shared library---\n");
        GC_printf1("\tfilename        = \"%s\"\n", shl_desc->filename);
        GC_printf1("\tindex           = %d\n", index);
        GC_printf1("\thandle          = %08x\n",
                   (unsigned long) shl_desc->handle);
        GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
        GC_printf1("\ttext seg. end   = %08x\n", shl_desc->tend);
        GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
        GC_printf1("\tdata seg. end   = %08x\n", shl_desc->dend);
        GC_printf1("\tref. count      = %lu\n", shl_desc->ref_count);
#     endif
      /* register shared library's data segment as a garbage collection root */
        GC_add_roots_inner((char *) shl_desc->dstart,
                           (char *) shl_desc->dend, TRUE);

        index++;
    }
}

#endif /* HPUX */
#ifdef RS6000
#pragma alloca
#include <sys/ldr.h>
#include <sys/errno.h>
void GC_register_dynamic_libraries()
{
        int len;
        char *ldibuf;
        int ldibuflen;
        struct ld_info *ldi;

        ldibuf = alloca(ldibuflen = 8192);

        while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
                if (errno != ENOMEM) {
                        ABORT("loadquery failed");
                }
                ldibuf = alloca(ldibuflen *= 2);
        }

        ldi = (struct ld_info *)ldibuf;
        while (ldi) {
                len = ldi->ldinfo_next;
                        /* Byte offset of the next ld_info record; 0 ends the list. */
                GC_add_roots_inner(
                                ldi->ldinfo_dataorg,
                                (unsigned long)ldi->ldinfo_dataorg
                                + ldi->ldinfo_datasize,
                                TRUE);
                ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
        }
}
#endif /* RS6000 */
#else /* !DYNAMIC_LOADING */

#ifdef PCR

#   include "il/PCR_IL.h"
#   include "th/PCR_ThCtl.h"
#   include "mm/PCR_MM.h"
void GC_register_dynamic_libraries()
{
    /* Add new static data areas of dynamically loaded modules. */
        {
          PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
          PCR_IL_LoadedSegment * q;

          /* Skip uncommitted files */
          while (p != NIL && !(p -> lf_commitPoint)) {
              /* The loading of this file has not yet been committed.  */
              /* Hence its description could be inconsistent.          */
              /* Furthermore, it hasn't yet been run.  Hence its data  */
              /* segments can't possibly reference heap allocated      */
              /* objects.                                              */
              p = p -> lf_prev;
          }
          for (; p != NIL; p = p -> lf_prev) {
            for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
              if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
                  == PCR_IL_SegFlags_Traced_on) {
                GC_add_roots_inner
                    ((char *)(q -> ls_addr),
                     (char *)(q -> ls_addr) + q -> ls_bytes,
                     TRUE);
              }
            }
          }
        }
}
#else /* !PCR */

void GC_register_dynamic_libraries(){}

int GC_no_dynamic_loading;

#endif /* !PCR */

#endif /* !DYNAMIC_LOADING */