/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1997 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Original author: Bill Janssen
 * Heavily modified by Hans Boehm and others
 */
/*
 * This is incredibly OS specific code for tracking down data sections in
 * dynamic libraries.  There appears to be no way of doing this quickly
 * without groveling through undocumented data structures.  We would argue
 * that this is a bug in the design of the dlopen interface.  THIS CODE
 * MAY BREAK IN FUTURE OS RELEASES.  If this matters to you, don't hesitate
 * to let your vendor know ...
 *
 * None of this is safe with dlclose and incremental collection.
 * But then not much of anything is safe in the presence of dlclose.
 */
#if defined(__linux__) && !defined(_GNU_SOURCE)
    /* Can't test LINUX, since this must be defined before other includes. */
#   define _GNU_SOURCE
#endif
#if !defined(MACOS) && !defined(_WIN32_WCE)
# include <sys/types.h>
#endif
#include "private/gc_priv.h"
/* BTL: avoid circular redefinition of dlopen if GC_SOLARIS_THREADS defined */
# if (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS)) \
     && defined(dlopen) && !defined(GC_USE_LD_WRAP)
    /* To support threads in Solaris, gc.h interposes on dlopen by       */
    /* defining "dlopen" to be "GC_dlopen", which is implemented below.  */
    /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the   */
    /* real system dlopen() in their implementation.  We first remove    */
    /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
#   undef dlopen
#   define GC_must_restore_redefined_dlopen
# else
#   undef GC_must_restore_redefined_dlopen
# endif
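
/* For illustration only: the interposed GC_dlopen() is essentially a      */
/* thin wrapper that keeps a collection from running while the dynamic     */
/* linker mutates its data structures, then calls the real dlopen().       */
/* A minimal sketch follows; GC_disable()/GC_enable() stand in for         */
/* whatever synchronization the collector's real wrapper uses, and the     */
/* name GC_dlopen_sketch is hypothetical.                                   */
#if 0
#include <dlfcn.h>

void * GC_dlopen_sketch(const char * path, int mode)
{
    void * result;

    GC_disable();               /* assumption: no collections during load */
    result = dlopen(path, mode);
    GC_enable();                /* collections are safe again */
    return result;
}
#endif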
#if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE)) \
    && !defined(PCR)
#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
    !defined(MSWIN32) && !defined(MSWINCE) && \
    !(defined(ALPHA) && defined(OSF1)) && \
    !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
    !defined(RS6000) && !defined(SCO_ELF) && \
    !(defined(FREEBSD) && defined(__ELF__)) && \
    !(defined(NETBSD) && defined(__ELF__)) && !defined(HURD)
 --> We only know how to find data segments of dynamic libraries for the
 --> above.  Additional SVR4 variants might not be too hard to add.
#endif
# ifdef SUNOS4
    /* struct link_map field overrides */
#   define l_next   lm_next
#   define l_addr   lm_addr
#   define l_name   lm_name
# endif
#if defined(SUNOS5DL) && !defined(USE_PROC_FOR_LIBRARIES)

#include <dlfcn.h>
#include <link.h>
static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern Elf32_Dyn _DYNAMIC;
    Elf32_Dyn *dp;
    static struct link_map * cachedResult = 0;
    static Elf32_Dyn *dynStructureAddr = 0;
                /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */

#   ifdef SUNOS53_SHARED_LIB
      /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
      /* up properly in dynamically linked .so's.  This means we have */
      /* to use its value in the set of original object files loaded  */
      /* at program startup.                                          */
      if( dynStructureAddr == 0 ) {
          void* startupSyms = dlopen(0, RTLD_LAZY);
          dynStructureAddr = (Elf32_Dyn *)dlsym(startupSyms, "_DYNAMIC");
      }
#   else
      dynStructureAddr = &_DYNAMIC;
#   endif

    if( dynStructureAddr == 0) {
        return(0);
    }
    if( cachedResult == 0 ) {
        int tag;
        for( dp = ((Elf32_Dyn *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}

#endif /* SUNOS5DL ... */
/* BTL: added to fix circular dlopen definition if GC_SOLARIS_THREADS defined */
# if defined(GC_must_restore_redefined_dlopen)
#   define dlopen GC_dlopen
# endif
#if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)

struct link_dynamic _DYNAMIC;

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern struct link_dynamic _DYNAMIC;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    return(_DYNAMIC.ld_un.ld_1->ld_loaded);
}
/* Return the address of the ld.so allocated common symbol */
/* with the least address, or 0 if none.                   */
static ptr_t GC_first_common()
{
    ptr_t result = 0;
    extern struct link_dynamic _DYNAMIC;
    struct rtc_symb * curr_symbol;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    curr_symbol = _DYNAMIC.ldd -> ldd_cp;
    for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
        if (result == 0
            || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
            result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
        }
    }
    return(result);
}

#endif /* SUNOS4 ... */
# if defined(SUNOS4) || defined(SUNOS5DL)
/* Add dynamic library data sections to the root set. */
# if !defined(PCR) && !defined(GC_SOLARIS_THREADS) && defined(THREADS)
#   ifndef SRC_M3
 --> fix mutual exclusion with dlopen
#   endif  /* We assume M3 programs don't call dlopen for now */
# endif
# ifndef USE_PROC_FOR_LIBRARIES
void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
#     ifdef SUNOS4
        struct exec *e;

        e = (struct exec *) lm->lm_addr;
        GC_add_roots_inner(
                    ((char *) (N_DATOFF(*e) + lm->lm_addr)),
                    ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
                    TRUE);
#     endif
#     ifdef SUNOS5DL
        Elf32_Ehdr * e;
        Elf32_Phdr * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (Elf32_Ehdr *) lm->l_addr;
        p = ((Elf32_Phdr *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(start, start + p->p_memsz, TRUE);
              }
              break;
            default:
              break;
          }
        }
#     endif
    }
#   ifdef SUNOS4
      {
        static ptr_t common_start = 0;
        ptr_t common_end;
        extern ptr_t GC_find_limit();

        if (common_start == 0) common_start = GC_first_common();
        if (common_start != 0) {
            common_end = GC_find_limit(common_start, TRUE);
            GC_add_roots_inner((char *)common_start,
                               (char *)common_end, TRUE);
        }
      }
#   endif
}
# endif /* !USE_PROC ... */

# endif /* SUNOS4 || SUNOS5DL */
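
/* GC_find_limit(start, up), used above to find the end of the common   */
/* region, is defined in os_dep.c: it probes memory one page at a time  */
/* in the direction given by "up" until an access faults, and returns   */
/* the address at which it stopped.  A minimal sketch, assuming a       */
/* setjmp/longjmp-based SIGSEGV handler and a 4K page size (the real    */
/* code uses the collector's own fault-handler machinery):              */
#if 0
#include <setjmp.h>
#include <signal.h>

static jmp_buf probe_buf;
static void probe_fault(int sig) { longjmp(probe_buf, 1); }

static char *find_limit_sketch(char *p, int up)
{
    static volatile char * volatile q;      /* survives the longjmp */
    unsigned long pgsz = 4096;              /* assumed page size */
    void (*old_handler)(int) = signal(SIGSEGV, probe_fault);

    q = (char *)((unsigned long)p & ~(pgsz - 1));
    if (setjmp(probe_buf) == 0) {
        for (;;) {
            if (up) q += pgsz; else q -= pgsz;
            (void)*q;                       /* touch the page */
        }
    }
    (void)signal(SIGSEGV, old_handler);
    /* q now addresses the first page that faulted. */
    return up ? (char *)q : (char *)q + pgsz;
}
#endif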
#if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF) || \
    (defined(FREEBSD) && defined(__ELF__)) || \
    (defined(NETBSD) && defined(__ELF__)) || defined(HURD)
#ifdef USE_PROC_FOR_LIBRARIES

#include <string.h>
#include <sys/stat.h>
#include <fcntl.h>

#define MAPS_BUF_SIZE (32*1024)
extern ssize_t GC_repeat_read(int fd, char *buf, size_t count);
        /* Repeatedly read until buffer is filled, or EOF is encountered. */
        /* Defined in os_dep.c.                                            */
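
/* A minimal sketch of what GC_repeat_read amounts to (the real version */
/* is the one in os_dep.c; the name below is hypothetical): keep        */
/* calling read() until "count" bytes have arrived, EOF is hit, or an   */
/* error occurs.                                                        */
#if 0
#include <unistd.h>

static ssize_t repeat_read_sketch(int fd, char *buf, size_t count)
{
    ssize_t num_read = 0;

    while ((size_t)num_read < count) {
        ssize_t result = read(fd, buf + num_read, count - num_read);

        if (result < 0) return result;   /* read error */
        if (result == 0) break;          /* EOF */
        num_read += result;
    }
    return num_read;
}
#endif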
static char *parse_map_entry(char *buf_ptr, word *start, word *end,
                             char *prot_buf, unsigned int *maj_dev);
void GC_register_dynamic_libraries()
{
    int f;
    int result;
    char prot_buf[5];
    int maps_size;
    char maps_temp[32768];
    char *maps_buf;
    char *buf_ptr;
    word start, end;
    unsigned int maj_dev, min_dev;
    word least_ha, greatest_ha;
    unsigned i;
    word datastart = (word)(DATASTART);

    /* Read /proc/self/maps.                                        */
    /* Note that we may not allocate, and thus can't use stdio.     */
    f = open("/proc/self/maps", O_RDONLY);
    if (-1 == f) ABORT("Couldn't open /proc/self/maps");
    /* stat() doesn't work for /proc/self/maps, so we have to       */
    /* read it to find out how large it is...                       */
    maps_size = 0;
    do {
        result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
        if (result <= 0) ABORT("Couldn't read /proc/self/maps");
        maps_size += result;
    } while (result == sizeof(maps_temp));

    if (maps_size > sizeof(maps_temp)) {
        /* If larger than our buffer, close and re-read it. */
        close(f);
        f = open("/proc/self/maps", O_RDONLY);
        if (-1 == f) ABORT("Couldn't open /proc/self/maps");
        maps_buf = alloca(maps_size);
        if (NULL == maps_buf) ABORT("/proc/self/maps alloca failed");
        result = GC_repeat_read(f, maps_buf, maps_size);
        if (result <= 0) ABORT("Couldn't read /proc/self/maps");
    } else {
        /* Otherwise use the fixed size buffer */
        maps_buf = maps_temp;
    }

    close(f);
    maps_buf[result] = '\0';
    buf_ptr = maps_buf;

    /* Compute heap bounds.  Should be done by add_to_heap? */
    least_ha = (word)(-1);
    greatest_ha = 0;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        word sect_start = (word)GC_heap_sects[i].hs_start;
        word sect_end = sect_start + GC_heap_sects[i].hs_bytes;
        if (sect_start < least_ha) least_ha = sect_start;
        if (sect_end > greatest_ha) greatest_ha = sect_end;
    }
    if (greatest_ha < (word)GC_scratch_last_end_ptr)
        greatest_ha = (word)GC_scratch_last_end_ptr;

    for (;;) {
        buf_ptr = parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
        if (buf_ptr == NULL) return;

        if (prot_buf[1] == 'w') {
            /* This is a writable mapping.  Add it to       */
            /* the root set unless it is already otherwise  */
            /* accounted for.                               */
            if (start <= (word)GC_stackbottom && end >= (word)GC_stackbottom) {
                /* Stack mapping; discard */
                continue;
            }
            if (start <= datastart && end > datastart && maj_dev != 0) {
                /* Main data segment; discard */
                continue;
            }
#           ifdef THREADS
              if (GC_segment_is_thread_stack(start, end)) continue;
#           endif
            /* The rest of this assumes that there is no mapping        */
            /* spanning the beginning of the data segment, or extending */
            /* beyond the entire heap at both ends.                     */
            /* Empirically these assumptions hold.                      */
            if (start < (word)DATAEND && end > (word)DATAEND) {
                /* Rld may use space at the end of the main data */
                /* segment.  Thus we add that in.                */
                start = (word)DATAEND;
            }
            if (start < least_ha && end > least_ha) {
                end = least_ha;
            }
            if (start < greatest_ha && end > greatest_ha) {
                start = greatest_ha;
            }
            if (start >= least_ha && end <= greatest_ha) continue;
            GC_add_roots_inner((char *)start, (char *)end, TRUE);
        }
    }
}
//
//  parse_map_entry parses an entry from /proc/self/maps so we can
//  locate all writable data segments that belong to shared libraries.
//  The format of one of these entries and the fields we care about
//  is as follows:
//  XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537     name of mapping...\n
//  ^^^^^^^^ ^^^^^^^^ ^^^^          ^^
//  start    end      prot          maj_dev
//
//  The parser is called with a pointer to the entry and the return value
//  is either NULL or is advanced to the next entry (the byte after the
//  trailing '\n').
//
#define OFFSET_MAP_START   0
#define OFFSET_MAP_END     9
#define OFFSET_MAP_PROT   18
#define OFFSET_MAP_MAJDEV 32
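//
//  For example, given the (hypothetical) entry
//      08048000-0804c000 rw-p 00000000 03:05 12345      /lib/libfoo.so
//  the fixed offsets above index the fields as follows:
//      buf_ptr + OFFSET_MAP_START   -> "08048000"  (start address)
//      buf_ptr + OFFSET_MAP_END     -> "0804c000"  (end address)
//      buf_ptr + OFFSET_MAP_PROT    -> "rw-p"      (protections)
//      buf_ptr + OFFSET_MAP_MAJDEV  -> "03:05"     (major device "03")
//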
static char *parse_map_entry(char *buf_ptr, word *start, word *end,
                             char *prot_buf, unsigned int *maj_dev)
{
    char *tok;

    if (buf_ptr == NULL || *buf_ptr == '\0') {
        return NULL;
    }

    memcpy(prot_buf, buf_ptr+OFFSET_MAP_PROT, 4); // do the protections first
    prot_buf[4] = '\0';

    if (prot_buf[1] == 'w') { // we can skip all of this if it's not writable

        tok = buf_ptr;
        buf_ptr[OFFSET_MAP_START+8] = '\0';
        *start = strtoul(tok, NULL, 16);

        tok = buf_ptr+OFFSET_MAP_END;
        buf_ptr[OFFSET_MAP_END+8] = '\0';
        *end = strtoul(tok, NULL, 16);

        buf_ptr += OFFSET_MAP_MAJDEV;
        tok = buf_ptr;
        while (*buf_ptr != ':') buf_ptr++;
        *buf_ptr++ = '\0';
        *maj_dev = strtoul(tok, NULL, 16);
    }

    // Advance to the byte past the trailing '\n'.
    while (*buf_ptr && *buf_ptr++ != '\n');

    return buf_ptr;
}
#endif /* USE_PROC_FOR_LIBRARIES */

#if !defined(USE_PROC_FOR_LIBRARIES)
/* The following is the preferred way to walk dynamic libraries */
/* for glibc 2.2.4+.  Unfortunately, it doesn't work for older   */
/* versions.  Thanks to Jakub Jelinek for most of the code.      */

# if defined(LINUX) /* Are others OK here, too? */ \
     && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 2) \
         || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 2 && defined(DT_CONFIG)))

/* We have the header files for a glibc that includes dl_iterate_phdr. */
/* It may still not be available in the library on the target system.  */
/* Thus we also treat it as a weak symbol.                              */
#define HAVE_DL_ITERATE_PHDR
/* Callback for dl_iterate_phdr; called once for each loaded object. */
static int GC_register_dynlib_callback(info, size, ptr)
     struct dl_phdr_info * info;
     size_t size;
     void * ptr;
{
  const ElfW(Phdr) * p;
  char * start;
  register int i;

  /* Make sure struct dl_phdr_info is at least as big as we need.  */
  if (size < offsetof (struct dl_phdr_info, dlpi_phnum)
      + sizeof (info->dlpi_phnum))
    return -1;

  /* Skip the first object - it is the main program.  */
  if (*(int *)ptr == 0)
    {
      *(int *)ptr = 1;
      return 0;
    }

  p = info->dlpi_phdr;
  for( i = 0; i < (int)(info->dlpi_phnum); ((i++),(p++)) ) {
    switch( p->p_type ) {
      case PT_LOAD:
        {
          if( !(p->p_flags & PF_W) ) break;
          start = ((char *)(p->p_vaddr)) + info->dlpi_addr;
          GC_add_roots_inner(start, start + p->p_memsz, TRUE);
        }
        break;
      default:
        break;
    }
  }

  return 0;
}
/* Return TRUE if we succeed, FALSE if dl_iterate_phdr wasn't there. */

#pragma weak dl_iterate_phdr

GC_bool GC_register_dynamic_libraries_dl_iterate_phdr()
{
  if (dl_iterate_phdr) {
    int tmp = 0;

    dl_iterate_phdr(GC_register_dynlib_callback, &tmp);
    return TRUE;
  } else {
    return FALSE;
  }
}
# else /* !LINUX || version(glibc) < 2.2.4 */

/* Dynamic loading code for Linux running ELF.  Somewhat tested on */
/* Linux/x86, untested but hopefully should work on Linux/Alpha.   */
/* This code was derived from the Solaris/ELF support.  Thanks to  */
/* whatever kind soul wrote that.  - Patrick Bridges               */

/* This doesn't necessarily work in all cases, e.g. with preloaded */
/* dynamic libraries.                                              */
#if defined(NETBSD)
#  include <sys/exec_elf.h>
#else
#  include <elf.h>
#endif
#include <link.h>

/* Newer versions of Linux/Alpha and Linux/x86 define this macro.  We */
/* define it for those older versions that don't.                     */
#  ifndef ElfW
#    if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
#      define ElfW(type) Elf32_##type
#    else
#      define ElfW(type) Elf64_##type
#    endif
#  endif

# endif /* !LINUX || version(glibc) < 2.2.4 */
static struct link_map *
GC_FirstDLOpenedLinkMap()
{
#   ifdef __GNUC__
#     pragma weak _DYNAMIC
#   endif
    extern ElfW(Dyn) _DYNAMIC[];
    ElfW(Dyn) *dp;
    static struct link_map *cachedResult = 0;

    if( _DYNAMIC == 0) {
        return(0);
    }
    if( cachedResult == 0 ) {
        int tag;
        for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}
void GC_register_dynamic_libraries()
{
  struct link_map *lm;

# ifdef HAVE_DL_ITERATE_PHDR
  if (GC_register_dynamic_libraries_dl_iterate_phdr()) {
      return;
  }
# endif
  lm = GC_FirstDLOpenedLinkMap();
  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
        ElfW(Ehdr) * e;
        ElfW(Phdr) * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (ElfW(Ehdr) *) lm->l_addr;
        p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(start, start + p->p_memsz, TRUE);
              }
              break;
            default:
              break;
          }
        }
    }
}

#endif /* !USE_PROC_FOR_LIBRARIES */

#endif /* LINUX ... */
#if defined(IRIX5) || (defined(USE_PROC_FOR_LIBRARIES) && !defined(LINUX))

#include <sys/procfs.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <elf.h>
#include <errno.h>
#include <signal.h>  /* Only for the following test. */
#ifndef _sigargs
# define IRIX6
#endif
extern void * GC_roots_present();
        /* The type is a lie, since the real type doesn't make sense here, */
        /* and we only test for NULL.                                      */

/* We use /proc to track down all parts of the address space that are */
/* mapped by the process, and throw out regions we know we shouldn't  */
/* worry about.  This may also work under other SVR4 variants.        */
void GC_register_dynamic_libraries()
{
    static int fd = -1;
    char buf[30];
    static prmap_t * addr_map = 0;
    static int current_sz = 0;  /* Number of records currently in addr_map */
    static int needed_sz;       /* Required size of addr_map               */
    int i;
    long flags;
    register ptr_t start;
    register ptr_t limit;
    ptr_t heap_start = (ptr_t)HEAP_START;
    ptr_t heap_end = heap_start;

#   ifdef SUNOS5DL
#     define MA_PHYS 0
#   endif /* SUNOS5DL */

    if (fd < 0) {
      sprintf(buf, "/proc/%d", getpid());
        /* The above generates a lint complaint, since pid_t varies. */
        /* It's unclear how to improve this.                         */
      fd = open(buf, O_RDONLY);
      if (fd < 0) {
        ABORT("/proc open failed");
      }
    }
    if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
        GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
        ABORT("/proc PIOCNMAP ioctl failed");
    }
    if (needed_sz >= current_sz) {
        current_sz = needed_sz * 2 + 1;
                        /* Expansion, plus room for 0 record */
        addr_map = (prmap_t *)GC_scratch_alloc((word)
                                               (current_sz * sizeof(prmap_t)));
    }
    if (ioctl(fd, PIOCMAP, addr_map) < 0) {
        GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
                       fd, errno, needed_sz, addr_map);
        ABORT("/proc PIOCMAP ioctl failed");
    }
    if (GC_n_heap_sects > 0) {
        heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
                        + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
        if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
    }
    for (i = 0; i < needed_sz; i++) {
        flags = addr_map[i].pr_mflags;
        if ((flags & (MA_BREAK | MA_STACK | MA_PHYS)) != 0) goto irrelevant;
        if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
            goto irrelevant;
          /* The latter test is empirically useless in very old Irix      */
          /* versions.  Other than the main data and stack segments,      */
          /* everything appears to be mapped readable, writable,          */
          /* executable, and shared(!!).  This makes no sense to me. - HB */
        start = (ptr_t)(addr_map[i].pr_vaddr);
        if (GC_roots_present(start)) goto irrelevant;
        if (start < heap_end && start >= heap_start)
            goto irrelevant;
#       ifdef MMAP_STACKS
          if (GC_is_thread_stack(start)) goto irrelevant;
#       endif /* MMAP_STACKS */

        limit = start + addr_map[i].pr_size;
        /* The following seemed to be necessary for very old versions */
        /* of Irix, but it has been reported to discard relevant      */
        /* segments under Irix 6.5.                                   */
#       ifndef IRIX6
          if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
            /* Discard text segments, i.e. 0-offset mappings against */
            /* executable files which appear to have ELF headers.    */
            caddr_t arg;
            int obj;
#           define MAP_IRR_SZ 10
            static ptr_t map_irr[MAP_IRR_SZ];
                            /* Known irrelevant map entries */
            static int n_irr = 0;
            struct stat buf;
            register int i;

            for (i = 0; i < n_irr; i++) {
                if (map_irr[i] == start) goto irrelevant;
            }
            arg = (caddr_t)start;
            obj = ioctl(fd, PIOCOPENM, &arg);
            if (obj >= 0) {
                fstat(obj, &buf);
                close(obj);
                if ((buf.st_mode & 0111) != 0) {
                    if (n_irr < MAP_IRR_SZ) {
                        map_irr[n_irr++] = start;
                    }
                    goto irrelevant;
                }
            }
          }
#       endif /* !IRIX6 */
        GC_add_roots_inner(start, limit, TRUE);
      irrelevant: ;
    }
    /* Don't keep cached descriptor, for now.  Some kernels don't like us */
    /* to keep a /proc file descriptor around during kill -9.             */
    if (close(fd) < 0) ABORT("Couldn't close /proc file");
    fd = -1;
}

# endif /* USE_PROC || IRIX5 */
# if defined(MSWIN32) || defined(MSWINCE)

# define WIN32_LEAN_AND_MEAN
# include <windows.h>

  /* We traverse the entire address space and register all segments */
  /* that could possibly have been written to.                      */

  extern GC_bool GC_is_heap_base(ptr_t p);
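
  /* For reference, GC_is_heap_base (defined in os_dep.c) just checks   */
  /* whether p is one of the allocation bases the collector recorded    */
  /* when it obtained heap sections from the OS.  A sketch, assuming an */
  /* array GC_heap_bases[] of GC_n_heap_bases recorded bases:           */
#if 0
  static GC_bool is_heap_base_sketch(ptr_t p)
  {
      unsigned i;

      for (i = 0; i < GC_n_heap_bases; i++) {
          if (GC_heap_bases[i] == p) return TRUE;
      }
      return FALSE;
  }
#endif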
# ifdef GC_WIN32_THREADS
  extern void GC_get_next_stack(char *start, char **lo, char **hi);
        /* Defined in win32_threads.c.  Sets *lo and *hi to the bounds */
        /* of the next thread stack at or above start; used below to   */
        /* avoid registering thread stacks as roots.                   */
  void GC_cond_add_roots(char *base, char * limit)
  {
      char * curr_base = base;
      char * next_stack_lo;
      char * next_stack_hi;

      if (base == limit) return;
      for(;;) {
          GC_get_next_stack(curr_base, &next_stack_lo, &next_stack_hi);
          if (next_stack_lo >= limit) break;
          GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
          curr_base = next_stack_hi;
      }
      if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
  }
# else
  void GC_cond_add_roots(char *base, char * limit)
  {
      char dummy;
      char * stack_top
         = (char *) ((word)(&dummy) & ~(GC_sysinfo.dwAllocationGranularity-1));
      if (base == limit) return;
      if (limit > stack_top && base < GC_stackbottom) {
          /* Part of the stack; ignore it. */
          return;
      }
      GC_add_roots_inner(base, limit, TRUE);
  }
# endif
# ifdef MSWIN32
  extern GC_bool GC_win32s;
# endif

  void GC_register_dynamic_libraries()
  {
    MEMORY_BASIC_INFORMATION buf;
    DWORD result;
    DWORD protect;
    LPVOID p;
    char * base;
    char * limit, * new_limit;

#   ifdef MSWIN32
      if (GC_win32s) return;
#   endif
    base = limit = p = GC_sysinfo.lpMinimumApplicationAddress;
#   if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
    /* Only the first 32 MB of address space belongs to the current process */
    while (p < (LPVOID)0x02000000) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result == 0) {
            /* Page is free; advance to the next possible allocation base */
            new_limit = (char *)
                (((DWORD) p + GC_sysinfo.dwAllocationGranularity)
                 & ~(GC_sysinfo.dwAllocationGranularity-1));
        } else
#   else
    while (p < GC_sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
#   endif
        {
            if (result != sizeof(buf)) {
                ABORT("Weird VirtualQuery result");
            }
            new_limit = (char *)p + buf.RegionSize;
            protect = buf.Protect;
            if (buf.State == MEM_COMMIT
                && (protect == PAGE_EXECUTE_READWRITE
                    || protect == PAGE_READWRITE)
                && !GC_is_heap_base(buf.AllocationBase)) {
                if ((char *)p != limit) {
                    GC_cond_add_roots(base, limit);
                    base = p;
                }
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
    }
    GC_cond_add_roots(base, limit);
  }

#endif /* MSWIN32 || MSWINCE */
#if defined(ALPHA) && defined(OSF1)

#include <loader.h>

void GC_register_dynamic_libraries()
{
  int status;
  ldr_process_t mypid;

  /* module */
    ldr_module_t moduleid = LDR_NULL_MODULE;
    ldr_module_info_t moduleinfo;
    size_t moduleinfosize = sizeof(moduleinfo);
    size_t modulereturnsize;

  /* region */
    ldr_region_t region;
    ldr_region_info_t regioninfo;
    size_t regioninfosize = sizeof(regioninfo);
    size_t regionreturnsize;

  /* Obtain id of this process */
    mypid = ldr_my_process();

  /* For each module */
    while (TRUE) {

      /* Get the next (first) module */
        status = ldr_next_module(mypid, &moduleid);

      /* Any more modules? */
        if (moduleid == LDR_NULL_MODULE)
            break;    /* No more modules */

      /* Check status AFTER checking moduleid because       */
      /* of a bug in the non-shared ldr_next_module stub    */
        if (status != 0) {
            GC_printf1("dynamic_load: status = %ld\n", (long)status);
            {
                extern char *sys_errlist[];
                extern int sys_nerr;
                extern int errno;
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long)errno);
                }
            }
            ABORT("ldr_next_module failed");
        }

      /* Get the module information */
        status = ldr_inq_module(mypid, moduleid, &moduleinfo,
                                moduleinfosize, &modulereturnsize);
        if (status != 0)
            ABORT("ldr_inq_module failed");

      /* Is the module for the main program (i.e. nonshared portion)? */
        if (moduleinfo.lmi_flags & LDR_MAIN)
            continue;    /* skip the main module */

      /* Debugging output describing the module. */
        GC_printf("---Module---\n");
        GC_printf("Module ID            = %16ld\n", moduleinfo.lmi_modid);
        GC_printf("Count of regions     = %16d\n", moduleinfo.lmi_nregion);
        GC_printf("flags for module     = %16lx\n", moduleinfo.lmi_flags);
        GC_printf("pathname of module   = \"%s\"\n", moduleinfo.lmi_name);

      /* For each region in this module */
        for (region = 0; region < moduleinfo.lmi_nregion; region++) {

          /* Get the region information */
            status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
                                    regioninfosize, &regionreturnsize);
            if (status != 0)
                ABORT("ldr_inq_region failed");

          /* Only process writable (data) regions */
            if (! (regioninfo.lri_prot & LDR_W))
                continue;

          /* Debugging output describing the region. */
            GC_printf("--- Region ---\n");
            GC_printf("Region number    = %16ld\n",
                      regioninfo.lri_region_no);
            GC_printf("Protection flags = %016x\n",  regioninfo.lri_prot);
            GC_printf("Virtual address  = %16p\n",   regioninfo.lri_vaddr);
            GC_printf("Mapped address   = %16p\n",   regioninfo.lri_mapaddr);
            GC_printf("Region size      = %16ld\n",  regioninfo.lri_size);
            GC_printf("Region name      = \"%s\"\n", regioninfo.lri_name);

          /* Register region as a garbage collection root */
            GC_add_roots_inner(
                (char *)regioninfo.lri_mapaddr,
                (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
                TRUE);
        }
    }
}

#endif /* ALPHA && OSF1 */
#if defined(HPUX)

#include <errno.h>
#include <dl.h>

extern int errno;
extern char *sys_errlist[];
extern int sys_nerr;

void GC_register_dynamic_libraries()
{
  int status;
  int index = 1; /* Ordinal position in shared library search list */
  struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */

  /* For each dynamic library loaded */
    while (TRUE) {

      /* Get info about next shared library */
        status = shl_get(index, &shl_desc);

      /* Check if this is the end of the list or if some error occurred */
        if (status != 0) {
#        ifdef GC_HPUX_THREADS
           /* I've seen errno values of 0.  The man page is not clear */
           /* as to whether errno should get set on a -1 return.      */
           break;
#        else
           if (errno == EINVAL) {
               break; /* Moved past end of shared library list --> finished */
           } else {
               if (errno <= sys_nerr) {
                   GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
               } else {
                   GC_printf1("dynamic_load: %d\n", (long) errno);
               }
               ABORT("shl_get failed");
           }
#        endif
        }

      /* Debugging output describing the library. */
        GC_printf0("---Shared library---\n");
        GC_printf1("\tfilename        = \"%s\"\n", shl_desc->filename);
        GC_printf1("\tindex           = %d\n", index);
        GC_printf1("\thandle          = %08x\n",
                   (unsigned long) shl_desc->handle);
        GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
        GC_printf1("\ttext seg. end   = %08x\n", shl_desc->tend);
        GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
        GC_printf1("\tdata seg. end   = %08x\n", shl_desc->dend);
        GC_printf1("\tref. count      = %lu\n", shl_desc->ref_count);

      /* Register shared library's data segment as a garbage collection root */
        GC_add_roots_inner((char *) shl_desc->dstart,
                           (char *) shl_desc->dend, TRUE);

        index++;
    }
}

#endif /* HPUX */
#ifdef RS6000
#include <sys/ldr.h>
#include <sys/errno.h>
void GC_register_dynamic_libraries()
{
        int len;
        char *ldibuf;
        int ldibuflen;
        struct ld_info *ldi;

        ldibuf = alloca(ldibuflen = 8192);

        while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
                if (errno != ENOMEM) {
                        ABORT("loadquery failed");
                }
                ldibuf = alloca(ldibuflen *= 2);
        }

        ldi = (struct ld_info *)ldibuf;
        while (ldi) {
                len = ldi->ldinfo_next;
                GC_add_roots_inner(
                                ldi->ldinfo_dataorg,
                                (unsigned long)ldi->ldinfo_dataorg
                                + ldi->ldinfo_datasize,
                                TRUE);
                ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
        }
}
#endif /* RS6000 */
#else /* !DYNAMIC_LOADING */

#ifdef PCR

#   include "il/PCR_IL.h"
#   include "th/PCR_ThCtl.h"
#   include "mm/PCR_MM.h"

void GC_register_dynamic_libraries()
{
    /* Add new static data areas of dynamically loaded modules. */
    {
        PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
        PCR_IL_LoadedSegment * q;

        /* Skip uncommitted files */
        while (p != NIL && !(p -> lf_commitPoint)) {
            /* The loading of this file has not yet been committed. */
            /* Hence its description could be inconsistent.         */
            /* Furthermore, it hasn't yet been run.  Hence its data */
            /* segments can't possibly reference heap allocated     */
            /* objects.                                             */
            p = p -> lf_prev;
        }
        for (; p != NIL; p = p -> lf_prev) {
            for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
                if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
                    == PCR_IL_SegFlags_Traced_on) {
                    GC_add_roots_inner
                        ((char *)(q -> ls_addr),
                         (char *)(q -> ls_addr) + q -> ls_bytes,
                         TRUE);
                }
            }
        }
    }
}

#else /* !PCR */

void GC_register_dynamic_libraries(){}

int GC_no_dynamic_loading;

#endif /* !PCR */

#endif /* !DYNAMIC_LOADING */