/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1997 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Original author: Bill Janssen
 * Heavily modified by Hans Boehm and others
 */

/*
 * This is incredibly OS specific code for tracking down data sections in
 * dynamic libraries.  There appears to be no way of doing this quickly
 * without groveling through undocumented data structures.  We would argue
 * that this is a bug in the design of the dlopen interface.  THIS CODE
 * MAY BREAK IN FUTURE OS RELEASES.  If this matters to you, don't hesitate
 * to let your vendor know ...
 *
 * None of this is safe with dlclose and incremental collection.
 * But then not much of anything is safe in the presence of dlclose.
 */
#ifndef MACOS
#  include <sys/types.h>
#endif
#include "gc_priv.h"

/* BTL: avoid circular redefinition of dlopen if SOLARIS_THREADS defined */
# if (defined(LINUX_THREADS) || defined(SOLARIS_THREADS) \
     || defined(HPUX_THREADS) || defined(IRIX_THREADS)) && defined(dlopen) \
     && !defined(USE_LD_WRAP)
    /* To support threads in Solaris, gc.h interposes on dlopen by       */
    /* defining "dlopen" to be "GC_dlopen", which is implemented below.  */
    /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the   */
    /* real system dlopen() in their implementation.  We first remove    */
    /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
#   undef dlopen
#   define GC_must_restore_redefined_dlopen
# else
#   undef GC_must_restore_redefined_dlopen
# endif
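
/* Illustrative sketch (not part of the collector): with the gc.h        */
/* interposition in effect, a client's ordinary dlopen call is rewritten */
/* by the preprocessor to go through the GC wrapper, e.g.                */
/*                                                                        */
/*     #include "gc.h"                                                    */
/*     void *h = dlopen("libfoo.so", RTLD_LAZY);                          */
/*         ... is compiled as ...                                         */
/*     void *h = GC_dlopen("libfoo.so", RTLD_LAZY);                       */
/*                                                                        */
/* "libfoo.so" is just a placeholder name.  GC_dlopen (defined below)     */
/* calls the real dlopen with collection disabled, so that library        */
/* initialization cannot race with GC_register_dynamic_libraries.         */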
#if (defined(DYNAMIC_LOADING) || defined(MSWIN32)) && !defined(PCR)
#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
    !defined(MSWIN32) && !(defined(ALPHA) && defined(OSF1)) && \
    !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
    !defined(RS6000) && !defined(SCO_ELF)
 --> We only know how to find data segments of dynamic libraries for the
 --> above.  Additional SVR4 variants might not be too
 --> hard to add.
#endif
#include <stdio.h>
#ifdef SUNOS5DL
#   include <sys/elf.h>
#   include <dlfcn.h>
#   include <link.h>
#endif
#ifdef SUNOS4
#   include <dlfcn.h>
#   include <link.h>
#   include <a.out.h>
  /* struct link_map field overrides */
#   define l_next   lm_next
#   define l_addr   lm_addr
#   define l_name   lm_name
#endif
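
/* Added note: the defines above let the SunOS 4 (a.out) link_map, whose */
/* fields are named lm_next/lm_addr/lm_name, be walked with the SVR4-    */
/* style field name l_next used by the shared traversal loop below.      */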

#if defined(SUNOS5DL) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    Elf32_Dyn _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern Elf32_Dyn _DYNAMIC;
    Elf32_Dyn *dp;
    struct r_debug *r;
    static struct link_map * cachedResult = 0;
    static Elf32_Dyn *dynStructureAddr = 0;
                /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */

#   ifdef SUNOS53_SHARED_LIB
        /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set  */
        /* up properly in dynamically linked .so's.  This means we have  */
        /* to use its value in the set of original object files loaded   */
        /* at program startup.                                           */
        if( dynStructureAddr == 0 ) {
            void* startupSyms = dlopen(0, RTLD_LAZY);
            dynStructureAddr = (Elf32_Dyn*)dlsym(startupSyms, "_DYNAMIC");
        }
#   else
        dynStructureAddr = &_DYNAMIC;
#   endif

    if( dynStructureAddr == 0) {
        return(0);
    }

    if( cachedResult == 0 ) {
        int tag;
        for( dp = ((Elf32_Dyn *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}

#endif /* SUNOS5DL ... */

#if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    struct link_dynamic _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern struct link_dynamic _DYNAMIC;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    return(_DYNAMIC.ld_un.ld_1->ld_loaded);
}

/* Return the address of the ld.so allocated common symbol  */
/* with the least address, or 0 if none.                    */
static ptr_t GC_first_common()
{
    ptr_t result = 0;
    extern struct link_dynamic _DYNAMIC;
    struct rtc_symb * curr_symbol;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    curr_symbol = _DYNAMIC.ldd -> ldd_cp;
    for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
        if (result == 0
            || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
            result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
        }
    }
    return(result);
}

#endif  /* SUNOS4 ... */

# if defined(LINUX_THREADS) || defined(SOLARIS_THREADS) \
     || defined(HPUX_THREADS) || defined(IRIX_THREADS)

  /* Make sure we're not in the middle of a collection, and make sure   */
  /* we don't start any.  Returns the previous value of GC_dont_gc.     */
  /* This is invoked prior to a dlopen call to avoid synchronization    */
  /* issues.  We can't just acquire the allocation lock, since startup  */
  /* code in dlopen may try to allocate.                                */
  /* This solution risks heap growth in the presence of many dlopen     */
  /* calls in either a multithreaded environment, or if the library     */
  /* initialization code allocates substantial amounts of GC'ed memory. */
  /* But I don't know of a better solution.                             */
  /* This can still deadlock if the client explicitly starts a GC       */
  /* during the dlopen.  He shouldn't do that.                          */
  static GC_bool disable_gc_for_dlopen()
  {
    GC_bool result;
    LOCK();
    result = GC_dont_gc;
    while (GC_incremental && GC_collection_in_progress()) {
        GC_collect_a_little_inner(1000);
    }
    GC_dont_gc = TRUE;
    UNLOCK();
    return(result);
  }

  /* Redefine dlopen to guarantee mutual exclusion with        */
  /* GC_register_dynamic_libraries.                             */
  /* This should probably happen for other operating systems, too. */

#include <dlfcn.h>

#ifdef USE_LD_WRAP
  void * __wrap_dlopen(const char *path, int mode)
#else
  void * GC_dlopen(path, mode)
  GC_CONST char * path;
  int mode;
#endif
{
    void * result;
    GC_bool dont_gc_save;

#   ifndef USE_PROC_FOR_LIBRARIES
      dont_gc_save = disable_gc_for_dlopen();
#   endif
#   ifdef USE_LD_WRAP
      result = __real_dlopen(path, mode);
#   else
      result = dlopen(path, mode);
#   endif
#   ifndef USE_PROC_FOR_LIBRARIES
      GC_dont_gc = dont_gc_save;
#   endif
    return(result);
}
# endif  /* LINUX_THREADS || SOLARIS_THREADS || ... */
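
/* Build-time sketch (an assumption, not taken from this file): when the */
/* collector is built with USE_LD_WRAP, the wrapper above is named       */
/* __wrap_dlopen, which matches the GNU ld --wrap convention.  A link    */
/* line along these lines routes every dlopen call through the GC:       */
/*                                                                        */
/*     gcc app.o -lgc -ldl -Wl,--wrap,dlopen                              */
/*                                                                        */
/* With --wrap,dlopen the linker resolves dlopen to __wrap_dlopen and     */
/* __real_dlopen to the system dlopen, which is exactly what the code     */
/* above expects.  The exact set of libraries depends on the install.     */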

/* BTL: added to fix circular dlopen definition if SOLARIS_THREADS defined */
# if defined(GC_must_restore_redefined_dlopen)
#   define dlopen GC_dlopen
# endif

# if defined(SUNOS4) || defined(SUNOS5DL)
/* Add dynamic library data sections to the root set.          */
# if !defined(PCR) && !defined(SOLARIS_THREADS) && defined(THREADS)
#   ifndef SRC_M3
        --> fix mutual exclusion with dlopen
#   endif  /* We assume M3 programs don't call dlopen for now */
# endif

# ifndef USE_PROC_FOR_LIBRARIES
void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
#     ifdef SUNOS4
        struct exec *e;

        e = (struct exec *) lm->lm_addr;
        GC_add_roots_inner(
                    ((char *) (N_DATOFF(*e) + lm->lm_addr)),
                    ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
                    TRUE);
#     endif
#     ifdef SUNOS5DL
        Elf32_Ehdr * e;
        Elf32_Phdr * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (Elf32_Ehdr *) lm->l_addr;
        p = ((Elf32_Phdr *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(
                  start,
                  start + p->p_memsz,
                  TRUE
                );
              }
              break;
            default:
              break;
          }
        }
#     endif
    }
#   ifdef SUNOS4
      {
        static ptr_t common_start = 0;
        ptr_t common_end;
        extern ptr_t GC_find_limit();

        if (common_start == 0) common_start = GC_first_common();
        if (common_start != 0) {
            common_end = GC_find_limit(common_start, TRUE);
            GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
        }
      }
#   endif
}

# endif /* !USE_PROC ... */
# endif /* SUNOS */

#if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF)

/* Dynamic loading code for Linux running ELF.  Somewhat tested on
 * Linux/x86, untested but hopefully should work on Linux/Alpha.
 * This code was derived from the Solaris/ELF support.  Thanks to
 * whatever kind soul wrote that.  - Patrick Bridges */

#include <elf.h>
#include <link.h>

/* Newer versions of Linux/Alpha and Linux/x86 define this macro.  We
 * define it for those older versions that don't.  */
#  ifndef ElfW
#    if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
#      define ElfW(type) Elf32_##type
#    else
#      define ElfW(type) Elf64_##type
#    endif
#  endif
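
/* For example, on a 32-bit ELF target ElfW(Ehdr) expands to Elf32_Ehdr   */
/* and ElfW(Phdr) to Elf32_Phdr; on a 64-bit target (ELF_CLASS defined as */
/* ELFCLASS64) the same code below picks up the Elf64_* types instead.    */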

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
#   ifdef __GNUC__
#     pragma weak _DYNAMIC
#   endif
    extern ElfW(Dyn) _DYNAMIC[];
    ElfW(Dyn) *dp;
    struct r_debug *r;
    static struct link_map *cachedResult = 0;

    if( _DYNAMIC == 0) {
        return(0);
    }
    if( cachedResult == 0 ) {
        int tag;
        for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}

void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
        ElfW(Ehdr) * e;
        ElfW(Phdr) * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (ElfW(Ehdr) *) lm->l_addr;
        p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(start, start + p->p_memsz, TRUE);
              }
              break;
            default:
              break;
          }
        }
    }
}

#endif

#if defined(IRIX5) || defined(USE_PROC_FOR_LIBRARIES)

#include <sys/procfs.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <elf.h>
#include <errno.h>

extern void * GC_roots_present();
        /* The type is a lie, since the real type doesn't make sense here, */
        /* and we only test for NULL.                                       */

#ifndef GC_scratch_last_end_ptr
extern ptr_t GC_scratch_last_end_ptr;   /* End of GC_scratch_alloc arena */
#endif

/* We use /proc to track down all parts of the address space that are  */
/* mapped by the process, and throw out regions we know we shouldn't   */
/* worry about.  This may also work under other SVR4 variants.         */
void GC_register_dynamic_libraries()
{
    static int fd = -1;
    char buf[30];
    static prmap_t * addr_map = 0;
    static int current_sz = 0;  /* Number of records currently in addr_map */
    static int needed_sz;       /* Required size of addr_map              */
    register int i;
    register long flags;
    register ptr_t start;
    register ptr_t limit;
    ptr_t heap_start = (ptr_t)HEAP_START;
    ptr_t heap_end = heap_start;

#   ifdef SUNOS5DL
#     define MA_PHYS 0
#   endif /* SUNOS5DL */

    if (fd < 0) {
      sprintf(buf, "/proc/%d", getpid());
        /* The above generates a lint complaint, since pid_t varies.   */
        /* It's unclear how to improve this.                           */
      fd = open(buf, O_RDONLY);
      if (fd < 0) {
        ABORT("/proc open failed");
      }
    }
    if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
        GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
        ABORT("/proc PIOCNMAP ioctl failed");
    }
    if (needed_sz >= current_sz) {
        current_sz = needed_sz * 2 + 1;
                        /* Expansion, plus room for 0 record */
        addr_map = (prmap_t *)GC_scratch_alloc((word)
                                        (current_sz * sizeof(prmap_t)));
    }
    if (ioctl(fd, PIOCMAP, addr_map) < 0) {
        GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
                        fd, errno, needed_sz, addr_map);
        ABORT("/proc PIOCMAP ioctl failed");
    }
    if (GC_n_heap_sects > 0) {
        heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
                        + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
        if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
    }
    for (i = 0; i < needed_sz; i++) {
        flags = addr_map[i].pr_mflags;
        if ((flags & (MA_BREAK | MA_STACK | MA_PHYS)) != 0) goto irrelevant;
        if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
            goto irrelevant;
          /* The latter test is empirically useless.  Other than the   */
          /* main data and stack segments, everything appears to be    */
          /* mapped readable, writable, executable, and shared(!!).    */
          /* This makes no sense to me. - HB                           */
        start = (ptr_t)(addr_map[i].pr_vaddr);
        if (GC_roots_present(start)) goto irrelevant;
        if (start < heap_end && start >= heap_start)
            goto irrelevant;
#       ifdef MMAP_STACKS
          if (GC_is_thread_stack(start)) goto irrelevant;
#       endif /* MMAP_STACKS */

        limit = start + addr_map[i].pr_size;
        if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
            /* Discard text segments, i.e. 0-offset mappings against   */
            /* executable files which appear to have ELF headers.      */
            caddr_t arg;
            int obj;
#           define MAP_IRR_SZ 10
            static ptr_t map_irr[MAP_IRR_SZ];
                                /* Known irrelevant map entries */
            static int n_irr = 0;
            struct stat buf;
            register int i;

            for (i = 0; i < n_irr; i++) {
                if (map_irr[i] == start) goto irrelevant;
            }
            arg = (caddr_t)start;
            obj = ioctl(fd, PIOCOPENM, &arg);
            if (obj >= 0) {
                fstat(obj, &buf);
                close(obj);
                if ((buf.st_mode & 0111) != 0) {
                    if (n_irr < MAP_IRR_SZ) {
                        map_irr[n_irr++] = start;
                    }
                    goto irrelevant;
                }
            }
        }
        GC_add_roots_inner(start, limit, TRUE);
      irrelevant: ;
    }
    /* Don't keep a cached descriptor, for now.  Some kernels don't like */
    /* us to keep a /proc file descriptor around during kill -9.         */
    if (close(fd) < 0) ABORT("Couldn't close /proc file");
    fd = -1;
}

# endif /* USE_PROC || IRIX5 */

# ifdef MSWIN32

# define WIN32_LEAN_AND_MEAN
# define NOSERVICE
# include <windows.h>
# include <stdlib.h>

  /* We traverse the entire address space and register all segments    */
  /* that could possibly have been written to.                         */
  DWORD GC_allocation_granularity;

  extern GC_bool GC_is_heap_base (ptr_t p);

# ifdef WIN32_THREADS
    extern void GC_get_next_stack(char *start, char **lo, char **hi);
# endif
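
  /* Added note: GC_cond_add_roots registers [base, limit) as a root    */
  /* set addition, except for thread stacks.  With WIN32_THREADS the    */
  /* stack ranges reported by GC_get_next_stack are skipped; otherwise  */
  /* the whole range is ignored if it overlaps the main stack.          */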
  void GC_cond_add_roots(char *base, char * limit)
  {
    char dummy;
    char * stack_top
           = (char *) ((word)(&dummy) & ~(GC_allocation_granularity-1));
    if (base == limit) return;
#   ifdef WIN32_THREADS
    {
        char * curr_base = base;
        char * next_stack_lo;
        char * next_stack_hi;

        for(;;) {
            GC_get_next_stack(curr_base, &next_stack_lo, &next_stack_hi);
            if (next_stack_lo >= limit) break;
            GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
            curr_base = next_stack_hi;
        }
        if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
    }
#   else
        if (limit > stack_top && base < GC_stackbottom) {
            /* Part of the stack; ignore it. */
            return;
        }
        GC_add_roots_inner(base, limit, TRUE);
#   endif
  }

  extern GC_bool GC_win32s;

  void GC_register_dynamic_libraries()
  {
    MEMORY_BASIC_INFORMATION buf;
    SYSTEM_INFO sysinfo;
    DWORD result;
    DWORD protect;
    LPVOID p;
    char * base;
    char * limit, * new_limit;

    if (GC_win32s) return;
    GetSystemInfo(&sysinfo);
    base = limit = p = sysinfo.lpMinimumApplicationAddress;
    GC_allocation_granularity = sysinfo.dwAllocationGranularity;
    while (p < sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result != sizeof(buf)) {
            ABORT("Weird VirtualQuery result");
        }
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && (protect == PAGE_EXECUTE_READWRITE
                || protect == PAGE_READWRITE)
            && !GC_is_heap_base(buf.AllocationBase)) {
            if ((char *)p == limit) {
                limit = new_limit;
            } else {
                GC_cond_add_roots(base, limit);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
    }
    GC_cond_add_roots(base, limit);
  }

#endif /* MSWIN32 */

#if defined(ALPHA) && defined(OSF1)

#include <loader.h>

void GC_register_dynamic_libraries()
{
  int status;
  ldr_process_t mypid;

  /* module */
    ldr_module_t moduleid = LDR_NULL_MODULE;
    ldr_module_info_t moduleinfo;
    size_t moduleinfosize = sizeof(moduleinfo);
    size_t modulereturnsize;

  /* region */
    ldr_region_t region;
    ldr_region_info_t regioninfo;
    size_t regioninfosize = sizeof(regioninfo);
    size_t regionreturnsize;

  /* Obtain id of this process */
    mypid = ldr_my_process();

  /* For each module */
    while (TRUE) {

      /* Get the next (first) module */
        status = ldr_next_module(mypid, &moduleid);

      /* Any more modules? */
        if (moduleid == LDR_NULL_MODULE)
            break;    /* No more modules */

      /* Check status AFTER checking moduleid because of a bug in */
      /* the non-shared ldr_next_module stub.                     */
        if (status != 0 ) {
            GC_printf1("dynamic_load: status = %ld\n", (long)status);
            {
                extern char *sys_errlist[];
                extern int sys_nerr;
                extern int errno;
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long)errno);
                }
            }
            ABORT("ldr_next_module failed");
        }

      /* Get the module information */
        status = ldr_inq_module(mypid, moduleid, &moduleinfo,
                                moduleinfosize, &modulereturnsize);
        if (status != 0 )
            ABORT("ldr_inq_module failed");

      /* Is this module the main program (i.e. the nonshared portion)? */
        if (moduleinfo.lmi_flags & LDR_MAIN)
            continue;    /* skip the main module */

#     ifdef VERBOSE
        GC_printf("---Module---\n");
        GC_printf("Module ID          = %16ld\n", moduleinfo.lmi_modid);
        GC_printf("Count of regions   = %16d\n", moduleinfo.lmi_nregion);
        GC_printf("Flags for module   = %16lx\n", moduleinfo.lmi_flags);
        GC_printf("Pathname of module = \"%s\"\n", moduleinfo.lmi_name);
#     endif

      /* For each region in this module */
        for (region = 0; region < moduleinfo.lmi_nregion; region++) {

          /* Get the region information */
            status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
                                    regioninfosize, &regionreturnsize);
            if (status != 0 )
                ABORT("ldr_inq_region failed");

          /* Only process writable (data) regions */
            if (! (regioninfo.lri_prot & LDR_W))
                continue;

#         ifdef VERBOSE
            GC_printf("--- Region ---\n");
            GC_printf("Region number    = %16ld\n",
                      regioninfo.lri_region_no);
            GC_printf("Protection flags = %016x\n",  regioninfo.lri_prot);
            GC_printf("Virtual address  = %16p\n",   regioninfo.lri_vaddr);
            GC_printf("Mapped address   = %16p\n",   regioninfo.lri_mapaddr);
            GC_printf("Region size      = %16ld\n",  regioninfo.lri_size);
            GC_printf("Region name      = \"%s\"\n", regioninfo.lri_name);
#         endif

          /* Register region as a garbage collection root */
            GC_add_roots_inner (
                (char *)regioninfo.lri_mapaddr,
                (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
                TRUE);
        }
    }
}
#endif

#if defined(HPUX)

#include <errno.h>
#include <dl.h>

extern int errno;
extern char *sys_errlist[];
extern int sys_nerr;

void GC_register_dynamic_libraries()
{
  int status;
  int index = 1; /* Ordinal position in shared library search list */
  struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */

  /* For each dynamic library loaded */
    while (TRUE) {

      /* Get info about the next shared library */
        status = shl_get(index, &shl_desc);

      /* Check if this is the end of the list or if some error occurred */
        if (status != 0) {
#         ifdef HPUX_THREADS
            /* I've seen errno values of 0.  The man page is not clear  */
            /* as to whether errno should get set on a -1 return.       */
            break;
#         else
            if (errno == EINVAL) {
                break; /* Moved past end of shared library list --> finished */
            } else {
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long) errno);
                }
                ABORT("shl_get failed");
            }
#         endif
        }

#     ifdef VERBOSE
        GC_printf0("---Shared library---\n");
        GC_printf1("\tfilename        = \"%s\"\n", shl_desc->filename);
        GC_printf1("\tindex           = %d\n", index);
        GC_printf1("\thandle          = %08x\n",
                   (unsigned long) shl_desc->handle);
        GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
        GC_printf1("\ttext seg. end   = %08x\n", shl_desc->tend);
        GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
        GC_printf1("\tdata seg. end   = %08x\n", shl_desc->dend);
        GC_printf1("\tref. count      = %lu\n", shl_desc->ref_count);
#     endif

      /* Register the shared library's data segment as a garbage collection root */
        GC_add_roots_inner((char *) shl_desc->dstart,
                           (char *) shl_desc->dend, TRUE);

        index++;
    }
}
#endif /* HPUX */

#ifdef RS6000
#pragma alloca
#include <sys/ldr.h>
#include <sys/errno.h>
void GC_register_dynamic_libraries()
{
        int len;
        char *ldibuf;
        int ldibuflen;
        struct ld_info *ldi;

        ldibuf = alloca(ldibuflen = 8192);

        while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
                if (errno != ENOMEM) {
                        ABORT("loadquery failed");
                }
                ldibuf = alloca(ldibuflen *= 2);
        }

        ldi = (struct ld_info *)ldibuf;
        while (ldi) {
                len = ldi->ldinfo_next;
                GC_add_roots_inner(
                                ldi->ldinfo_dataorg,
                                (unsigned long)ldi->ldinfo_dataorg
                                + ldi->ldinfo_datasize,
                                TRUE);
                ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
        }
}
#endif /* RS6000 */

#else /* !DYNAMIC_LOADING */

#ifdef PCR

#   include "il/PCR_IL.h"
#   include "th/PCR_ThCtl.h"
#   include "mm/PCR_MM.h"

void GC_register_dynamic_libraries()
{
    /* Add new static data areas of dynamically loaded modules. */
    {
        PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
        PCR_IL_LoadedSegment * q;

        /* Skip uncommitted files */
        while (p != NIL && !(p -> lf_commitPoint)) {
            /* The loading of this file has not yet been committed.    */
            /* Hence its description could be inconsistent.            */
            /* Furthermore, it hasn't yet been run.  Hence its data    */
            /* segments can't possibly reference heap allocated        */
            /* objects.                                                */
            p = p -> lf_prev;
        }
        for (; p != NIL; p = p -> lf_prev) {
          for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
            if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
                == PCR_IL_SegFlags_Traced_on) {
              GC_add_roots_inner
                  ((char *)(q -> ls_addr),
                   (char *)(q -> ls_addr) + q -> ls_bytes,
                   TRUE);
            }
          }
        }
    }
}

#else /* !PCR */

void GC_register_dynamic_libraries(){}

int GC_no_dynamic_loading;

#endif /* !PCR */
#endif /* !DYNAMIC_LOADING */