/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1997 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Original author: Bill Janssen
 * Heavily modified by Hans Boehm and others
 */

/*
 * This is incredibly OS specific code for tracking down data sections in
 * dynamic libraries.  There appears to be no way of doing this quickly
 * without groveling through undocumented data structures.  We would argue
 * that this is a bug in the design of the dlopen interface.  THIS CODE
 * MAY BREAK IN FUTURE OS RELEASES.  If this matters to you, don't hesitate
 * to let your vendor know ...
 *
 * None of this is safe with dlclose and incremental collection.
 * But then not much of anything is safe in the presence of dlclose.
 */
#ifndef MACOS
# include <sys/types.h>
#endif
#include "gc_priv.h"
/* BTL: avoid circular redefinition of dlopen if SOLARIS_THREADS defined */
# if (defined(SOLARIS_THREADS) || defined(LINUX_THREADS)) && defined(dlopen)
    /* To support threads in Solaris, gc.h interposes on dlopen by       */
    /* defining "dlopen" to be "GC_dlopen", which is implemented below.  */
    /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the   */
    /* real system dlopen() in their implementation. We first remove     */
    /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
#   undef dlopen
#   define GC_must_restore_redefined_dlopen
# else
#   undef GC_must_restore_redefined_dlopen
# endif
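/*
 * How the interposition above fits together (a sketch; gc.h's exact
 * wording may differ): when SOLARIS_THREADS or LINUX_THREADS is defined,
 * gc.h effectively does
 *     #define dlopen GC_dlopen
 * so a client call such as dlopen("libfoo.so", RTLD_LAZY) (illustrative
 * name) reaches GC_dlopen() below, which takes the collector's lock
 * around the real dlopen().  That keeps library loading from racing with
 * GC_register_dynamic_libraries().  Here we temporarily undo the macro so
 * that this file itself can call the real dlopen().
 */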
#if (defined(DYNAMIC_LOADING) || defined(MSWIN32)) && !defined(PCR)
#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
    !defined(MSWIN32) && !(defined(ALPHA) && defined(OSF1)) && \
    !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
    !defined(RS6000) && !defined(SCO_ELF)
 --> We only know how to find data segments of dynamic libraries for the
 --> above.  Additional SVR4 variants might not be too
 --> hard to add.
#endif
#include <stdio.h>
#ifdef SUNOS5DL
#   include <sys/elf.h>
#   include <dlfcn.h>
#   include <link.h>
#endif
#ifdef SUNOS4
#   include <dlfcn.h>
#   include <link.h>
#   include <a.out.h>
  /* struct link_map field overrides */
#   define l_next   lm_next
#   define l_addr   lm_addr
#   define l_name   lm_name
#endif
#if defined(SUNOS5DL) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    Elf32_Dyn _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern Elf32_Dyn _DYNAMIC;
    Elf32_Dyn *dp;
    struct r_debug *r;
    static struct link_map * cachedResult = 0;
    static Elf32_Dyn *dynStructureAddr = 0;
                /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */

#   ifdef SUNOS53_SHARED_LIB
        /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
        /* up properly in dynamically linked .so's. This means we have  */
        /* to use its value in the set of original object files loaded  */
        /* at program startup.                                          */
        if( dynStructureAddr == 0 ) {
          void* startupSyms = dlopen(0, RTLD_LAZY);
          dynStructureAddr = (Elf32_Dyn*)dlsym(startupSyms, "_DYNAMIC");
        }
#   else
        dynStructureAddr = &_DYNAMIC;
#   endif

    if( dynStructureAddr == 0) {
        return(0);
    }

    if( cachedResult == 0 ) {
        int tag;
        for( dp = ((Elf32_Dyn *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}

#endif /* SUNOS5DL ... */
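/*
 * How the DT_DEBUG lookup above works: the DT_DEBUG entry in the
 * executable's _DYNAMIC array points (via d_un.d_ptr) at the run-time
 * linker's struct r_debug, whose r_map field heads the chain of link_map
 * entries, one per loaded object.  The first entry normally describes the
 * executable itself, so lm->l_next is returned to start at the first
 * shared library.
 */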
#if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    struct link_dynamic _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern struct link_dynamic _DYNAMIC;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    return(_DYNAMIC.ld_un.ld_1->ld_loaded);
}

/* Return the address of the ld.so allocated common symbol  */
/* with the least address, or 0 if none.                    */
static ptr_t GC_first_common()
{
    ptr_t result = 0;
    extern struct link_dynamic _DYNAMIC;
    struct rtc_symb * curr_symbol;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    curr_symbol = _DYNAMIC.ldd -> ldd_cp;
    for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
        if (result == 0
            || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
            result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
        }
    }
    return(result);
}

#endif  /* SUNOS4 ... */
# if defined(SUNOS4) || defined(SUNOS5DL)
/* Add dynamic library data sections to the root set.          */
# if !defined(PCR) && !defined(SOLARIS_THREADS) && defined(THREADS)
#   ifndef SRC_M3
        --> fix mutual exclusion with dlopen
#   endif  /* We assume M3 programs don't call dlopen for now */
# endif

# ifdef SOLARIS_THREADS
  /* Redefine dlopen to guarantee mutual exclusion with        */
  /* GC_register_dynamic_libraries.                            */
  /* Assumes that dlopen doesn't need to call GC_malloc        */
  /* and friends.                                              */
# include <thread.h>
# include <synch.h>

void * GC_dlopen(const char *path, int mode)
{
    void * result;

#   ifndef USE_PROC_FOR_LIBRARIES
      mutex_lock(&GC_allocate_ml);
#   endif
    result = dlopen(path, mode);
#   ifndef USE_PROC_FOR_LIBRARIES
      mutex_unlock(&GC_allocate_ml);
#   endif
    return(result);
}
# endif  /* SOLARIS_THREADS */
# ifndef USE_PROC_FOR_LIBRARIES
void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
#     ifdef SUNOS4
        struct exec *e;

        e = (struct exec *) lm->lm_addr;
        GC_add_roots_inner(
                    ((char *) (N_DATOFF(*e) + lm->lm_addr)),
                    ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
                    TRUE);
#     endif
#     ifdef SUNOS5DL
        Elf32_Ehdr * e;
        Elf32_Phdr * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (Elf32_Ehdr *) lm->l_addr;
        p = ((Elf32_Phdr *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                /* Register only writable (data/bss) segments as roots. */
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(
                  start,
                  start + p->p_memsz,
                  TRUE
                );
              }
              break;
            default:
              break;
          }
        }
#     endif
    }
#   ifdef SUNOS4
      {
        static ptr_t common_start = 0;
        ptr_t common_end;
        extern ptr_t GC_find_limit();

        if (common_start == 0) common_start = GC_first_common();
        if (common_start != 0) {
            common_end = GC_find_limit(common_start, TRUE);
            GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
        }
      }
#   endif
}

# endif /* !USE_PROC ... */
# endif /* SUNOS */
#ifdef LINUX_THREADS
#include <dlfcn.h>

void * GC_dlopen(const char *path, int mode)
{
    void * result;

    LOCK();
    result = dlopen(path, mode);
    UNLOCK();
    return(result);
}
#endif /* LINUX_THREADS */

/* BTL: added to fix circular dlopen definition if SOLARIS_THREADS defined */
#if defined(GC_must_restore_redefined_dlopen)
#   define dlopen GC_dlopen
#endif
#if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF)

/* Dynamic loading code for Linux running ELF. Somewhat tested on
 * Linux/x86, untested but hopefully should work on Linux/Alpha.
 * This code was derived from the Solaris/ELF support. Thanks to
 * whatever kind soul wrote that.  - Patrick Bridges */

#include <elf.h>
#include <link.h>

/* Newer versions of Linux/Alpha and Linux/x86 define this macro.  We
 * define it for those older versions that don't.  */
#  ifndef ElfW
#    if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
#      define ElfW(type) Elf32_##type
#    else
#      define ElfW(type) Elf64_##type
#    endif
#  endif
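/*
 * With the definitions above, ElfW(Ehdr) expands to Elf32_Ehdr when
 * ELF_CLASS is ELFCLASS32 (or undefined), and to Elf64_Ehdr otherwise,
 * so the traversal code below is written once for either word size.
 */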
static struct link_map *
GC_FirstDLOpenedLinkMap()
{
#   ifdef __GNUC__
#     pragma weak _DYNAMIC
#   endif
    extern ElfW(Dyn) _DYNAMIC[];
    ElfW(Dyn) *dp;
    struct r_debug *r;
    static struct link_map *cachedResult = 0;

    if( _DYNAMIC == 0) {
        return(0);
    }
    if( cachedResult == 0 ) {
        int tag;
        for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}
void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
        ElfW(Ehdr) * e;
        ElfW(Phdr) * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (ElfW(Ehdr) *) lm->l_addr;
        p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                /* Only writable (data/bss) segments become roots. */
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(start, start + p->p_memsz, TRUE);
              }
              break;
            default:
              break;
          }
        }
    }
}

#endif
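/*
 * Summary of the walk above: each link_map supplies the object's load
 * address (l_addr); the ELF header found there locates the program header
 * table (e_phoff, e_phnum).  Every PT_LOAD segment marked PF_W, i.e.
 * writable data and bss, is registered as a root from p_vaddr plus the
 * load offset for p_memsz bytes.  Read-only segments (text, rodata) are
 * skipped, since they cannot acquire pointers to collected objects at
 * run time.
 */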
#if defined(IRIX5) || defined(USE_PROC_FOR_LIBRARIES)

#include <sys/procfs.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <elf.h>
#include <errno.h>

extern void * GC_roots_present();
        /* The type is a lie, since the real type doesn't make sense here, */
        /* and we only test for NULL.                                       */

extern ptr_t GC_scratch_last_end_ptr; /* End of GC_scratch_alloc arena */

/* We use /proc to track down all parts of the address space that are  */
/* mapped by the process, and throw out regions we know we shouldn't   */
/* worry about.  This may also work under other SVR4 variants.         */
void GC_register_dynamic_libraries()
{
    static int fd = -1;
    char buf[30];
    static prmap_t * addr_map = 0;
    static int current_sz = 0;  /* Number of records currently in addr_map */
    static int needed_sz;       /* Required size of addr_map               */
    register int i;
    register long flags;
    register ptr_t start;
    register ptr_t limit;
    ptr_t heap_start = (ptr_t)HEAP_START;
    ptr_t heap_end = heap_start;

#   ifdef SUNOS5DL
#     define MA_PHYS 0
#   endif /* SUNOS5DL */

    if (fd < 0) {
      sprintf(buf, "/proc/%d", getpid());
        /* The above generates a lint complaint, since pid_t varies.   */
        /* It's unclear how to improve this.                           */
      fd = open(buf, O_RDONLY);
      if (fd < 0) {
        ABORT("/proc open failed");
      }
    }
    if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
        GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
        ABORT("/proc PIOCNMAP ioctl failed");
    }
    if (needed_sz >= current_sz) {
        current_sz = needed_sz * 2 + 1;
                        /* Expansion, plus room for 0 record */
        addr_map = (prmap_t *)GC_scratch_alloc((word)
                                               (current_sz * sizeof(prmap_t)));
    }
    if (ioctl(fd, PIOCMAP, addr_map) < 0) {
        GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
                        fd, errno, needed_sz, addr_map);
        ABORT("/proc PIOCMAP ioctl failed");
    }
    if (GC_n_heap_sects > 0) {
        heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
                        + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
        if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
    }
    for (i = 0; i < needed_sz; i++) {
        flags = addr_map[i].pr_mflags;
        if ((flags & (MA_BREAK | MA_STACK | MA_PHYS)) != 0) goto irrelevant;
        if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
            goto irrelevant;
          /* The latter test is empirically useless.  Other than the   */
          /* main data and stack segments, everything appears to be    */
          /* mapped readable, writable, executable, and shared(!!).    */
          /* This makes no sense to me. - HB                           */
        start = (ptr_t)(addr_map[i].pr_vaddr);
        if (GC_roots_present(start)) goto irrelevant;
        if (start < heap_end && start >= heap_start)
            goto irrelevant;
#       ifdef MMAP_STACKS
          if (GC_is_thread_stack(start)) goto irrelevant;
#       endif /* MMAP_STACKS */

        limit = start + addr_map[i].pr_size;
        if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
            /* Discard text segments, i.e. 0-offset mappings against   */
            /* executable files which appear to have ELF headers.      */
            caddr_t arg;
            int obj;
#           define MAP_IRR_SZ 10
            static ptr_t map_irr[MAP_IRR_SZ];
                                /* Known irrelevant map entries */
            static int n_irr = 0;
            struct stat buf;
            register int i;

            for (i = 0; i < n_irr; i++) {
                if (map_irr[i] == start) goto irrelevant;
            }
            arg = (caddr_t)start;
            obj = ioctl(fd, PIOCOPENM, &arg);
            if (obj >= 0) {
                fstat(obj, &buf);
                close(obj);
                if ((buf.st_mode & 0111) != 0) {
                    if (n_irr < MAP_IRR_SZ) {
                        map_irr[n_irr++] = start;
                    }
                    goto irrelevant;
                }
            }
        }
        GC_add_roots_inner(start, limit, TRUE);
      irrelevant: ;
    }
    /* Don't keep a cached descriptor, for now.  Some kernels don't like */
    /* us to keep a /proc file descriptor around during kill -9.         */
    if (close(fd) < 0) ABORT("Couldn't close /proc file");
    fd = -1;
}

# endif /* USE_PROC || IRIX5 */
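/*
 * Summary of the /proc protocol used above: PIOCNMAP returns the number
 * of mappings, PIOCMAP fills an array of prmap_t records (one per
 * mapping), and PIOCOPENM opens the object file backing a mapping so we
 * can check whether it is executable.  Mappings that are the break
 * segment, a stack, the collector's own heap, already registered roots,
 * or apparent text segments are discarded; everything else becomes a
 * root.
 */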
# ifdef MSWIN32

# define WIN32_LEAN_AND_MEAN
# define NOSERVICE
# include <windows.h>
# include <stdlib.h>

  /* We traverse the entire address space and register all segments    */
  /* that could possibly have been written to.                         */
  DWORD GC_allocation_granularity;

  extern GC_bool GC_is_heap_base (ptr_t p);

# ifdef WIN32_THREADS
    extern void GC_get_next_stack(char *start, char **lo, char **hi);
# endif

  void GC_cond_add_roots(char *base, char * limit)
  {
    char dummy;
    char * stack_top
           = (char *) ((word)(&dummy) & ~(GC_allocation_granularity-1));
    if (base == limit) return;
#   ifdef WIN32_THREADS
    {
        char * curr_base = base;
        char * next_stack_lo;
        char * next_stack_hi;

        for(;;) {
            GC_get_next_stack(curr_base, &next_stack_lo, &next_stack_hi);
            if (next_stack_lo >= limit) break;
            GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
            curr_base = next_stack_hi;
        }
        if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
    }
#   else
    if (limit > stack_top && base < GC_stackbottom) {
        /* Part of the stack; ignore it. */
        return;
    }
    GC_add_roots_inner(base, limit, TRUE);
#   endif
  }

  extern GC_bool GC_win32s;

  void GC_register_dynamic_libraries()
  {
    MEMORY_BASIC_INFORMATION buf;
    SYSTEM_INFO sysinfo;
    DWORD result;
    DWORD protect;
    LPVOID p;
    char * base;
    char * limit, * new_limit;

    if (GC_win32s) return;
    GetSystemInfo(&sysinfo);
    base = limit = p = sysinfo.lpMinimumApplicationAddress;
    GC_allocation_granularity = sysinfo.dwAllocationGranularity;
    while (p < sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result != sizeof(buf)) {
            ABORT("Weird VirtualQuery result");
        }
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && (protect == PAGE_EXECUTE_READWRITE
                || protect == PAGE_READWRITE)
            && !GC_is_heap_base(buf.AllocationBase)) {
            if ((char *)p == limit) {
                limit = new_limit;
            } else {
                GC_cond_add_roots(base, limit);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
    }
    GC_cond_add_roots(base, limit);
  }

#endif /* MSWIN32 */
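/*
 * The Win32 scan above uses repeated VirtualQuery calls: starting at
 * lpMinimumApplicationAddress, each call describes one region, and
 * RegionSize advances the cursor.  Committed PAGE_READWRITE or
 * PAGE_EXECUTE_READWRITE regions that are not part of the collector's own
 * heap are coalesced into maximal runs and handed to GC_cond_add_roots,
 * which carves out thread stacks before registering the rest as roots.
 */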
#if defined(ALPHA) && defined(OSF1)

#include <loader.h>

void GC_register_dynamic_libraries()
{
  int status;
  ldr_process_t mypid;

  /* module */
    ldr_module_t moduleid = LDR_NULL_MODULE;
    ldr_module_info_t moduleinfo;
    size_t moduleinfosize = sizeof(moduleinfo);
    size_t modulereturnsize;

  /* region */
    ldr_region_t region;
    ldr_region_info_t regioninfo;
    size_t regioninfosize = sizeof(regioninfo);
    size_t regionreturnsize;

  /* Obtain id of this process */
    mypid = ldr_my_process();

  /* For each module */
    while (TRUE) {

      /* Get the next (first) module */
        status = ldr_next_module(mypid, &moduleid);

      /* Any more modules? */
        if (moduleid == LDR_NULL_MODULE)
            break;    /* No more modules */

      /* Check status AFTER checking moduleid because         */
      /* of a bug in the non-shared ldr_next_module stub.      */
        if (status != 0 ) {
            GC_printf1("dynamic_load: status = %ld\n", (long)status);
            {
                extern char *sys_errlist[];
                extern int sys_nerr;
                extern int errno;
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long)errno);
                }
            }
            ABORT("ldr_next_module failed");
        }

      /* Get the module information */
        status = ldr_inq_module(mypid, moduleid, &moduleinfo,
                                moduleinfosize, &modulereturnsize);
        if (status != 0 )
            ABORT("ldr_inq_module failed");

      /* Is this module the main program (i.e. the nonshared portion)? */
        if (moduleinfo.lmi_flags & LDR_MAIN)
            continue;    /* skip the main module */

#     ifdef VERBOSE
        GC_printf("---Module---\n");
        GC_printf("Module ID            = %16ld\n", moduleinfo.lmi_modid);
        GC_printf("Count of regions     = %16d\n", moduleinfo.lmi_nregion);
        GC_printf("flags for module     = %16lx\n", moduleinfo.lmi_flags);
        GC_printf("pathname of module   = \"%s\"\n", moduleinfo.lmi_name);
#     endif

      /* For each region in this module */
        for (region = 0; region < moduleinfo.lmi_nregion; region++) {

          /* Get the region information */
            status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
                                    regioninfosize, &regionreturnsize);
            if (status != 0 )
                ABORT("ldr_inq_region failed");

          /* Only process writable (data) regions */
            if (! (regioninfo.lri_prot & LDR_W))
                continue;

#         ifdef VERBOSE
            GC_printf("--- Region ---\n");
            GC_printf("Region number    = %16ld\n",
                      regioninfo.lri_region_no);
            GC_printf("Protection flags = %016x\n",  regioninfo.lri_prot);
            GC_printf("Virtual address  = %16p\n",   regioninfo.lri_vaddr);
            GC_printf("Mapped address   = %16p\n",   regioninfo.lri_mapaddr);
            GC_printf("Region size      = %16ld\n",  regioninfo.lri_size);
            GC_printf("Region name      = \"%s\"\n", regioninfo.lri_name);
#         endif

          /* Register region as a garbage collection root */
            GC_add_roots_inner (
                (char *)regioninfo.lri_mapaddr,
                (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
                TRUE);
        }
    }
}
#endif
#if defined(HPUX)

#include <errno.h>
#include <dl.h>

extern int errno;
extern char *sys_errlist[];
extern int sys_nerr;

void GC_register_dynamic_libraries()
{
  int status;
  int index = 1; /* Ordinal position in shared library search list */
  struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */

  /* For each dynamic library loaded */
    while (TRUE) {

      /* Get info about next shared library */
        status = shl_get(index, &shl_desc);

      /* Check if this is the end of the list or if some error occurred */
        if (status != 0) {
#         ifdef HPUX_THREADS
            /* I've seen errno values of 0.  The man page is not clear */
            /* as to whether errno should get set on a -1 return.      */
            break;
#         else
            if (errno == EINVAL) {
                break; /* Moved past end of shared library list --> finished */
            } else {
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long) errno);
                }
                ABORT("shl_get failed");
            }
#         endif
        }

#     ifdef VERBOSE
        GC_printf0("---Shared library---\n");
        GC_printf1("\tfilename        = \"%s\"\n", shl_desc->filename);
        GC_printf1("\tindex           = %d\n", index);
        GC_printf1("\thandle          = %08x\n",
                   (unsigned long) shl_desc->handle);
        GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
        GC_printf1("\ttext seg. end   = %08x\n", shl_desc->tend);
        GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
        GC_printf1("\tdata seg. end   = %08x\n", shl_desc->dend);
        GC_printf1("\tref. count      = %lu\n", shl_desc->ref_count);
#     endif

      /* Register the shared library's data segment as a GC root */
        GC_add_roots_inner((char *) shl_desc->dstart,
                           (char *) shl_desc->dend, TRUE);

        index++;
    }
}
#endif /* HPUX */
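/*
 * The HP-UX loop above relies on shl_get(): indices 1, 2, ... walk the
 * shared library search list in order, and shl_get fails with errno set
 * to EINVAL once the index runs past the end of the list, which is how
 * the loop terminates.  Only the data segment [dstart, dend) of each
 * library is registered; the text segment is read-only and therefore of
 * no interest to the collector.
 */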
#ifdef RS6000
#pragma alloca
#include <sys/ldr.h>
#include <sys/errno.h>
void GC_register_dynamic_libraries()
{
        int len;
        char *ldibuf;
        int ldibuflen;
        struct ld_info *ldi;

        ldibuf = alloca(ldibuflen = 8192);

        while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
                if (errno != ENOMEM) {
                        ABORT("loadquery failed");
                }
                ldibuf = alloca(ldibuflen *= 2);
        }

        ldi = (struct ld_info *)ldibuf;
        while (ldi) {
                len = ldi->ldinfo_next;
                GC_add_roots_inner(
                                ldi->ldinfo_dataorg,
                                (unsigned long)ldi->ldinfo_dataorg
                                + ldi->ldinfo_datasize,
                                TRUE);
                ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
        }
}
#endif /* RS6000 */
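/*
 * On AIX, loadquery(L_GETINFO, buf, len) fills buf with a chain of
 * ld_info records, one per loaded module, linked by the ldinfo_next byte
 * offset (0 terminates the chain).  It fails with ENOMEM when the buffer
 * is too small, hence the doubling retry loop above.  Each module's data
 * segment [ldinfo_dataorg, ldinfo_dataorg + ldinfo_datasize) is
 * registered as a root.
 */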
#else /* !DYNAMIC_LOADING */

#ifdef PCR

#   include "il/PCR_IL.h"
#   include "th/PCR_ThCtl.h"
#   include "mm/PCR_MM.h"

void GC_register_dynamic_libraries()
{
    /* Add new static data areas of dynamically loaded modules. */
        {
          PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
          PCR_IL_LoadedSegment * q;

          /* Skip uncommitted files */
          while (p != NIL && !(p -> lf_commitPoint)) {
              /* The loading of this file has not yet been committed.   */
              /* Hence its description could be inconsistent.           */
              /* Furthermore, it hasn't yet been run.  Hence its data   */
              /* segments can't possibly reference heap allocated       */
              /* objects.                                               */
              p = p -> lf_prev;
          }
          for (; p != NIL; p = p -> lf_prev) {
            for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
              if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
                  == PCR_IL_SegFlags_Traced_on) {
                GC_add_roots_inner
                        ((char *)(q -> ls_addr),
                         (char *)(q -> ls_addr) + q -> ls_bytes,
                         TRUE);
              }
            }
          }
        }
}

#else /* !PCR */

void GC_register_dynamic_libraries(){}

int GC_no_dynamic_loading;

#endif /* !PCR */
#endif /* !DYNAMIC_LOADING */