/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1997 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Original author: Bill Janssen
 * Heavily modified by Hans Boehm and others
 */

/*
 * This is incredibly OS-specific code for tracking down data sections in
 * dynamic libraries.  There appears to be no way of doing this quickly
 * without groveling through undocumented data structures.  We would argue
 * that this is a bug in the design of the dlopen interface.  THIS CODE
 * MAY BREAK IN FUTURE OS RELEASES.  If this matters to you, don't hesitate
 * to let your vendor know ...
 *
 * None of this is safe with dlclose and incremental collection.
 * But then not much of anything is safe in the presence of dlclose.
 */
#ifndef MACOS
#  include <sys/types.h>
#endif
#include "gc_priv.h"

/* BTL: avoid circular redefinition of dlopen if SOLARIS_THREADS defined */
# if defined(SOLARIS_THREADS) && defined(dlopen)
    /* To support threads in Solaris, gc.h interposes on dlopen by       */
    /* defining "dlopen" to be "GC_dlopen", which is implemented below.  */
    /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the   */
    /* real system dlopen() in their implementation.  We first remove    */
    /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
#   undef dlopen
#   define GC_must_restore_redefined_dlopen
# else
#   undef GC_must_restore_redefined_dlopen
# endif
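
/* For illustration: with SOLARIS_THREADS in effect, gc.h's
 * "#define dlopen GC_dlopen" means that a client call such as
 *     void *handle = dlopen("libfoo.so", RTLD_LAZY);
 * (library name purely hypothetical) compiles into a call to GC_dlopen(),
 * which takes the allocation lock around the real dlopen()
 * (see GC_dlopen() below). */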

#if (defined(DYNAMIC_LOADING) || defined(MSWIN32)) && !defined(PCR)

#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
    !defined(MSWIN32) && !(defined(ALPHA) && defined(OSF1)) && \
    !defined(HP_PA) && !(defined(LINUX) && defined(__ELF__)) && \
    !defined(RS6000) && !defined(SCO_ELF)
 --> We only know how to find data segments of dynamic libraries for the
 --> above.  Additional SVR4 variants might not be too
 --> hard to add.
#endif

#include <stdio.h>
#ifdef SUNOS5DL
#   include <sys/elf.h>
#   include <dlfcn.h>
#   include <link.h>
#endif
#ifdef SUNOS4
#   include <dlfcn.h>
#   include <link.h>
#   include <a.out.h>
  /* struct link_map field overrides */
#   define l_next	lm_next
#   define l_addr	lm_addr
#   define l_name	lm_name
#endif

#if defined(SUNOS5DL) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    Elf32_Dyn _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern Elf32_Dyn _DYNAMIC;
    Elf32_Dyn *dp;
    struct r_debug *r;
    static struct link_map * cachedResult = 0;
    static Elf32_Dyn *dynStructureAddr = 0;
                /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */

#   ifdef SUNOS53_SHARED_LIB
        /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
        /* up properly in dynamically linked .so's.  This means we have */
        /* to use its value in the set of original object files loaded  */
        /* at program startup.                                          */
        if( dynStructureAddr == 0 ) {
            void* startupSyms = dlopen(0, RTLD_LAZY);
            dynStructureAddr = (Elf32_Dyn*)dlsym(startupSyms, "_DYNAMIC");
        }
#   else
        dynStructureAddr = &_DYNAMIC;
#   endif

    if( dynStructureAddr == 0) {
        return(0);
    }

    if( cachedResult == 0 ) {
        int tag;
        for( dp = ((Elf32_Dyn *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}

#endif /* SUNOS5DL ... */
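
/* Both this function and the Linux version below find the loader's
 * link_map chain through the DT_DEBUG entry of _DYNAMIC, which points
 * at a struct r_debug.  The head of that chain normally describes the
 * main executable, whose data segments are registered as static roots
 * elsewhere, so the first dynamically loaded object is lm->l_next. */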

#if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    struct link_dynamic _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern struct link_dynamic _DYNAMIC;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    return(_DYNAMIC.ld_un.ld_1->ld_loaded);
}

/* Return the address of the ld.so allocated common symbol	*/
/* with the least address, or 0 if none.			*/
static ptr_t GC_first_common()
{
    ptr_t result = 0;
    extern struct link_dynamic _DYNAMIC;
    struct rtc_symb * curr_symbol;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    curr_symbol = _DYNAMIC.ldd -> ldd_cp;
    for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
        if (result == 0
            || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
            result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
        }
    }
    return(result);
}

#endif  /* SUNOS4 ... */

# if defined(SUNOS4) || defined(SUNOS5DL)
/* Add dynamic library data sections to the root set.		*/
# if !defined(PCR) && !defined(SOLARIS_THREADS) \
     && !defined(QUICK_THREADS) && defined(THREADS)
#   ifndef SRC_M3
	--> fix mutual exclusion with dlopen
#   endif  /* We assume M3 programs don't call dlopen for now */
# endif

# ifdef SOLARIS_THREADS
  /* Redefine dlopen to guarantee mutual exclusion with	*/
  /* GC_register_dynamic_libraries.				*/
  /* Assumes that dlopen doesn't need to call GC_malloc		*/
  /* and friends.						*/
# include <thread.h>
# include <synch.h>

void * GC_dlopen(const char *path, int mode)
{
    void * result;

#   ifndef USE_PROC_FOR_LIBRARIES
      mutex_lock(&GC_allocate_ml);
#   endif
    result = dlopen(path, mode);
#   ifndef USE_PROC_FOR_LIBRARIES
      mutex_unlock(&GC_allocate_ml);
#   endif
    return(result);
}
# endif  /* SOLARIS_THREADS */

/* BTL: added to fix circular dlopen definition if SOLARIS_THREADS defined */
# if defined(GC_must_restore_redefined_dlopen)
#   define dlopen GC_dlopen
# endif

# ifndef USE_PROC_FOR_LIBRARIES
void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
#     ifdef SUNOS4
        struct exec *e;

        e = (struct exec *) lm->lm_addr;
        GC_add_roots_inner(
                    ((char *) (N_DATOFF(*e) + lm->lm_addr)),
                    ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
                    TRUE);
#     endif
#     ifdef SUNOS5DL
        Elf32_Ehdr * e;
        Elf32_Phdr * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (Elf32_Ehdr *) lm->l_addr;
        p = ((Elf32_Phdr *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(
                  start,
                  start + p->p_memsz,
                  TRUE
                );
              }
              break;
            default:
              break;
          }
        }
#     endif
    }
#   ifdef SUNOS4
    {
        static ptr_t common_start = 0;
        ptr_t common_end;
        extern ptr_t GC_find_limit();

        if (common_start == 0) common_start = GC_first_common();
        if (common_start != 0) {
            common_end = GC_find_limit(common_start, TRUE);
            GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
        }
    }
#   endif
}

# endif /* !USE_PROC ... */
# endif /* SUNOS */

#if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF)

/* Dynamic loading code for Linux running ELF.  Somewhat tested on
 * Linux/x86, untested but expected to work on Linux/Alpha.
 * This code was derived from the Solaris/ELF support.  Thanks to
 * whatever kind soul wrote that.  - Patrick Bridges */

#include <elf.h>
#include <link.h>

/* Newer versions of Linux/Alpha and Linux/x86 define this macro.  We
 * define it for those older versions that don't. */
#  ifndef ElfW
#    if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
#      define ElfW(type) Elf32_##type
#    else
#      define ElfW(type) Elf64_##type
#    endif
#  endif
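
/* For example, a 32-bit ELF build gets ElfW(Ehdr) == Elf32_Ehdr and
 * ElfW(Dyn) == Elf32_Dyn, while ELF_CLASS == ELFCLASS64 makes the same
 * declarations expand to the Elf64_* types. */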

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
#   ifdef __GNUC__
      /* On some Linux systems, `_DYNAMIC' will not be defined when a
         static link is done. */
#     pragma weak _DYNAMIC
#   endif
    extern ElfW(Dyn) _DYNAMIC[];
    ElfW(Dyn) *dp;
    struct r_debug *r;
    static struct link_map *cachedResult = 0;

    if( _DYNAMIC == 0) {
        return(0);
    }
    if( cachedResult == 0 ) {
        int tag;
        for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}

void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
        ElfW(Ehdr) * e;
        ElfW(Phdr) * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (ElfW(Ehdr) *) lm->l_addr;
        p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(start, start + p->p_memsz, TRUE);
              }
              break;
            default:
              break;
          }
        }
    }
}

#endif
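
/* Summary of the scheme above: for every writable PT_LOAD segment the
 * registered root range is
 *     [l_addr + p_vaddr, l_addr + p_vaddr + p_memsz)
 * i.e. p_memsz rather than p_filesz is used, so the zero-filled (bss)
 * part of the segment is scanned along with the initialized data. */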

#if defined(IRIX5) || defined(USE_PROC_FOR_LIBRARIES)

#include <sys/procfs.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <elf.h>
#include <errno.h>

extern void * GC_roots_present();

extern ptr_t GC_scratch_last_end_ptr; /* End of GC_scratch_alloc arena	*/

/* We use /proc to track down all parts of the address space that are	*/
/* mapped by the process, and throw out regions we know we shouldn't	*/
/* worry about.  This may also work under other SVR4 variants.		*/
void GC_register_dynamic_libraries()
{
    static int fd = -1;
    char buf[30];
    static prmap_t * addr_map = 0;
    static int current_sz = 0;	/* Number of records currently in addr_map */
    static int needed_sz;	/* Required size of addr_map		    */
    register int i;
    register long flags;
    register ptr_t start;
    register ptr_t limit;
    ptr_t heap_start = (ptr_t)HEAP_START;
    ptr_t heap_end = heap_start;

#   ifdef SUNOS5DL
#     define MA_PHYS 0
#   endif /* SUNOS5DL */

    if (fd < 0) {
        sprintf(buf, "/proc/%d", getpid());
        fd = open(buf, O_RDONLY);
        if (fd < 0) {
            ABORT("/proc open failed");
        }
    }
    if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
        GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
        ABORT("/proc PIOCNMAP ioctl failed");
    }
    if (needed_sz >= current_sz) {
        current_sz = needed_sz * 2 + 1;
                        /* Expansion, plus room for 0 record */
        addr_map = (prmap_t *)GC_scratch_alloc(current_sz * sizeof(prmap_t));
    }
    if (ioctl(fd, PIOCMAP, addr_map) < 0) {
        GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
                        fd, errno, needed_sz, addr_map);
        ABORT("/proc PIOCMAP ioctl failed");
    }
    if (GC_n_heap_sects > 0) {
        heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
                        + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
        if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
    }
    for (i = 0; i < needed_sz; i++) {
        flags = addr_map[i].pr_mflags;
        if ((flags & (MA_BREAK | MA_STACK | MA_PHYS)) != 0) goto irrelevant;
        if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
            goto irrelevant;
          /* The latter test is empirically useless.  Other than the	*/
          /* main data and stack segments, everything appears to be	*/
          /* mapped readable, writable, executable, and shared(!!).	*/
          /* This makes no sense to me. - HB				*/
        start = (ptr_t)(addr_map[i].pr_vaddr);
        if (GC_roots_present(start)) goto irrelevant;
        if (start < heap_end && start >= heap_start)
            goto irrelevant;
#       ifdef MMAP_STACKS
          if (GC_is_thread_stack(start)) goto irrelevant;
#       endif /* MMAP_STACKS */

        limit = start + addr_map[i].pr_size;
        if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
            /* Discard text segments, i.e. 0-offset mappings against	*/
            /* executable files which appear to have ELF headers.	*/
            caddr_t arg;
            int obj;
#           define MAP_IRR_SZ 10
            static ptr_t map_irr[MAP_IRR_SZ];
                                /* Known irrelevant map entries */
            static int n_irr = 0;
            struct stat buf;
            register int i;

            for (i = 0; i < n_irr; i++) {
                if (map_irr[i] == start) goto irrelevant;
            }
            arg = (caddr_t)start;
            obj = ioctl(fd, PIOCOPENM, &arg);
            if (obj >= 0) {
                fstat(obj, &buf);
                close(obj);
                if ((buf.st_mode & 0111) != 0) {
                    if (n_irr < MAP_IRR_SZ) {
                        map_irr[n_irr++] = start;
                    }
                    goto irrelevant;
                }
            }
        }
        GC_add_roots_inner(start, limit, TRUE);
      irrelevant: ;
    }
    /* Don't keep a cached descriptor, for now.  Some kernels don't like */
    /* us to keep a /proc file descriptor around during kill -9.	  */
    if (close(fd) < 0) ABORT("Couldn't close /proc file");
    fd = -1;
}

# endif /* USE_PROC || IRIX5 */

# ifdef MSWIN32

# define WIN32_LEAN_AND_MEAN
# define NOSERVICE
# include <windows.h>
# include <stdlib.h>

  /* We traverse the entire address space and register all segments	*/
  /* that could possibly have been written to.				*/
  DWORD GC_allocation_granularity;

  extern GC_bool GC_is_heap_base (ptr_t p);

# ifdef WIN32_THREADS
    extern void GC_get_next_stack(char *start, char **lo, char **hi);
# endif

  void GC_cond_add_roots(char *base, char * limit)
  {
    char dummy;
    char * stack_top
        = (char *) ((word)(&dummy) & ~(GC_allocation_granularity-1));
    if (base == limit) return;
#   ifdef WIN32_THREADS
    {
        char * curr_base = base;
        char * next_stack_lo;
        char * next_stack_hi;

        for(;;) {
            GC_get_next_stack(curr_base, &next_stack_lo, &next_stack_hi);
            if (next_stack_lo >= limit) break;
            GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
            curr_base = next_stack_hi;
        }
        if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
    }
#   else
        if (limit > stack_top && base < GC_stackbottom) {
            /* Part of the stack; ignore it. */
            return;
        }
        GC_add_roots_inner(base, limit, TRUE);
#   endif
  }

  extern GC_bool GC_win32s;

  void GC_register_dynamic_libraries()
  {
    MEMORY_BASIC_INFORMATION buf;
    SYSTEM_INFO sysinfo;
    DWORD result;
    DWORD protect;
    LPVOID p;
    char * base;
    char * limit, * new_limit;

    if (GC_win32s) return;
    GetSystemInfo(&sysinfo);
    base = limit = p = sysinfo.lpMinimumApplicationAddress;
    GC_allocation_granularity = sysinfo.dwAllocationGranularity;
    while (p < sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result != sizeof(buf)) {
            ABORT("Weird VirtualQuery result");
        }
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && (protect == PAGE_EXECUTE_READWRITE
                || protect == PAGE_READWRITE)
            && !GC_is_heap_base(buf.AllocationBase)) {
            if ((char *)p == limit) {
                limit = new_limit;
            } else {
                GC_cond_add_roots(base, limit);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
    }
    GC_cond_add_roots(base, limit);
  }

#endif /* MSWIN32 */

#if defined(ALPHA) && defined(OSF1)

#include <loader.h>

void GC_register_dynamic_libraries()
{
  int status;
  ldr_process_t mypid;

  /* module */
    ldr_module_t moduleid = LDR_NULL_MODULE;
    ldr_module_info_t moduleinfo;
    size_t moduleinfosize = sizeof(moduleinfo);
    size_t modulereturnsize;

  /* region */
    ldr_region_t region;
    ldr_region_info_t regioninfo;
    size_t regioninfosize = sizeof(regioninfo);
    size_t regionreturnsize;

  /* Obtain id of this process */
    mypid = ldr_my_process();

  /* For each module */
    while (TRUE) {

      /* Get the next (first) module */
        status = ldr_next_module(mypid, &moduleid);

      /* Any more modules? */
        if (moduleid == LDR_NULL_MODULE)
            break;    /* No more modules */

      /* Check status AFTER checking moduleid because	  */
      /* of a bug in the non-shared ldr_next_module stub */
        if (status != 0 ) {
            GC_printf1("dynamic_load: status = %ld\n", (long)status);
            {
                extern char *sys_errlist[];
                extern int sys_nerr;
                extern int errno;
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long)errno);
                }
            }
            ABORT("ldr_next_module failed");
        }

      /* Get the module information */
        status = ldr_inq_module(mypid, moduleid, &moduleinfo,
                                moduleinfosize, &modulereturnsize);
        if (status != 0 )
            ABORT("ldr_inq_module failed");

      /* Is this module the main program (i.e. the nonshared portion)? */
        if (moduleinfo.lmi_flags & LDR_MAIN)
            continue;    /* skip the main module */

#     ifdef VERBOSE
        GC_printf("---Module---\n");
        GC_printf("Module ID            = %16ld\n", moduleinfo.lmi_modid);
        GC_printf("Count of regions     = %16d\n", moduleinfo.lmi_nregion);
        GC_printf("flags for module     = %16lx\n", moduleinfo.lmi_flags);
        GC_printf("pathname of module   = \"%s\"\n", moduleinfo.lmi_name);
#     endif

      /* For each region in this module */
        for (region = 0; region < moduleinfo.lmi_nregion; region++) {

          /* Get the region information */
            status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
                                    regioninfosize, &regionreturnsize);
            if (status != 0 )
                ABORT("ldr_inq_region failed");

          /* Only process writable (data) regions */
            if (! (regioninfo.lri_prot & LDR_W))
                continue;

#         ifdef VERBOSE
            GC_printf("--- Region ---\n");
            GC_printf("Region number    = %16ld\n",
                      regioninfo.lri_region_no);
            GC_printf("Protection flags = %016x\n",  regioninfo.lri_prot);
            GC_printf("Virtual address  = %16p\n",   regioninfo.lri_vaddr);
            GC_printf("Mapped address   = %16p\n",   regioninfo.lri_mapaddr);
            GC_printf("Region size      = %16ld\n",  regioninfo.lri_size);
            GC_printf("Region name      = \"%s\"\n", regioninfo.lri_name);
#         endif

          /* Register region as a garbage collection root */
            GC_add_roots_inner (
                (char *)regioninfo.lri_mapaddr,
                (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
                TRUE);

        }
    }
}
#endif

#if defined(HP_PA)

#include <errno.h>
#include <dl.h>

extern int errno;
extern char *sys_errlist[];
extern int sys_nerr;

void GC_register_dynamic_libraries()
{
  int status;
  int index = 1; /* Ordinal position in shared library search list */
  struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */

  /* For each dynamic library loaded */
    while (TRUE) {

      /* Get info about next shared library */
        status = shl_get(index, &shl_desc);

      /* Check if this is the end of the list or if some error occurred */
        if (status != 0) {
           if (errno == EINVAL) {
              break; /* Moved past end of shared library list --> finished */
           } else {
              if (errno <= sys_nerr) {
                 GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
              } else {
                 GC_printf1("dynamic_load: %d\n", (long) errno);
              }
              ABORT("shl_get failed");
           }
        }

#     ifdef VERBOSE
        GC_printf0("---Shared library---\n");
        GC_printf1("\tfilename        = \"%s\"\n", shl_desc->filename);
        GC_printf1("\tindex           = %d\n", index);
        GC_printf1("\thandle          = %08x\n",
                   (unsigned long) shl_desc->handle);
        GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
        GC_printf1("\ttext seg. end   = %08x\n", shl_desc->tend);
        GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
        GC_printf1("\tdata seg. end   = %08x\n", shl_desc->dend);
        GC_printf1("\tref. count      = %lu\n", shl_desc->ref_count);
#     endif

      /* Register the shared library's data segment as a GC root */
        GC_add_roots_inner((char *) shl_desc->dstart,
                           (char *) shl_desc->dend, TRUE);

        index++;
    }
}
#endif /* HP_PA */

#ifdef RS6000
#pragma alloca
#include <sys/ldr.h>
#include <sys/errno.h>
void GC_register_dynamic_libraries()
{
        int len;
        char *ldibuf;
        int ldibuflen;
        struct ld_info *ldi;

        ldibuf = alloca(ldibuflen = 8192);

        while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
                if (errno != ENOMEM) {
                        ABORT("loadquery failed");
                }
                ldibuf = alloca(ldibuflen *= 2);
        }

        ldi = (struct ld_info *)ldibuf;
        while (ldi) {
                len = ldi->ldinfo_next;
                GC_add_roots_inner(
                                ldi->ldinfo_dataorg,
                                (unsigned long)ldi->ldinfo_dataorg
                                + ldi->ldinfo_datasize,
                                TRUE);
                ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
        }
}
#endif /* RS6000 */

#else /* !DYNAMIC_LOADING */

#ifdef PCR

# include "il/PCR_IL.h"
# include "th/PCR_ThCtl.h"
# include "mm/PCR_MM.h"

void GC_register_dynamic_libraries()
{
    /* Add new static data areas of dynamically loaded modules.	*/

        PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
        PCR_IL_LoadedSegment * q;

        /* Skip uncommitted files */
        while (p != NIL && !(p -> lf_commitPoint)) {
            /* The loading of this file has not yet been committed.	*/
            /* Hence its description could be inconsistent.		*/
            /* Furthermore, it hasn't yet been run.  Hence its data	*/
            /* segments can't possibly reference heap allocated		*/
            /* objects.							*/
            p = p -> lf_prev;
        }
        for (; p != NIL; p = p -> lf_prev) {
          for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
            if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
                == PCR_IL_SegFlags_Traced_on) {
              GC_add_roots_inner
                  ((char *)(q -> ls_addr),
                   (char *)(q -> ls_addr) + q -> ls_bytes,
                   TRUE);
            }
          }
        }
}

#else /* !PCR */

void GC_register_dynamic_libraries(){}

int GC_no_dynamic_loading;

#endif /* !PCR */
#endif /* !DYNAMIC_LOADING */