/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1997 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Original author: Bill Janssen
 * Heavily modified by Hans Boehm and others
 */
/*
 * This is incredibly OS specific code for tracking down data sections in
 * dynamic libraries.  There appears to be no way of doing this quickly
 * without groveling through undocumented data structures.  We would argue
 * that this is a bug in the design of the dlopen interface.  THIS CODE
 * MAY BREAK IN FUTURE OS RELEASES.  If this matters to you, don't hesitate
 * to let your vendor know ...
 *
 * None of this is safe with dlclose and incremental collection.
 * But then not much of anything is safe in the presence of dlclose.
 */
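/* Note (added): every platform-specific branch below supplies the same    */
/* entry point, GC_register_dynamic_libraries(), which hands the writable  */
/* data regions of dynamically loaded code to GC_add_roots_inner() so that */
/* they are traced as part of the root set.                                 */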
# include <sys/types.h>
# include "gc_priv.h"
/* BTL: avoid circular redefinition of dlopen if SOLARIS_THREADS defined */
# if (defined(LINUX_THREADS) || defined(SOLARIS_THREADS) \
     || defined(HPUX_THREADS) || defined(IRIX_THREADS)) && defined(dlopen) \
     && !defined(USE_LD_WRAP)
    /* To support threads in Solaris, gc.h interposes on dlopen by       */
    /* defining "dlopen" to be "GC_dlopen", which is implemented below.  */
    /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the   */
    /* real system dlopen() in their implementation.  We first remove    */
    /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
#   undef dlopen
#   define GC_must_restore_redefined_dlopen
# else
#   undef GC_must_restore_redefined_dlopen
# endif
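/* Illustrative example (added, not from the original source): with the  */
/* gc.h interposition in effect, client code such as                     */
/*                                                                        */
/*     void *handle = dlopen("libfoo.so", RTLD_LAZY);                    */
/*                                                                        */
/* is compiled as a call to GC_dlopen("libfoo.so", RTLD_LAZY), defined    */
/* below, which keeps the collector out of the way while the real        */
/* dlopen() runs.  "libfoo.so" is a placeholder name.                     */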
#if (defined(DYNAMIC_LOADING) || defined(MSWIN32)) && !defined(PCR)

#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
    !defined(MSWIN32) && !(defined(ALPHA) && defined(OSF1)) && \
    !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
    !defined(RS6000) && !defined(SCO_ELF)
 --> We only know how to find data segments of dynamic libraries for the
 --> above.  Additional SVR4 variants might not be too hard to add.
#endif
#ifdef SUNOS5DL
#   include <sys/elf.h>
#   include <dlfcn.h>
#   include <link.h>
#endif

#ifdef SUNOS4
#   include <dlfcn.h>
#   include <link.h>
#   include <a.out.h>
  /* struct link_map field overrides */
#   define l_next       lm_next
#   define l_addr       lm_addr
#   define l_name       lm_name
#endif
#if defined(SUNOS5DL) && !defined(USE_PROC_FOR_LIBRARIES)

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern Elf32_Dyn _DYNAMIC;
    Elf32_Dyn *dp;
    static struct link_map * cachedResult = 0;
    static Elf32_Dyn *dynStructureAddr = 0;
                        /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */

#   ifdef SUNOS53_SHARED_LIB
        /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set  */
        /* up properly in dynamically linked .so's.  This means we have  */
        /* to use its value in the set of original object files loaded   */
        /* at program startup.                                           */
        if( dynStructureAddr == 0 ) {
            void* startupSyms = dlopen(0, RTLD_LAZY);
            dynStructureAddr = (Elf32_Dyn*)dlsym(startupSyms, "_DYNAMIC");
        }
#   else
        dynStructureAddr = &_DYNAMIC;
#   endif

    if( dynStructureAddr == 0) {
        return(0);
    }

    if( cachedResult == 0 ) {
        int tag;
        for( dp = ((Elf32_Dyn *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }

    return cachedResult;
}

#endif /* SUNOS5DL ... */
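/* Background (added): the loop above relies on the SVR4/ELF debugging   */
/* interface: the DT_DEBUG entry of _DYNAMIC is filled in by the run-time */
/* linker with the address of its struct r_debug, whose r_map field heads */
/* the chain of struct link_map entries, one per loaded object.  The      */
/* first entry normally describes the main executable, so the code starts */
/* from lm->l_next.                                                        */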
#if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)

#ifdef LINT
    struct link_dynamic _DYNAMIC;
#endif

static struct link_map *
GC_FirstDLOpenedLinkMap()
{
    extern struct link_dynamic _DYNAMIC;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    return(_DYNAMIC.ld_un.ld_1->ld_loaded);
}
/* Return the address of the ld.so allocated common symbol	*/
/* with the least address, or 0 if none.			*/
static ptr_t GC_first_common()
{
    extern struct link_dynamic _DYNAMIC;
    struct rtc_symb * curr_symbol;
    ptr_t result = 0;

    if( &_DYNAMIC == 0) {
        return(0);
    }
    curr_symbol = _DYNAMIC.ldd -> ldd_cp;
    for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
        if (result == 0
            || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
            result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
        }
    }
    return(result);
}

#endif  /* SUNOS4 ... */
# if defined(LINUX_THREADS) || defined(SOLARIS_THREADS) \
     || defined(HPUX_THREADS) || defined(IRIX_THREADS)

  /* Make sure we're not in the middle of a collection, and make	*/
  /* sure we don't start any.  Returns previous value of GC_dont_gc.	*/
  /* This is invoked prior to a dlopen call to avoid synchronization	*/
  /* issues.  We can't just acquire the allocation lock, since startup	*/
  /* code in dlopen may try to allocate.				*/
  /* This solution risks heap growth in the presence of many dlopen	*/
  /* calls in either a multithreaded environment, or if the library	*/
  /* initialization code allocates substantial amounts of GC'ed memory.*/
  /* But I don't know of a better solution.				*/
  /* This can still deadlock if the client explicitly starts a GC	*/
  /* during the dlopen.  He shouldn't do that.				*/
  static GC_bool disable_gc_for_dlopen()
  {
    GC_bool result;
    LOCK();
    result = GC_dont_gc;
    while (GC_incremental && GC_collection_in_progress()) {
        GC_collect_a_little_inner(1000);
    }
    GC_dont_gc = TRUE;
    UNLOCK();
    return(result);
  }
  /* Redefine dlopen to guarantee mutual exclusion with		*/
  /* GC_register_dynamic_libraries.					*/
  /* Should probably happen for other operating systems, too.		*/

# ifdef USE_LD_WRAP
    void * __wrap_dlopen(const char *path, int mode)
# else
    void * GC_dlopen(path, mode)
    GC_CONST char * path;
    int mode;
# endif
  {
    void * result;
    GC_bool dont_gc_save;

#   ifndef USE_PROC_FOR_LIBRARIES
      dont_gc_save = disable_gc_for_dlopen();
#   endif
#   ifdef USE_LD_WRAP
      result = __real_dlopen(path, mode);
#   else
      result = dlopen(path, mode);
#   endif
#   ifndef USE_PROC_FOR_LIBRARIES
      GC_dont_gc = dont_gc_save;
#   endif
    return(result);
  }
# endif  /* SOLARIS_THREADS */
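/* Usage note (added): the USE_LD_WRAP variant relies on the GNU linker's */
/* --wrap feature.  Linking the client with something like                */
/*                                                                         */
/*     cc client.o gc.a -Wl,--wrap,dlopen -ldl                             */
/*                                                                         */
/* resolves every undefined reference to dlopen to __wrap_dlopen above,    */
/* while __real_dlopen resolves to the system dlopen.  The link line is    */
/* only an example; adjust it to the local toolchain.                      */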
/* BTL: added to fix circular dlopen definition if SOLARIS_THREADS defined */
# if defined(GC_must_restore_redefined_dlopen)
#   define dlopen GC_dlopen
# endif
# if defined(SUNOS4) || defined(SUNOS5DL)
/* Add dynamic library data sections to the root set.		*/
# if !defined(PCR) && !defined(SOLARIS_THREADS) && defined(THREADS)
# ifndef SRC_M3
	--> fix mutual exclusion with dlopen
# endif  /* We assume M3 programs don't call dlopen for now */
# endif
# ifndef USE_PROC_FOR_LIBRARIES
void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
#     ifdef SUNOS4
        struct exec *e;

        e = (struct exec *) lm->lm_addr;
        GC_add_roots_inner(
                    ((char *) (N_DATOFF(*e) + lm->lm_addr)),
                    ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
                    TRUE);
#     endif
#     ifdef SUNOS5DL
        Elf32_Ehdr * e;
        Elf32_Phdr * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (Elf32_Ehdr *) lm->l_addr;
        p = ((Elf32_Phdr *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(start, start + p->p_memsz, TRUE);
              }
              break;
            default:
              break;
          }
        }
#     endif
    }
#   ifdef SUNOS4
      {
        static ptr_t common_start = 0;
        ptr_t common_end;
        extern ptr_t GC_find_limit();

        if (common_start == 0) common_start = GC_first_common();
        if (common_start != 0) {
            common_end = GC_find_limit(common_start, TRUE);
            GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
        }
      }
#   endif
}

# endif /* !USE_PROC ... */
# endif /* SUNOS4 || SUNOS5DL */
#if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF)

/* Dynamic loading code for Linux running ELF. Somewhat tested on
 * Linux/x86, untested but hopefully should work on Linux/Alpha.
 * This code was derived from the Solaris/ELF support. Thanks to
 * whatever kind soul wrote that.  - Patrick Bridges */

#include <elf.h>
#include <link.h>

/* Newer versions of Linux/Alpha and Linux/x86 define this macro.  We
 * define it for those older versions that don't. */
#  ifndef ElfW
#    if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
#      define ElfW(type) Elf32_##type
#    else
#      define ElfW(type) Elf64_##type
#    endif
#  endif
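/* Example (added): ElfW(Ehdr) expands to Elf32_Ehdr when ELF_CLASS is   */
/* ELFCLASS32 (or undefined) and to Elf64_Ehdr otherwise, so the code    */
/* below handles both 32- and 64-bit ELF targets.                        */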
static struct link_map *
GC_FirstDLOpenedLinkMap()
{
#   ifdef __GNUC__
#     pragma weak _DYNAMIC
#   endif
    extern ElfW(Dyn) _DYNAMIC[];
    ElfW(Dyn) *dp;
    static struct link_map *cachedResult = 0;

    if( _DYNAMIC == 0) {
        return(0);
    }
    if( cachedResult == 0 ) {
        int tag;
        for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
            if( tag == DT_DEBUG ) {
                struct link_map *lm
                        = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
                if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
                break;
            }
        }
    }
    return cachedResult;
}
void GC_register_dynamic_libraries()
{
  struct link_map *lm = GC_FirstDLOpenedLinkMap();

  for (lm = GC_FirstDLOpenedLinkMap();
       lm != (struct link_map *) 0;  lm = lm->l_next)
    {
        ElfW(Ehdr) * e;
        ElfW(Phdr) * p;
        unsigned long offset;
        char * start;
        register int i;

        e = (ElfW(Ehdr) *) lm->l_addr;
        p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
        offset = ((unsigned long)(lm->l_addr));
        for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
          switch( p->p_type ) {
            case PT_LOAD:
              {
                if( !(p->p_flags & PF_W) ) break;
                start = ((char *)(p->p_vaddr)) + offset;
                GC_add_roots_inner(start, start + p->p_memsz, TRUE);
              }
              break;
            default:
              break;
          }
        }
    }
}

#endif /* LINUX && __ELF__ || SCO_ELF */
#if defined(IRIX5) || defined(USE_PROC_FOR_LIBRARIES)

#include <sys/procfs.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <elf.h>
#include <errno.h>

extern void * GC_roots_present();
	/* The type is a lie, since the real type doesn't make sense here, */
	/* and we only test for NULL.					    */

#ifndef GC_scratch_last_end_ptr
extern ptr_t GC_scratch_last_end_ptr; /* End of GC_scratch_alloc arena	*/
#endif

/* We use /proc to track down all parts of the address space that are	*/
/* mapped by the process, and throw out regions we know we shouldn't	*/
/* worry about.  This may also work under other SVR4 variants.		*/
void GC_register_dynamic_libraries()
{
    static int fd = -1;
    char buf[30];
    static prmap_t * addr_map = 0;
    static int current_sz = 0;	/* Number of records currently in addr_map */
    static int needed_sz;	/* Required size of addr_map		*/
    register int i;
    register long flags;
    register ptr_t start;
    register ptr_t limit;
    ptr_t heap_start = (ptr_t)HEAP_START;
    ptr_t heap_end = heap_start;

#   ifdef SUNOS5DL
#     define MA_PHYS 0
#   endif /* SUNOS5DL */

    if (fd < 0) {
      sprintf(buf, "/proc/%d", getpid());
	/* The above generates a lint complaint, since pid_t varies.	*/
	/* It's unclear how to improve this.				*/
      fd = open(buf, O_RDONLY);
      if (fd < 0) {
        ABORT("/proc open failed");
      }
    }
    if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
        GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
        ABORT("/proc PIOCNMAP ioctl failed");
    }
    if (needed_sz >= current_sz) {
        current_sz = needed_sz * 2 + 1;
                        /* Expansion, plus room for 0 record */
        addr_map = (prmap_t *)GC_scratch_alloc((word)
                                               (current_sz * sizeof(prmap_t)));
    }
    if (ioctl(fd, PIOCMAP, addr_map) < 0) {
        GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
                       fd, errno, needed_sz, addr_map);
        ABORT("/proc PIOCMAP ioctl failed");
    }
    if (GC_n_heap_sects > 0) {
        heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
                        + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
        if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
    }
    for (i = 0; i < needed_sz; i++) {
        flags = addr_map[i].pr_mflags;
        if ((flags & (MA_BREAK | MA_STACK | MA_PHYS)) != 0) goto irrelevant;
        if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
            goto irrelevant;
          /* The latter test is empirically useless.  Other than the	*/
          /* main data and stack segments, everything appears to be	*/
          /* mapped readable, writable, executable, and shared(!!).	*/
          /* This makes no sense to me.	- HB				*/
        start = (ptr_t)(addr_map[i].pr_vaddr);
        if (GC_roots_present(start)) goto irrelevant;
        if (start < heap_end && start >= heap_start)
            goto irrelevant;
#       ifdef MMAP_STACKS
          if (GC_is_thread_stack(start)) goto irrelevant;
#       endif /* MMAP_STACKS */

        limit = start + addr_map[i].pr_size;
        if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
            /* Discard text segments, i.e. 0-offset mappings against	*/
            /* executable files which appear to have ELF headers.	*/
            caddr_t arg;
            int obj;
#           define MAP_IRR_SZ 10
            static ptr_t map_irr[MAP_IRR_SZ];
                                /* Known irrelevant map entries	*/
            static int n_irr = 0;
            struct stat buf;
            register int i;

            for (i = 0; i < n_irr; i++) {
                if (map_irr[i] == start) goto irrelevant;
            }
            arg = (caddr_t)start;
            obj = ioctl(fd, PIOCOPENM, &arg);
            if (obj >= 0) {
                fstat(obj, &buf);
                close(obj);
                if ((buf.st_mode & 0111) != 0) {
                    if (n_irr < MAP_IRR_SZ) {
                        map_irr[n_irr++] = start;
                    }
                    goto irrelevant;
                }
            }
        }
        GC_add_roots_inner(start, limit, TRUE);
      irrelevant: ;
    }
    /* Don't keep cached descriptor, for now.  Some kernels don't like us */
    /* to keep a /proc file descriptor around during kill -9.		   */
    if (close(fd) < 0) ABORT("Couldn't close /proc file");
    fd = -1;
}

# endif /* USE_PROC || IRIX5 */
# ifdef MSWIN32

# define WIN32_LEAN_AND_MEAN
# define NOSERVICE
# include <windows.h>
# include <stdlib.h>

  /* We traverse the entire address space and register all segments	*/
  /* that could possibly have been written to.				*/

  DWORD GC_allocation_granularity;

  extern GC_bool GC_is_heap_base (ptr_t p);

# ifdef WIN32_THREADS
    extern void GC_get_next_stack(char *start, char **lo, char **hi);
# endif
  void GC_cond_add_roots(char *base, char * limit)
  {
    char dummy;
    char * stack_top
           = (char *) ((word)(&dummy) & ~(GC_allocation_granularity-1));
    if (base == limit) return;
#   ifdef WIN32_THREADS
    {
        char * curr_base = base;
        char * next_stack_lo;
        char * next_stack_hi;

        for(;;) {
            GC_get_next_stack(curr_base, &next_stack_lo, &next_stack_hi);
            if (next_stack_lo >= limit) break;
            GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
            curr_base = next_stack_hi;
        }
        if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
    }
#   else
    if (limit > stack_top && base < GC_stackbottom) {
        /* Part of the stack; ignore it. */
        return;
    }
    GC_add_roots_inner(base, limit, TRUE);
#   endif
  }
  extern GC_bool GC_win32s;
  void GC_register_dynamic_libraries()
  {
    MEMORY_BASIC_INFORMATION buf;
    SYSTEM_INFO sysinfo;
    DWORD result;
    DWORD protect;
    LPVOID p;
    char * base;
    char * limit, * new_limit;

    if (GC_win32s) return;
    GetSystemInfo(&sysinfo);
    base = limit = p = sysinfo.lpMinimumApplicationAddress;
    GC_allocation_granularity = sysinfo.dwAllocationGranularity;
    while (p < sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result != sizeof(buf)) {
            ABORT("Weird VirtualQuery result");
        }
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && (protect == PAGE_EXECUTE_READWRITE
                || protect == PAGE_READWRITE)
            && !GC_is_heap_base(buf.AllocationBase)) {
            if ((char *)p == limit) {
                limit = new_limit;
            } else {
                GC_cond_add_roots(base, limit);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
    }
    GC_cond_add_roots(base, limit);
  }

# endif /* MSWIN32 */
#if defined(ALPHA) && defined(OSF1)

#include <loader.h>

void GC_register_dynamic_libraries()
{
  int status;
  ldr_process_t mypid;

  /* module */
    ldr_module_t moduleid = LDR_NULL_MODULE;
    ldr_module_info_t moduleinfo;
    size_t moduleinfosize = sizeof(moduleinfo);
    size_t modulereturnsize;

  /* region */
    ldr_region_t region;
    ldr_region_info_t regioninfo;
    size_t regioninfosize = sizeof(regioninfo);
    size_t regionreturnsize;

  /* Obtain id of this process */
    mypid = ldr_my_process();

  /* For each module */
    while (TRUE) {

      /* Get the next (first) module */
        status = ldr_next_module(mypid, &moduleid);

      /* Any more modules? */
        if (moduleid == LDR_NULL_MODULE)
            break;    /* No more modules */

      /* Check status AFTER checking moduleid because */
      /* of a bug in the non-shared ldr_next_module stub */
        if (status != 0) {
            GC_printf1("dynamic_load: status = %ld\n", (long)status);
            {
                extern char *sys_errlist[];
                extern int sys_nerr;
                extern int errno;
                if (errno <= sys_nerr) {
                    GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
                } else {
                    GC_printf1("dynamic_load: %d\n", (long)errno);
                }
            }
            ABORT("ldr_next_module failed");
        }

      /* Get the module information */
        status = ldr_inq_module(mypid, moduleid, &moduleinfo,
                                moduleinfosize, &modulereturnsize);
        if (status != 0)
            ABORT("ldr_inq_module failed");

      /* is module for the main program (i.e. nonshared portion)? */
        if (moduleinfo.lmi_flags & LDR_MAIN)
            continue;    /* skip the main module */

#     ifdef VERBOSE
        GC_printf("---Module---\n");
        GC_printf("Module ID = %16ld\n", moduleinfo.lmi_modid);
        GC_printf("Count of regions = %16d\n", moduleinfo.lmi_nregion);
        GC_printf("flags for module = %16lx\n", moduleinfo.lmi_flags);
        GC_printf("pathname of module = \"%s\"\n", moduleinfo.lmi_name);
#     endif

      /* For each region in this module */
        for (region = 0; region < moduleinfo.lmi_nregion; region++) {

          /* Get the region information */
            status = ldr_inq_region(mypid, moduleid, region, &regioninfo,
                                    regioninfosize, &regionreturnsize);
            if (status != 0)
                ABORT("ldr_inq_region failed");

          /* only process writable (data) regions */
            if (! (regioninfo.lri_prot & LDR_W))
                continue;

#         ifdef VERBOSE
            GC_printf("--- Region ---\n");
            GC_printf("Region number = %16ld\n",
                      regioninfo.lri_region_no);
            GC_printf("Protection flags = %016x\n", regioninfo.lri_prot);
            GC_printf("Virtual address = %16p\n", regioninfo.lri_vaddr);
            GC_printf("Mapped address = %16p\n", regioninfo.lri_mapaddr);
            GC_printf("Region size = %16ld\n", regioninfo.lri_size);
            GC_printf("Region name = \"%s\"\n", regioninfo.lri_name);
#         endif

          /* register region as a garbage collection root */
            GC_add_roots_inner(
                (char *)regioninfo.lri_mapaddr,
                (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
                TRUE);
        }
    }
}
#endif /* ALPHA && OSF1 */
#if defined(HPUX)

#include <errno.h>
#include <dl.h>

extern char *sys_errlist[];
extern int sys_nerr;

void GC_register_dynamic_libraries()
{
  int status;
  int index = 1; /* Ordinal position in shared library search list */
  struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */

  /* For each dynamic library loaded */
    while (TRUE) {

      /* Get info about next shared library */
        status = shl_get(index, &shl_desc);

      /* Check if this is the end of the list or if some error occurred */
        if (status != 0) {
          /* I've seen errno values of 0.  The man page is not clear	*/
          /* as to whether errno should get set on a -1 return.	*/
          if (errno == EINVAL) {
              break; /* Moved past end of shared library list --> finished */
          } else {
              if (errno <= sys_nerr) {
                  GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
              } else {
                  GC_printf1("dynamic_load: %d\n", (long) errno);
              }
              ABORT("shl_get failed");
          }
        }

#     ifdef VERBOSE
        GC_printf0("---Shared library---\n");
        GC_printf1("\tfilename = \"%s\"\n", shl_desc->filename);
        GC_printf1("\tindex = %d\n", index);
        GC_printf1("\thandle = %08x\n",
                   (unsigned long) shl_desc->handle);
        GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
        GC_printf1("\ttext seg. end = %08x\n", shl_desc->tend);
        GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
        GC_printf1("\tdata seg. end = %08x\n", shl_desc->dend);
        GC_printf1("\tref. count = %lu\n", shl_desc->ref_count);
#     endif

      /* register shared library's data segment as a garbage collection root */
        GC_add_roots_inner((char *) shl_desc->dstart,
                           (char *) shl_desc->dend, TRUE);

        index++;
    }
}
#endif /* HPUX */
#ifdef RS6000
#pragma alloca
#include <sys/ldr.h>
#include <sys/errno.h>
void GC_register_dynamic_libraries()
{
        int len;
        char *ldibuf;
        int ldibuflen;
        struct ld_info *ldi;

        ldibuf = alloca(ldibuflen = 8192);

        while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
                if (errno != ENOMEM) {
                        ABORT("loadquery failed");
                }
                ldibuf = alloca(ldibuflen *= 2);
        }

        ldi = (struct ld_info *)ldibuf;
        while (ldi) {
                len = ldi->ldinfo_next;
                GC_add_roots_inner(
                                ldi->ldinfo_dataorg,
                                (unsigned long)ldi->ldinfo_dataorg
                                + ldi->ldinfo_datasize,
                                TRUE);
                ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
        }
}
#endif /* RS6000 */
#else /* !DYNAMIC_LOADING */
# ifdef PCR

#   include "il/PCR_IL.h"
#   include "th/PCR_ThCtl.h"
#   include "mm/PCR_MM.h"

void GC_register_dynamic_libraries()
{
    /* Add new static data areas of dynamically loaded modules.	*/
      {
          PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
          PCR_IL_LoadedSegment * q;

          /* Skip uncommitted files */
          while (p != NIL && !(p -> lf_commitPoint)) {
              /* The loading of this file has not yet been committed.	*/
              /* Hence its description could be inconsistent.		*/
              /* Furthermore, it hasn't yet been run.  Hence its data	*/
              /* segments can't possibly reference heap allocated	*/
              /* objects.						*/
              p = p -> lf_prev;
          }
          for (; p != NIL; p = p -> lf_prev) {
            for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
              if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
                  == PCR_IL_SegFlags_Traced_on) {
                  GC_add_roots_inner
                        ((char *)(q -> ls_addr),
                         (char *)(q -> ls_addr) + q -> ls_bytes,
                         TRUE);
              }
            }
          }
      }
}

# else /* !PCR */
/* Dummy version used when dynamic library support is unavailable. */
void GC_register_dynamic_libraries(){}

int GC_no_dynamic_loading;

# endif /* !PCR */
#endif /* !DYNAMIC_LOADING */