2 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 1997 by Silicon Graphics. All rights reserved.
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
15 #include "private/gc_priv.h"
18 * This is incredibly OS specific code for tracking down data sections in
19 * dynamic libraries. There appears to be no way of doing this quickly
20 * without groveling through undocumented data structures. We would argue
21 * that this is a bug in the design of the dlopen interface. THIS CODE
22 * MAY BREAK IN FUTURE OS RELEASES. If this matters to you, don't hesitate
23 * to let your vendor know ...
25 * None of this is safe with dlclose and incremental collection.
26 * But then not much of anything is safe in the presence of dlclose.
29 #if !defined(MACOS) && !defined(_WIN32_WCE) && !defined(__CC_ARM)
30 # include <sys/types.h>
33 /* BTL: avoid circular redefinition of dlopen if GC_SOLARIS_THREADS defined */
34 #undef GC_MUST_RESTORE_REDEFINED_DLOPEN
35 #if defined(GC_PTHREADS) && !defined(GC_NO_DLOPEN) \
36 && !defined(GC_NO_THREAD_REDIRECTS) && !defined(GC_USE_LD_WRAP)
37 /* To support threads in Solaris, gc.h interposes on dlopen by */
38 /* defining "dlopen" to be "GC_dlopen", which is implemented below. */
39 /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the */
40 /* real system dlopen() in their implementation. We first remove */
41 /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
43 # define GC_MUST_RESTORE_REDEFINED_DLOPEN
44 #endif /* !GC_NO_DLOPEN */
46 /* A user-supplied routine (custom filter) that might be called to */
47 /* determine whether a DSO really needs to be scanned by the GC. */
48 /* 0 means no filter installed. May be unused on some platforms. */
49 /* FIXME: Add filter support for more platforms. */
50 STATIC GC_has_static_roots_func GC_has_static_roots
= 0;
52 #if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
53 || defined(CYGWIN32)) && !defined(PCR)
55 #if !defined(SOLARISDL) && !defined(IRIX5) && \
56 !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) && \
57 !(defined(ALPHA) && defined(OSF1)) && \
58 !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
59 !defined(AIX) && !defined(SCO_ELF) && !defined(DGUX) && \
60 !(defined(FREEBSD) && defined(__ELF__)) && \
61 !(defined(OPENBSD) && (defined(__ELF__) || defined(M68K))) && \
62 !(defined(NETBSD) && defined(__ELF__)) && !defined(HURD) && \
63 !defined(DARWIN) && !defined(CYGWIN32)
64 --> We only know how to find data segments of dynamic libraries
for the
65 --> above
. Additional SVR4 variants might
not be too
77 # include <sys/param.h>
79 # include <machine/elf_machdep.h>
80 # define ELFSIZE ARCH_ELFSIZE
83 #if defined(SCO_ELF) || defined(DGUX) || defined(HURD) \
84 || (defined(__ELF__) && (defined(LINUX) || defined(FREEBSD) \
85 || defined(NETBSD) || defined(OPENBSD)))
87 # if !defined(OPENBSD) && !defined(PLATFORM_ANDROID)
88 /* FIXME: Why we exclude it for OpenBSD? */
89 /* Exclude Android because linker.h below includes its own version. */
92 # ifdef PLATFORM_ANDROID
93 /* The header file is in "bionic/linker" folder of Android sources. */
94 /* If you don't need the "dynamic loading" feature, you may build */
95 /* the collector with -D IGNORE_DYNAMIC_LOADING. */
102 /* Newer versions of GNU/Linux define this macro. We
103 * define it similarly for any ELF systems that don't. */
105 # if defined(FREEBSD)
106 # if __ELF_WORD_SIZE == 32
107 # define ElfW(type) Elf32_##type
109 # define ElfW(type) Elf64_##type
111 # elif defined(NETBSD) || defined(OPENBSD)
113 # define ElfW(type) Elf32_##type
115 # define ElfW(type) Elf64_##type
118 # if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
119 # define ElfW(type) Elf32_##type
121 # define ElfW(type) Elf64_##type
126 #if defined(SOLARISDL) && !defined(USE_PROC_FOR_LIBRARIES)
132 STATIC
struct link_map
*
133 GC_FirstDLOpenedLinkMap(void)
135 extern ElfW(Dyn
) _DYNAMIC
;
137 static struct link_map
* cachedResult
= 0;
138 static ElfW(Dyn
) *dynStructureAddr
= 0;
139 /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */
141 # ifdef SUNOS53_SHARED_LIB
142 /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
143 /* up properly in dynamically linked .so's. This means we have */
144 /* to use its value in the set of original object files loaded */
145 /* at program startup. */
146 if( dynStructureAddr
== 0 ) {
147 void* startupSyms
= dlopen(0, RTLD_LAZY
);
148 dynStructureAddr
= (ElfW(Dyn
)*)dlsym(startupSyms
, "_DYNAMIC");
151 dynStructureAddr
= &_DYNAMIC
;
154 if (dynStructureAddr
== 0) {
155 /* _DYNAMIC symbol not resolved. */
158 if( cachedResult
== 0 ) {
160 for( dp
= ((ElfW(Dyn
) *)(&_DYNAMIC
)); (tag
= dp
->d_tag
) != 0; dp
++ ) {
161 if( tag
== DT_DEBUG
) {
163 = ((struct r_debug
*)(dp
->d_un
.d_ptr
))->r_map
;
164 if( lm
!= 0 ) cachedResult
= lm
->l_next
; /* might be NULL */
172 #endif /* SOLARISDL ... */
174 /* BTL: added to fix circular dlopen definition if GC_SOLARIS_THREADS defined */
175 # ifdef GC_MUST_RESTORE_REDEFINED_DLOPEN
176 # define dlopen GC_dlopen
179 # if defined(SOLARISDL)
181 /* Add dynamic library data sections to the root set. */
182 # if !defined(PCR) && !defined(GC_SOLARIS_THREADS) && defined(THREADS)
183 --> fix mutual exclusion with dlopen
186 # ifndef USE_PROC_FOR_LIBRARIES
187 GC_INNER
void GC_register_dynamic_libraries(void)
191 for (lm
= GC_FirstDLOpenedLinkMap(); lm
!= 0; lm
= lm
->l_next
) {
194 unsigned long offset
;
198 e
= (ElfW(Ehdr
) *) lm
->l_addr
;
199 p
= ((ElfW(Phdr
) *)(((char *)(e
)) + e
->e_phoff
));
200 offset
= ((unsigned long)(lm
->l_addr
));
201 for( i
= 0; i
< (int)e
->e_phnum
; i
++, p
++ ) {
202 switch( p
->p_type
) {
205 if( !(p
->p_flags
& PF_W
) ) break;
206 start
= ((char *)(p
->p_vaddr
)) + offset
;
221 # endif /* !USE_PROC ... */
222 # endif /* SOLARISDL */
224 #if defined(SCO_ELF) || defined(DGUX) || defined(HURD) \
225 || (defined(__ELF__) && (defined(LINUX) || defined(FREEBSD) \
226 || defined(NETBSD) || defined(OPENBSD)))
228 #ifdef USE_PROC_FOR_LIBRARIES
232 #include <sys/stat.h>
236 #define MAPS_BUF_SIZE (32*1024)
238 /* Sort an array of HeapSects by start address. */
239 /* Unfortunately at least some versions of */
240 /* Linux qsort end up calling malloc by way of sysconf, and hence can't */
241 /* be used in the collector. Hence we roll our own. Should be */
242 /* reasonably fast if the array is already mostly sorted, as we expect */
244 static void sort_heap_sects(struct HeapSect
*base
, size_t number_of_elements
)
246 signed_word n
= (signed_word
)number_of_elements
;
247 signed_word nsorted
= 1;
250 while (nsorted
< n
) {
251 while (nsorted
< n
&&
252 (word
)base
[nsorted
-1].hs_start
< (word
)base
[nsorted
].hs_start
)
254 if (nsorted
== n
) break;
255 GC_ASSERT((word
)base
[nsorted
-1].hs_start
> (word
)base
[nsorted
].hs_start
);
257 while (i
>= 0 && (word
)base
[i
].hs_start
> (word
)base
[i
+1].hs_start
) {
258 struct HeapSect tmp
= base
[i
];
263 GC_ASSERT((word
)base
[nsorted
-1].hs_start
< (word
)base
[nsorted
].hs_start
);
268 STATIC word
GC_register_map_entries(char *maps
)
271 char *buf_ptr
= maps
;
273 unsigned int maj_dev
;
274 ptr_t least_ha
, greatest_ha
;
278 # ifdef DATASTART_IS_FUNC
279 static ptr_t datastart_cached
= (ptr_t
)(word
)-1;
281 /* Evaluate DATASTART only once. */
282 if (datastart_cached
== (ptr_t
)(word
)-1) {
283 datastart_cached
= (ptr_t
)(DATASTART
);
285 datastart
= datastart_cached
;
287 datastart
= (ptr_t
)(DATASTART
);
290 GC_ASSERT(I_HOLD_LOCK());
291 sort_heap_sects(GC_our_memory
, GC_n_memory
);
292 least_ha
= GC_our_memory
[0].hs_start
;
293 greatest_ha
= GC_our_memory
[GC_n_memory
-1].hs_start
294 + GC_our_memory
[GC_n_memory
-1].hs_bytes
;
297 buf_ptr
= GC_parse_map_entry(buf_ptr
, &start
, &end
, &prot
,
299 if (buf_ptr
== NULL
) return 1;
300 if (prot
[1] == 'w') {
301 /* This is a writable mapping. Add it to */
302 /* the root set unless it is already otherwise */
304 if ((word
)start
<= (word
)GC_stackbottom
305 && (word
)end
>= (word
)GC_stackbottom
) {
306 /* Stack mapping; discard */
310 /* This may fail, since a thread may already be */
311 /* unregistered, but its thread stack may still be there. */
312 /* That can fail because the stack may disappear while */
313 /* we're marking. Thus the marker is, and has to be */
314 /* prepared to recover from segmentation faults. */
316 if (GC_segment_is_thread_stack(start
, end
)) continue;
318 /* FIXME: NPTL squirrels */
319 /* away pointers in pieces of the stack segment that we */
320 /* don't scan. We work around this */
321 /* by treating anything allocated by libpthread as */
322 /* uncollectable, as we do in some other cases. */
323 /* A specifically identified problem is that */
324 /* thread stacks contain pointers to dynamic thread */
325 /* vectors, which may be reused due to thread caching. */
326 /* They may not be marked if the thread is still live. */
327 /* This specific instance should be addressed by */
328 /* INCLUDE_LINUX_THREAD_DESCR, but that doesn't quite */
329 /* seem to suffice. */
330 /* We currently trace entire thread stacks, if they are */
331 /* are currently cached but unused. This is */
332 /* very suboptimal for performance reasons. */
334 /* We no longer exclude the main data segment. */
335 if ((word
)end
<= (word
)least_ha
336 || (word
)start
>= (word
)greatest_ha
) {
337 /* The easy case; just trace entire segment */
338 GC_add_roots_inner((char *)start
, (char *)end
, TRUE
);
341 /* Add sections that don't belong to us. */
343 while ((word
)(GC_our_memory
[i
].hs_start
344 + GC_our_memory
[i
].hs_bytes
) < (word
)start
)
346 GC_ASSERT(i
< GC_n_memory
);
347 if ((word
)GC_our_memory
[i
].hs_start
<= (word
)start
) {
348 start
= GC_our_memory
[i
].hs_start
349 + GC_our_memory
[i
].hs_bytes
;
352 while (i
< GC_n_memory
353 && (word
)GC_our_memory
[i
].hs_start
< (word
)end
354 && (word
)start
< (word
)end
) {
355 if ((word
)start
< (word
)GC_our_memory
[i
].hs_start
)
356 GC_add_roots_inner((char *)start
,
357 GC_our_memory
[i
].hs_start
, TRUE
);
358 start
= GC_our_memory
[i
].hs_start
359 + GC_our_memory
[i
].hs_bytes
;
362 if ((word
)start
< (word
)end
)
363 GC_add_roots_inner((char *)start
, (char *)end
, TRUE
);
369 GC_INNER
void GC_register_dynamic_libraries(void)
371 if (!GC_register_map_entries(GC_get_maps()))
372 ABORT("Failed to read /proc for library registration");
375 /* We now take care of the main data segment ourselves: */
376 GC_INNER GC_bool
GC_register_main_static_data(void)
381 # define HAVE_REGISTER_MAIN_STATIC_DATA
383 #else /* !USE_PROC_FOR_LIBRARIES */
385 /* The following is the preferred way to walk dynamic libraries */
386 /* For glibc 2.2.4+. Unfortunately, it doesn't work for older */
387 /* versions. Thanks to Jakub Jelinek for most of the code. */
389 #if (defined(LINUX) || defined (__GLIBC__)) /* Are others OK here, too? */ \
390 && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 2) \
391 || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 2 && defined(DT_CONFIG)))
392 /* We have the header files for a glibc that includes dl_iterate_phdr. */
393 /* It may still not be available in the library on the target system. */
394 /* Thus we also treat it as a weak symbol. */
395 # define HAVE_DL_ITERATE_PHDR
396 # pragma weak dl_iterate_phdr
399 #if (defined(FREEBSD) && __FreeBSD__ >= 7)
400 /* On the FreeBSD system, any target system at major version 7 shall */
401 /* have dl_iterate_phdr; therefore, we need not make it weak as above. */
402 # define HAVE_DL_ITERATE_PHDR
403 # define DL_ITERATE_PHDR_STRONG
406 #if defined(HAVE_DL_ITERATE_PHDR)
409 /* Instead of registering PT_LOAD sections directly, we keep them */
410 /* in a temporary list, and filter them by excluding PT_GNU_RELRO */
411 /* segments. Processing PT_GNU_RELRO sections with */
412 /* GC_exclude_static_roots instead would be superficially cleaner. But */
413 /* it runs into trouble if a client registers an overlapping segment, */
414 /* which unfortunately seems quite possible. */
416 # define MAX_LOAD_SEGS MAX_ROOT_SETS
418 static struct load_segment
{
421 /* Room for a second segment if we remove a RELRO segment */
422 /* from the middle. */
425 } load_segs
[MAX_LOAD_SEGS
];
427 static int n_load_segs
;
428 # endif /* PT_GNU_RELRO */
430 STATIC
int GC_register_dynlib_callback(struct dl_phdr_info
* info
,
431 size_t size
, void * ptr
)
433 const ElfW(Phdr
) * p
;
437 /* Make sure struct dl_phdr_info is at least as big as we need. */
438 if (size
< offsetof (struct dl_phdr_info
, dlpi_phnum
)
439 + sizeof (info
->dlpi_phnum
))
443 for( i
= 0; i
< (int)info
->dlpi_phnum
; i
++, p
++ ) {
444 switch( p
->p_type
) {
447 /* This entry is known to be constant and will eventually be remapped
448 read-only. However, the address range covered by this entry is
449 typically a subset of a previously encountered `LOAD' segment, so
450 we need to exclude it. */
454 start
= ((ptr_t
)(p
->p_vaddr
)) + info
->dlpi_addr
;
455 end
= start
+ p
->p_memsz
;
456 for (j
= n_load_segs
; --j
>= 0; ) {
457 if ((word
)start
>= (word
)load_segs
[j
].start
458 && (word
)start
< (word
)load_segs
[j
].end
) {
459 if (load_segs
[j
].start2
!= 0) {
460 WARN("More than one GNU_RELRO segment per load seg\n",0);
462 GC_ASSERT((word
)end
<= (word
)load_segs
[j
].end
);
463 /* Remove from the existing load segment */
464 load_segs
[j
].end2
= load_segs
[j
].end
;
465 load_segs
[j
].end
= start
;
466 load_segs
[j
].start2
= end
;
470 if (j
== 0) WARN("Failed to find PT_GNU_RELRO segment"
471 " inside PT_LOAD region", 0);
480 GC_has_static_roots_func callback
= GC_has_static_roots
;
481 if( !(p
->p_flags
& PF_W
) ) break;
482 start
= ((char *)(p
->p_vaddr
)) + info
->dlpi_addr
;
483 end
= start
+ p
->p_memsz
;
485 if (callback
!= 0 && !callback(info
->dlpi_name
, start
, p
->p_memsz
))
488 if (n_load_segs
>= MAX_LOAD_SEGS
) ABORT("Too many PT_LOAD segs");
489 # if CPP_WORDSZ == 64
490 /* FIXME: GC_push_all eventually does the correct */
491 /* rounding to the next multiple of ALIGNMENT, so, most */
492 /* probably, we should remove the corresponding assertion */
493 /* check in GC_add_roots_inner along with this code line. */
494 /* start pointer value may require aligning */
495 start
= (ptr_t
)((word
)start
& ~(sizeof(word
) - 1));
497 load_segs
[n_load_segs
].start
= start
;
498 load_segs
[n_load_segs
].end
= end
;
499 load_segs
[n_load_segs
].start2
= 0;
500 load_segs
[n_load_segs
].end2
= 0;
503 GC_add_roots_inner(start
, end
, TRUE
);
504 # endif /* PT_GNU_RELRO */
512 *(int *)ptr
= 1; /* Signal that we were called */
516 /* Do we need to separately register the main static data segment? */
517 GC_INNER GC_bool
GC_register_main_static_data(void)
519 # ifdef DL_ITERATE_PHDR_STRONG
520 /* If dl_iterate_phdr is not a weak symbol then don't test against */
521 /* zero (otherwise a compiler might issue a warning). */
524 return (dl_iterate_phdr
== 0); /* implicit conversion to function ptr */
528 /* Return TRUE if we succeed, FALSE if dl_iterate_phdr wasn't there. */
529 STATIC GC_bool
GC_register_dynamic_libraries_dl_iterate_phdr(void)
532 if (GC_register_main_static_data())
537 static GC_bool excluded_segs
= FALSE
;
539 if (!EXPECT(excluded_segs
, TRUE
)) {
540 GC_exclude_static_roots_inner((ptr_t
)load_segs
,
541 (ptr_t
)load_segs
+ sizeof(load_segs
));
542 excluded_segs
= TRUE
;
548 dl_iterate_phdr(GC_register_dynlib_callback
, &did_something
);
553 for (i
= 0; i
< n_load_segs
; ++i
) {
554 if ((word
)load_segs
[i
].end
> (word
)load_segs
[i
].start
) {
555 GC_add_roots_inner(load_segs
[i
].start
, load_segs
[i
].end
, TRUE
);
557 if ((word
)load_segs
[i
].end2
> (word
)load_segs
[i
].start2
) {
558 GC_add_roots_inner(load_segs
[i
].start2
, load_segs
[i
].end2
, TRUE
);
565 # ifdef DATASTART_IS_FUNC
566 static ptr_t datastart_cached
= (ptr_t
)(word
)-1;
568 /* Evaluate DATASTART only once. */
569 if (datastart_cached
== (ptr_t
)(word
)-1) {
570 datastart_cached
= (ptr_t
)(DATASTART
);
572 datastart
= (char *)datastart_cached
;
574 datastart
= DATASTART
;
576 # ifdef DATAEND_IS_FUNC
578 static ptr_t dataend_cached
= 0;
579 /* Evaluate DATAEND only once. */
580 if (dataend_cached
== 0) {
581 dataend_cached
= (ptr_t
)(DATAEND
);
583 dataend
= (char *)dataend_cached
;
589 /* dl_iterate_phdr may forget the static data segment in */
590 /* statically linked executables. */
591 GC_add_roots_inner(datastart
, dataend
, TRUE
);
592 # if defined(DATASTART2)
593 GC_add_roots_inner(DATASTART2
, (char *)(DATAEND2
), TRUE
);
599 # define HAVE_REGISTER_MAIN_STATIC_DATA
601 #else /* !HAVE_DL_ITERATE_PHDR */
603 /* Dynamic loading code for Linux running ELF. Somewhat tested on
604 * Linux/x86, untested but hopefully should work on Linux/Alpha.
605 * This code was derived from the Solaris/ELF support. Thanks to
606 * whatever kind soul wrote that. - Patrick Bridges */
608 /* This doesn't necessarily work in all cases, e.g. with preloaded
609 * dynamic libraries. */
611 # if defined(NETBSD) || defined(OPENBSD)
612 # include <sys/exec_elf.h>
613 /* for compatibility with 1.4.x */
623 # elif !defined(PLATFORM_ANDROID)
627 # ifndef PLATFORM_ANDROID
631 #endif /* !HAVE_DL_ITERATE_PHDR */
634 # pragma weak _DYNAMIC
636 extern ElfW(Dyn
) _DYNAMIC
[];
638 STATIC
struct link_map
*
639 GC_FirstDLOpenedLinkMap(void)
642 static struct link_map
*cachedResult
= 0;
644 if (0 == (ptr_t
)_DYNAMIC
) {
645 /* _DYNAMIC symbol not resolved. */
648 if( cachedResult
== 0 ) {
649 # if defined(NETBSD) && defined(RTLD_DI_LINKMAP)
650 struct link_map
*lm
= NULL
;
651 if (!dlinfo(RTLD_SELF
, RTLD_DI_LINKMAP
, &lm
))
655 for( dp
= _DYNAMIC
; (tag
= dp
->d_tag
) != 0; dp
++ ) {
656 if( tag
== DT_DEBUG
) {
658 = ((struct r_debug
*)(dp
->d_un
.d_ptr
))->r_map
;
659 if( lm
!= 0 ) cachedResult
= lm
->l_next
; /* might be NULL */
663 # endif /* !NETBSD || !RTLD_DI_LINKMAP */
668 GC_INNER
void GC_register_dynamic_libraries(void)
672 # ifdef HAVE_DL_ITERATE_PHDR
673 if (GC_register_dynamic_libraries_dl_iterate_phdr()) {
677 for (lm
= GC_FirstDLOpenedLinkMap(); lm
!= 0; lm
= lm
->l_next
)
681 unsigned long offset
;
685 e
= (ElfW(Ehdr
) *) lm
->l_addr
;
686 # ifdef PLATFORM_ANDROID
690 p
= ((ElfW(Phdr
) *)(((char *)(e
)) + e
->e_phoff
));
691 offset
= ((unsigned long)(lm
->l_addr
));
692 for( i
= 0; i
< (int)e
->e_phnum
; i
++, p
++ ) {
693 switch( p
->p_type
) {
696 if( !(p
->p_flags
& PF_W
) ) break;
697 start
= ((char *)(p
->p_vaddr
)) + offset
;
698 GC_add_roots_inner(start
, start
+ p
->p_memsz
, TRUE
);
708 #endif /* !USE_PROC_FOR_LIBRARIES */
712 #if defined(IRIX5) || (defined(USE_PROC_FOR_LIBRARIES) && !defined(LINUX))
714 #include <sys/procfs.h>
715 #include <sys/stat.h>
719 #include <signal.h> /* Only for the following test. */
724 /* We use /proc to track down all parts of the address space that are */
725 /* mapped by the process, and throw out regions we know we shouldn't */
726 /* worry about. This may also work under other SVR4 variants. */
727 GC_INNER
void GC_register_dynamic_libraries(void)
731 static prmap_t
* addr_map
= 0;
732 static int current_sz
= 0; /* Number of records currently in addr_map */
733 static int needed_sz
; /* Required size of addr_map */
738 ptr_t heap_start
= HEAP_START
;
739 ptr_t heap_end
= heap_start
;
743 # endif /* SOLARISDL */
746 (void)snprintf(buf
, sizeof(buf
), "/proc/%ld", (long)getpid());
747 buf
[sizeof(buf
) - 1] = '\0';
748 /* The above generates a lint complaint, since pid_t varies. */
749 /* It's unclear how to improve this. */
750 fd
= open(buf
, O_RDONLY
);
752 ABORT("/proc open failed");
755 if (ioctl(fd
, PIOCNMAP
, &needed_sz
) < 0) {
756 GC_err_printf("fd = %d, errno = %d\n", fd
, errno
);
757 ABORT("/proc PIOCNMAP ioctl failed");
759 if (needed_sz
>= current_sz
) {
760 current_sz
= needed_sz
* 2 + 1;
761 /* Expansion, plus room for 0 record */
762 addr_map
= (prmap_t
*)GC_scratch_alloc(
763 (word
)current_sz
* sizeof(prmap_t
));
764 if (addr_map
== NULL
)
765 ABORT("Insufficient memory for address map");
767 if (ioctl(fd
, PIOCMAP
, addr_map
) < 0) {
768 GC_err_printf("fd = %d, errno = %d, needed_sz = %d, addr_map = %p\n",
769 fd
, errno
, needed_sz
, addr_map
);
770 ABORT("/proc PIOCMAP ioctl failed");
772 if (GC_n_heap_sects
> 0) {
773 heap_end
= GC_heap_sects
[GC_n_heap_sects
-1].hs_start
774 + GC_heap_sects
[GC_n_heap_sects
-1].hs_bytes
;
775 if ((word
)heap_end
< (word
)GC_scratch_last_end_ptr
)
776 heap_end
= GC_scratch_last_end_ptr
;
778 for (i
= 0; i
< needed_sz
; i
++) {
779 flags
= addr_map
[i
].pr_mflags
;
780 if ((flags
& (MA_BREAK
| MA_STACK
| MA_PHYS
781 | MA_FETCHOP
| MA_NOTCACHED
)) != 0) goto irrelevant
;
782 if ((flags
& (MA_READ
| MA_WRITE
)) != (MA_READ
| MA_WRITE
))
784 /* The latter test is empirically useless in very old Irix */
785 /* versions. Other than the */
786 /* main data and stack segments, everything appears to be */
787 /* mapped readable, writable, executable, and shared(!!). */
788 /* This makes no sense to me. - HB */
789 start
= (ptr_t
)(addr_map
[i
].pr_vaddr
);
790 if (GC_roots_present(start
)) goto irrelevant
;
791 if ((word
)start
< (word
)heap_end
&& (word
)start
>= (word
)heap_start
)
794 limit
= start
+ addr_map
[i
].pr_size
;
795 /* The following seemed to be necessary for very old versions */
796 /* of Irix, but it has been reported to discard relevant */
797 /* segments under Irix 6.5. */
799 if (addr_map
[i
].pr_off
== 0 && strncmp(start
, ELFMAG
, 4) == 0) {
800 /* Discard text segments, i.e. 0-offset mappings against */
801 /* executable files which appear to have ELF headers. */
804 # define MAP_IRR_SZ 10
805 static ptr_t map_irr
[MAP_IRR_SZ
];
806 /* Known irrelevant map entries */
807 static int n_irr
= 0;
811 for (j
= 0; j
< n_irr
; j
++) {
812 if (map_irr
[j
] == start
) goto irrelevant
;
814 arg
= (caddr_t
)start
;
815 obj
= ioctl(fd
, PIOCOPENM
, &arg
);
819 if ((buf
.st_mode
& 0111) != 0) {
820 if (n_irr
< MAP_IRR_SZ
) {
821 map_irr
[n_irr
++] = start
;
828 GC_add_roots_inner(start
, limit
, TRUE
);
831 /* Don't keep cached descriptor, for now. Some kernels don't like us */
832 /* to keep a /proc file descriptor around during kill -9. */
833 if (close(fd
) < 0) ABORT("Couldn't close /proc file");
837 # endif /* USE_PROC || IRIX5 */
839 # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
841 # ifndef WIN32_LEAN_AND_MEAN
842 # define WIN32_LEAN_AND_MEAN 1
845 # include <windows.h>
848 /* We traverse the entire address space and register all segments */
849 /* that could possibly have been written to. */
850 STATIC
void GC_cond_add_roots(char *base
, char * limit
)
852 # ifdef GC_WIN32_THREADS
853 char * curr_base
= base
;
854 char * next_stack_lo
;
855 char * next_stack_hi
;
857 if (base
== limit
) return;
859 GC_get_next_stack(curr_base
, limit
, &next_stack_lo
, &next_stack_hi
);
860 if ((word
)next_stack_lo
>= (word
)limit
) break;
861 if ((word
)next_stack_lo
> (word
)curr_base
)
862 GC_add_roots_inner(curr_base
, next_stack_lo
, TRUE
);
863 curr_base
= next_stack_hi
;
865 if ((word
)curr_base
< (word
)limit
)
866 GC_add_roots_inner(curr_base
, limit
, TRUE
);
869 = (char *)((word
)GC_approx_sp() &
870 ~(GC_sysinfo
.dwAllocationGranularity
- 1));
872 if (base
== limit
) return;
873 if ((word
)limit
> (word
)stack_top
874 && (word
)base
< (word
)GC_stackbottom
) {
875 /* Part of the stack; ignore it. */
878 GC_add_roots_inner(base
, limit
, TRUE
);
882 #ifdef DYNAMIC_LOADING
883 /* GC_register_main_static_data is not needed unless DYNAMIC_LOADING. */
884 GC_INNER GC_bool
GC_register_main_static_data(void)
886 # if defined(MSWINCE) || defined(CYGWIN32)
887 /* Do we need to separately register the main static data segment? */
890 return GC_no_win32_dlls
;
893 # define HAVE_REGISTER_MAIN_STATIC_DATA
894 #endif /* DYNAMIC_LOADING */
896 # ifdef DEBUG_VIRTUALQUERY
897 void GC_dump_meminfo(MEMORY_BASIC_INFORMATION
*buf
)
899 GC_printf("BaseAddress = 0x%lx, AllocationBase = 0x%lx,"
900 " RegionSize = 0x%lx(%lu)\n", buf
-> BaseAddress
,
901 buf
-> AllocationBase
, buf
-> RegionSize
, buf
-> RegionSize
);
902 GC_printf("\tAllocationProtect = 0x%lx, State = 0x%lx, Protect = 0x%lx, "
903 "Type = 0x%lx\n", buf
-> AllocationProtect
, buf
-> State
,
904 buf
-> Protect
, buf
-> Type
);
906 # endif /* DEBUG_VIRTUALQUERY */
908 # if defined(MSWINCE) || defined(CYGWIN32)
909 /* FIXME: Should we really need to scan MEM_PRIVATE sections? */
910 /* For now, we don't add MEM_PRIVATE sections to the data roots for */
911 /* WinCE because otherwise SEGV fault sometimes happens to occur in */
912 /* GC_mark_from() (and, even if we use WRAP_MARK_SOME, WinCE prints */
913 /* a "Data Abort" message to the debugging console). */
914 /* To workaround that, use -DGC_REGISTER_MEM_PRIVATE. */
918 GC_INNER
void GC_register_dynamic_libraries(void)
920 MEMORY_BASIC_INFORMATION buf
;
925 char * limit
, * new_limit
;
928 if (GC_no_win32_dlls
) return;
930 base
= limit
= p
= GC_sysinfo
.lpMinimumApplicationAddress
;
931 while ((word
)p
< (word
)GC_sysinfo
.lpMaximumApplicationAddress
) {
932 result
= VirtualQuery(p
, &buf
, sizeof(buf
));
935 /* Page is free; advance to the next possible allocation base */
937 (((DWORD
) p
+ GC_sysinfo
.dwAllocationGranularity
)
938 & ~(GC_sysinfo
.dwAllocationGranularity
-1));
942 if (result
!= sizeof(buf
)) {
943 ABORT("Weird VirtualQuery result");
945 new_limit
= (char *)p
+ buf
.RegionSize
;
946 protect
= buf
.Protect
;
947 if (buf
.State
== MEM_COMMIT
948 && (protect
== PAGE_EXECUTE_READWRITE
949 || protect
== PAGE_READWRITE
)
950 && (buf
.Type
== MEM_IMAGE
951 # ifdef GC_REGISTER_MEM_PRIVATE
952 || (protect
== PAGE_READWRITE
&& buf
.Type
== MEM_PRIVATE
)
954 /* There is some evidence that we cannot always */
955 /* ignore MEM_PRIVATE sections under Windows ME */
956 /* and predecessors. Hence we now also check for */
958 || (!GC_wnt
&& buf
.Type
== MEM_PRIVATE
)
961 && !GC_is_heap_base(buf
.AllocationBase
)) {
962 # ifdef DEBUG_VIRTUALQUERY
963 GC_dump_meminfo(&buf
);
965 if ((char *)p
!= limit
) {
966 GC_cond_add_roots(base
, limit
);
972 if ((word
)p
> (word
)new_limit
/* overflow */) break;
973 p
= (LPVOID
)new_limit
;
975 GC_cond_add_roots(base
, limit
);
978 #endif /* MSWIN32 || MSWINCE || CYGWIN32 */
980 #if defined(ALPHA) && defined(OSF1)
984 extern char *sys_errlist
[];
988 GC_INNER
void GC_register_dynamic_libraries(void)
994 ldr_module_t moduleid
= LDR_NULL_MODULE
;
995 ldr_module_info_t moduleinfo
;
996 size_t moduleinfosize
= sizeof(moduleinfo
);
997 size_t modulereturnsize
;
1000 ldr_region_t region
;
1001 ldr_region_info_t regioninfo
;
1002 size_t regioninfosize
= sizeof(regioninfo
);
1003 size_t regionreturnsize
;
1005 /* Obtain id of this process */
1006 mypid
= ldr_my_process();
1008 /* For each module */
1011 /* Get the next (first) module */
1012 status
= ldr_next_module(mypid
, &moduleid
);
1014 /* Any more modules? */
1015 if (moduleid
== LDR_NULL_MODULE
)
1016 break; /* No more modules */
1018 /* Check status AFTER checking moduleid because */
1019 /* of a bug in the non-shared ldr_next_module stub */
1021 GC_COND_LOG_PRINTF("dynamic_load: status = %d\n", status
);
1022 if (errno
< sys_nerr
) {
1023 GC_COND_LOG_PRINTF("dynamic_load: %s\n", sys_errlist
[errno
]);
1025 GC_COND_LOG_PRINTF("dynamic_load: err_code = %d\n", errno
);
1027 ABORT("ldr_next_module failed");
1030 /* Get the module information */
1031 status
= ldr_inq_module(mypid
, moduleid
, &moduleinfo
,
1032 moduleinfosize
, &modulereturnsize
);
1034 ABORT("ldr_inq_module failed");
1036 /* is module for the main program (i.e. nonshared portion)? */
1037 if (moduleinfo
.lmi_flags
& LDR_MAIN
)
1038 continue; /* skip the main module */
1041 GC_log_printf("---Module---\n");
1042 GC_log_printf("Module ID\t = %16ld\n", moduleinfo
.lmi_modid
);
1043 GC_log_printf("Count of regions = %16d\n", moduleinfo
.lmi_nregion
);
1044 GC_log_printf("flags for module = %16lx\n", moduleinfo
.lmi_flags
);
1045 GC_log_printf("module pathname\t = \"%s\"\n", moduleinfo
.lmi_name
);
1048 /* For each region in this module */
1049 for (region
= 0; region
< moduleinfo
.lmi_nregion
; region
++) {
1050 /* Get the region information */
1051 status
= ldr_inq_region(mypid
, moduleid
, region
, ®ioninfo
,
1052 regioninfosize
, ®ionreturnsize
);
1054 ABORT("ldr_inq_region failed");
1056 /* only process writable (data) regions */
1057 if (! (regioninfo
.lri_prot
& LDR_W
))
1061 GC_log_printf("--- Region ---\n");
1062 GC_log_printf("Region number\t = %16ld\n",
1063 regioninfo
.lri_region_no
);
1064 GC_log_printf("Protection flags = %016x\n", regioninfo
.lri_prot
);
1065 GC_log_printf("Virtual address\t = %16p\n", regioninfo
.lri_vaddr
);
1066 GC_log_printf("Mapped address\t = %16p\n",
1067 regioninfo
.lri_mapaddr
);
1068 GC_log_printf("Region size\t = %16ld\n", regioninfo
.lri_size
);
1069 GC_log_printf("Region name\t = \"%s\"\n", regioninfo
.lri_name
);
1072 /* register region as a garbage collection root */
1073 GC_add_roots_inner((char *)regioninfo
.lri_mapaddr
,
1074 (char *)regioninfo
.lri_mapaddr
+ regioninfo
.lri_size
,
1087 extern char *sys_errlist
[];
1088 extern int sys_nerr
;
1090 GC_INNER
void GC_register_dynamic_libraries(void)
1093 int index
= 1; /* Ordinal position in shared library search list */
1094 struct shl_descriptor
*shl_desc
; /* Shared library info, see dl.h */
1096 /* For each dynamic library loaded */
1099 /* Get info about next shared library */
1100 status
= shl_get(index
, &shl_desc
);
1102 /* Check if this is the end of the list or if some error occured */
1104 # ifdef GC_HPUX_THREADS
1105 /* I've seen errno values of 0. The man page is not clear */
1106 /* as to whether errno should get set on a -1 return. */
1109 if (errno
== EINVAL
) {
1110 break; /* Moved past end of shared library list --> finished */
1112 if (errno
< sys_nerr
) {
1113 GC_COND_LOG_PRINTF("dynamic_load: %s\n", sys_errlist
[errno
]);
1115 GC_COND_LOG_PRINTF("dynamic_load: err_code = %d\n", errno
);
1117 ABORT("shl_get failed");
1123 GC_log_printf("---Shared library---\n");
1124 GC_log_printf("\tfilename\t= \"%s\"\n", shl_desc
->filename
);
1125 GC_log_printf("\tindex\t\t= %d\n", index
);
1126 GC_log_printf("\thandle\t\t= %08x\n",
1127 (unsigned long) shl_desc
->handle
);
1128 GC_log_printf("\ttext seg.start\t= %08x\n", shl_desc
->tstart
);
1129 GC_log_printf("\ttext seg.end\t= %08x\n", shl_desc
->tend
);
1130 GC_log_printf("\tdata seg.start\t= %08x\n", shl_desc
->dstart
);
1131 GC_log_printf("\tdata seg.end\t= %08x\n", shl_desc
->dend
);
1132 GC_log_printf("\tref.count\t= %lu\n", shl_desc
->ref_count
);
1135 /* register shared library's data segment as a garbage collection root */
1136 GC_add_roots_inner((char *) shl_desc
->dstart
,
1137 (char *) shl_desc
->dend
, TRUE
);
1146 # include <sys/ldr.h>
1147 # include <sys/errno.h>
1148 GC_INNER
void GC_register_dynamic_libraries(void)
1153 struct ld_info
*ldi
;
1155 ldibuf
= alloca(ldibuflen
= 8192);
1157 while ( (len
= loadquery(L_GETINFO
,ldibuf
,ldibuflen
)) < 0) {
1158 if (errno
!= ENOMEM
) {
1159 ABORT("loadquery failed");
1161 ldibuf
= alloca(ldibuflen
*= 2);
1164 ldi
= (struct ld_info
*)ldibuf
;
1166 len
= ldi
->ldinfo_next
;
1168 ldi
->ldinfo_dataorg
,
1169 (ptr_t
)(unsigned long)ldi
->ldinfo_dataorg
1170 + ldi
->ldinfo_datasize
,
1172 ldi
= len
? (struct ld_info
*)((char *)ldi
+ len
) : 0;
1179 /* __private_extern__ hack required for pre-3.4 gcc versions. */
1180 #ifndef __private_extern__
1181 # define __private_extern__ extern
1182 # include <mach-o/dyld.h>
1183 # undef __private_extern__
1185 # include <mach-o/dyld.h>
1187 #include <mach-o/getsect.h>
1189 /*#define DARWIN_DEBUG*/
1191 /* Writable sections generally available on Darwin. */
1192 STATIC
const struct {
1195 } GC_dyld_sections
[] = {
1196 { SEG_DATA
, SECT_DATA
},
1197 /* Used by FSF GCC, but not by OS X system tools, so far. */
1198 { SEG_DATA
, "__static_data" },
1199 { SEG_DATA
, SECT_BSS
},
1200 { SEG_DATA
, SECT_COMMON
},
1201 /* FSF GCC - zero-sized object sections for targets */
1202 /*supporting section anchors. */
1203 { SEG_DATA
, "__zobj_data" },
1204 { SEG_DATA
, "__zobj_bss" }
1207 /* Additional writable sections: */
1208 /* GCC on Darwin constructs aligned sections "on demand", where */
1209 /* the alignment size is embedded in the section name. */
1210 /* Furthermore, there are distinctions between sections */
1211 /* containing private vs. public symbols. It also constructs */
1212 /* sections specifically for zero-sized objects, when the */
1213 /* target supports section anchors. */
1214 STATIC
const char * const GC_dyld_add_sect_fmts
[] = {
/* Currently, mach-o will allow up to the max of 2^15 alignment */
/* in an object file.                                           */
#ifndef L2_MAX_OFILE_ALIGNMENT
# define L2_MAX_OFILE_ALIGNMENT 15
#endif
1227 STATIC
const char *GC_dyld_name_for_hdr(const struct GC_MACH_HEADER
*hdr
)
1230 c
= _dyld_image_count();
1231 for (i
= 0; i
< c
; i
++)
1232 if ((const struct GC_MACH_HEADER
*)_dyld_get_image_header(i
) == hdr
)
1233 return _dyld_get_image_name(i
);
1237 /* This should never be called by a thread holding the lock. */
1238 STATIC
void GC_dyld_image_add(const struct GC_MACH_HEADER
*hdr
,
1241 unsigned long start
, end
;
1243 const struct GC_MACH_SECTION
*sec
;
1245 GC_has_static_roots_func callback
= GC_has_static_roots
;
1250 if (GC_no_dls
) return;
1251 # ifdef DARWIN_DEBUG
1252 name
= GC_dyld_name_for_hdr(hdr
);
1254 name
= callback
!= 0 ? GC_dyld_name_for_hdr(hdr
) : NULL
;
1256 for (i
= 0; i
< sizeof(GC_dyld_sections
)/sizeof(GC_dyld_sections
[0]); i
++) {
1257 sec
= GC_GETSECTBYNAME(hdr
, GC_dyld_sections
[i
].seg
,
1258 GC_dyld_sections
[i
].sect
);
1259 if (sec
== NULL
|| sec
->size
< sizeof(word
))
1261 start
= slide
+ sec
->addr
;
1262 end
= start
+ sec
->size
;
1264 /* The user callback is called holding the lock. */
1265 if (callback
== 0 || callback(name
, (void*)start
, (size_t)sec
->size
)) {
1266 # ifdef DARWIN_DEBUG
1268 "Adding section __DATA,%s at %p-%p (%lu bytes) from image %s\n",
1269 GC_dyld_sections
[i
].sect
, (void*)start
, (void*)end
,
1270 (unsigned long)sec
->size
, name
);
1272 GC_add_roots_inner((ptr_t
)start
, (ptr_t
)end
, FALSE
);
1277 /* Sections constructed on demand. */
1278 for (j
= 0; j
< sizeof(GC_dyld_add_sect_fmts
) / sizeof(char *); j
++) {
1279 fmt
= GC_dyld_add_sect_fmts
[j
];
1280 /* Add our manufactured aligned BSS sections. */
1281 for (i
= 0; i
<= L2_MAX_OFILE_ALIGNMENT
; i
++) {
1282 (void)snprintf(secnam
, sizeof(secnam
), fmt
, (unsigned)i
);
1283 secnam
[sizeof(secnam
) - 1] = '\0';
1284 sec
= GC_GETSECTBYNAME(hdr
, SEG_DATA
, secnam
);
1285 if (sec
== NULL
|| sec
->size
== 0)
1287 start
= slide
+ sec
->addr
;
1288 end
= start
+ sec
->size
;
1289 # ifdef DARWIN_DEBUG
1290 GC_log_printf("Adding on-demand section __DATA,%s at"
1291 " %p-%p (%lu bytes) from image %s\n",
1292 secnam
, (void*)start
, (void*)end
,
1293 (unsigned long)sec
->size
, name
);
1295 GC_add_roots((char*)start
, (char*)end
);
1299 # ifdef DARWIN_DEBUG
1300 GC_print_static_roots();
1304 /* This should never be called by a thread holding the lock. */
1305 STATIC
void GC_dyld_image_remove(const struct GC_MACH_HEADER
*hdr
,
1308 unsigned long start
, end
;
1310 const struct GC_MACH_SECTION
*sec
;
1314 for (i
= 0; i
< sizeof(GC_dyld_sections
)/sizeof(GC_dyld_sections
[0]); i
++) {
1315 sec
= GC_GETSECTBYNAME(hdr
, GC_dyld_sections
[i
].seg
,
1316 GC_dyld_sections
[i
].sect
);
1317 if (sec
== NULL
|| sec
->size
== 0)
1319 start
= slide
+ sec
->addr
;
1320 end
= start
+ sec
->size
;
1321 # ifdef DARWIN_DEBUG
1323 "Removing section __DATA,%s at %p-%p (%lu bytes) from image %s\n",
1324 GC_dyld_sections
[i
].sect
, (void*)start
, (void*)end
,
1325 (unsigned long)sec
->size
, GC_dyld_name_for_hdr(hdr
));
1327 GC_remove_roots((char*)start
, (char*)end
);
1330 /* Remove our on-demand sections. */
1331 for (j
= 0; j
< sizeof(GC_dyld_add_sect_fmts
) / sizeof(char *); j
++) {
1332 fmt
= GC_dyld_add_sect_fmts
[j
];
1333 for (i
= 0; i
<= L2_MAX_OFILE_ALIGNMENT
; i
++) {
1334 (void)snprintf(secnam
, sizeof(secnam
), fmt
, (unsigned)i
);
1335 secnam
[sizeof(secnam
) - 1] = '\0';
1336 sec
= GC_GETSECTBYNAME(hdr
, SEG_DATA
, secnam
);
1337 if (sec
== NULL
|| sec
->size
== 0)
1339 start
= slide
+ sec
->addr
;
1340 end
= start
+ sec
->size
;
1341 # ifdef DARWIN_DEBUG
1342 GC_log_printf("Removing on-demand section __DATA,%s at"
1343 " %p-%p (%lu bytes) from image %s\n", secnam
,
1344 (void*)start
, (void*)end
, (unsigned long)sec
->size
,
1345 GC_dyld_name_for_hdr(hdr
));
1347 GC_remove_roots((char*)start
, (char*)end
);
1351 # ifdef DARWIN_DEBUG
1352 GC_print_static_roots();
1356 GC_INNER
void GC_register_dynamic_libraries(void)
1358 /* Currently does nothing. The callbacks are setup by GC_init_dyld()
1359 The dyld library takes it from there. */
1362 /* The _dyld_* functions have an internal lock so no _dyld functions
1363 can be called while the world is stopped without the risk of a deadlock.
1364 Because of this we MUST setup callbacks BEFORE we ever stop the world.
1365 This should be called BEFORE any thread in created and WITHOUT the
1366 allocation lock held. */
1368 GC_INNER
void GC_init_dyld(void)
1370 static GC_bool initialized
= FALSE
;
1372 if (initialized
) return;
1374 # ifdef DARWIN_DEBUG
1375 GC_log_printf("Registering dyld callbacks...\n");
1378 /* Apple's Documentation:
1379 When you call _dyld_register_func_for_add_image, the dynamic linker
1380 runtime calls the specified callback (func) once for each of the images
1381 that is currently loaded into the program. When a new image is added to
1382 the program, your callback is called again with the mach_header for the
1383 new image, and the virtual memory slide amount of the new image.
1385 This WILL properly register already linked libraries and libraries
1386 linked in the future.
1389 _dyld_register_func_for_add_image(GC_dyld_image_add
);
1390 _dyld_register_func_for_remove_image(GC_dyld_image_remove
);
1391 /* Ignore 2 compiler warnings here: passing argument 1 of */
1392 /* '_dyld_register_func_for_add/remove_image' from incompatible */
1395 /* Set this early to avoid reentrancy issues. */
1398 # ifdef NO_DYLD_BIND_FULLY_IMAGE
1399 /* FIXME: What should we do in this case? */
1401 if (GC_no_dls
) return; /* skip main data segment registration */
1403 /* When the environment variable is set, the dynamic linker binds */
1404 /* all undefined symbols the application needs at launch time. */
1405 /* This includes function symbols that are normally bound lazily at */
1406 /* the time of their first invocation. */
1407 if (GETENV("DYLD_BIND_AT_LAUNCH") == 0) {
1408 /* The environment variable is unset, so we should bind manually. */
1409 # ifdef DARWIN_DEBUG
1410 GC_log_printf("Forcing full bind of GC code...\n");
1412 /* FIXME: '_dyld_bind_fully_image_containing_address' is deprecated. */
1413 if (!_dyld_bind_fully_image_containing_address(
1414 (unsigned long *)GC_malloc
))
1415 ABORT("_dyld_bind_fully_image_containing_address failed");
1420 #define HAVE_REGISTER_MAIN_STATIC_DATA
1421 GC_INNER GC_bool
GC_register_main_static_data(void)
1423 /* Already done through dyld callbacks */
1431 # include "il/PCR_IL.h"
1432 # include "th/PCR_ThCtl.h"
1433 # include "mm/PCR_MM.h"
1435 GC_INNER
void GC_register_dynamic_libraries(void)
1437 /* Add new static data areas of dynamically loaded modules. */
1438 PCR_IL_LoadedFile
* p
= PCR_IL_GetLastLoadedFile();
1439 PCR_IL_LoadedSegment
* q
;
1441 /* Skip uncommitted files */
1442 while (p
!= NIL
&& !(p
-> lf_commitPoint
)) {
1443 /* The loading of this file has not yet been committed */
1444 /* Hence its description could be inconsistent. */
1445 /* Furthermore, it hasn't yet been run. Hence its data */
1446 /* segments can't possibly reference heap allocated */
1450 for (; p
!= NIL
; p
= p
-> lf_prev
) {
1451 for (q
= p
-> lf_ls
; q
!= NIL
; q
= q
-> ls_next
) {
1452 if ((q
-> ls_flags
& PCR_IL_SegFlags_Traced_MASK
)
1453 == PCR_IL_SegFlags_Traced_on
) {
1454 GC_add_roots_inner((char *)(q
-> ls_addr
),
1455 (char *)(q
-> ls_addr
) + q
-> ls_bytes
, TRUE
);
1460 #endif /* PCR && !DYNAMIC_LOADING && !MSWIN32 */
#if !defined(HAVE_REGISTER_MAIN_STATIC_DATA) && defined(DYNAMIC_LOADING)
  /* Do we need to separately register the main static data segment? */
  GC_INNER GC_bool GC_register_main_static_data(void)
  {
    return TRUE;
  }
#endif /* HAVE_REGISTER_MAIN_STATIC_DATA */
1470 /* Register a routine to filter dynamic library registration. */
1471 GC_API
void GC_CALL
GC_register_has_static_roots_callback(
1472 GC_has_static_roots_func callback
)
1474 GC_has_static_roots
= callback
;