 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
# include "private/gc_pmark.h"

#if defined(MSWIN32) && defined(__GNUC__)

/* We put this here to minimize the risk of inlining. */
void GC_noop(void *p, ...) {}

void GC_noop(void *p, ...) {}
/* Single argument version, robust against whole program analysis. */
static VOLATILE word sink;

/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */

word GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
/* Initialize GC_obj_kinds and the standard free lists properly.	*/
/* This must be done statically since they may be accessed before	*/
/* GC_init is called.							*/
/* It's done here, since we need to deal with mark descriptors.	*/
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
		0 | GC_DS_LENGTH, FALSE, FALSE },
/* NORMAL  */ { &GC_objfreelist[0], 0,
		0 | GC_DS_LENGTH,	/* Adjusted in GC_init_inner for EXTRA_BYTES */
		TRUE /* add length to descr */, TRUE },
	      { &GC_uobjfreelist[0], 0,
		0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
	      { &GC_auobjfreelist[0], 0,
		0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/*STUBBORN*/ { &GC_sobjfreelist[0], 0,
		0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};

# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
#   ifdef STUBBORN_ALLOC
# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
		/* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a	*/
		/* multiple of HBLKSIZE.				*/
		/* The incremental collector actually likes a larger	*/
		/* size, since it wants to push all marked dirty	*/
		/* objects before marking anything new.  Currently we	*/
		/* let it grow dynamically.				*/
# endif
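/* Illustrative sketch (not part of the collector): one way to check	*/
/* the sizing constraint described above at compile time.  The entry	*/
/* type and constants below are hypothetical stand-ins for the real	*/
/* mse and HBLKSIZE definitions.					*/
#if 0
  typedef struct { void *start; unsigned long descr; } example_mse;
  enum { EXAMPLE_HBLKSIZE = 4096,
         EXAMPLE_MARK_STACK_SIZE = EXAMPLE_HBLKSIZE };
  /* A negative array size forces a compile error if the product is	*/
  /* not a multiple of the (example) block size.			*/
  typedef char example_size_check[
      (EXAMPLE_MARK_STACK_SIZE * sizeof(example_mse))
          % EXAMPLE_HBLKSIZE == 0 ? 1 : -1];
#endif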
/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack(incl.) and GC_mark_stack_top(incl.) still
 * need to be marked from.
 */

word GC_n_rescuing_pages;	/* Number of dirty pages we marked from	*/
				/* excludes ptrfree pages, etc.		*/

mse * GC_mark_stack_limit;

word GC_mark_stack_size = 0;
#ifdef PARALLEL_MARK
  mse * VOLATILE GC_mark_stack_top;
#else
  mse * GC_mark_stack_top;
#endif

static struct hblk * scan_ptr;

mark_state_t GC_mark_state = MS_NONE;

GC_bool GC_mark_stack_too_small = FALSE;

GC_bool GC_objects_are_marked = FALSE;	/* Are there collectable marked	*/
					/* objects in the heap?		*/
/* Is a collection in progress?  Note that this can return true in the	*/
/* nonincremental case, if a collection has been abandoned and the	*/
/* mark state is now MS_INVALID.					*/
GC_bool GC_collection_in_progress()
{
    return(GC_mark_state != MS_NONE);
}
/* clear all mark bits in the header */
void GC_clear_hdr_marks(hhdr)
register hdr * hhdr;
{
#   ifdef USE_MARK_BYTES
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
#   else
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
#   endif
}
/* Set all mark bits in the header.  Used for uncollectable blocks. */
void GC_set_hdr_marks(hhdr)
register hdr * hhdr;
{
    register int i;

    for (i = 0; i < MARK_BITS_SZ; ++i) {
#     ifdef USE_MARK_BYTES
	hhdr -> hb_marks[i] = 1;
#     else
	hhdr -> hb_marks[i] = ONES;
#     endif
    }
}
/*
 * Clear all mark bits associated with block h.
 */
# if defined(__STDC__) || defined(__cplusplus)
    static void clear_marks_for_block(struct hblk *h, word dummy)
# else
    static void clear_marks_for_block(h, dummy)
    struct hblk *h;
    word dummy;
# endif
{
    register hdr * hhdr = HDR(h);

    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bit for these is cleared only once the object is	*/
        /* explicitly deallocated.  This either frees the block, or	*/
        /* the bit is cleared once the object is on the free list.	*/
    GC_clear_hdr_marks(hhdr);
}
/* Slow but general routines for setting/clearing/asking about mark bits */
void GC_set_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    set_mark_bit_from_hdr(hhdr, word_no);
}
void GC_clear_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    clear_mark_bit_from_hdr(hhdr, word_no);
}
GC_bool GC_is_marked(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    return(mark_bit_from_hdr(hhdr, word_no));
}
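/* Illustrative sketch (not part of the collector): the arithmetic	*/
/* used by the routines above.  HBLKPTR rounds a pointer down to its	*/
/* block, and the mark bit index is the word offset from the block	*/
/* start.  The block size and word type below are hypothetical		*/
/* stand-ins.								*/
#if 0
  enum { EXAMPLE_HBLKSIZE = 4096 };	/* stand-in for HBLKSIZE */
  typedef unsigned long example_word;	/* stand-in for word	 */

  static int example_mark_index(void *p)
  {
      unsigned long addr = (unsigned long)p;
      /* Round down to the (block-size aligned) block start. */
      unsigned long block = addr & ~(unsigned long)(EXAMPLE_HBLKSIZE - 1);
      /* Word index within the block, as in (word *)p - (word *)h. */
      return (int)((addr - block) / sizeof(example_word));
  }
#endif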
/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
void GC_clear_marks()
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
    /* Counters reflect currently marked objects: reset here */
    GC_composite_in_use = 0;
    GC_atomic_in_use = 0;
}
/* Initiate a garbage collection.  Initiates a full collection if the	*/
/* mark state is invalid.						*/
void GC_initiate_gc()
{
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
	extern void GC_check_dirty();

	if (GC_dirty_maintained) GC_check_dirty();
    GC_n_rescuing_pages = 0;
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
	ABORT("unexpected state");
    } /* else this is really a full collection, and mark	*/
      /* bits are invalid.					*/
static void alloc_mark_stack();

/* Perform a small amount of marking.			*/
/* We try to touch roughly a page of memory.		*/
/* Return TRUE if we just finished a mark phase.	*/
/* Cold_gc_frame is an address inside a GC frame that	*/
/* remains valid until all marking is complete.		*/
/* A zero value indicates that it's OK to miss some	*/
/* register values.					*/
/* We hold the allocation lock.  In the case of		*/
/* incremental collection, the world may not be stopped.*/
#ifdef MSWIN32
/* For win32, this is called after we establish a structured	*/
/* exception handler, in case Windows unmaps one of our root	*/
/* segments.  See below.  In either case, we acquire the	*/
/* allocator lock long before we get here.			*/
GC_bool GC_mark_some_inner(cold_gc_frame)
ptr_t cold_gc_frame;
#else
GC_bool GC_mark_some(cold_gc_frame)
ptr_t cold_gc_frame;
#endif
{
    switch(GC_mark_state) {
	case MS_PUSH_RESCUERS:
	    if (GC_mark_stack_top
	        >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
		/* Go ahead and mark, even though that might cause us to */
		/* see more marked dirty objects later on.  Avoid this	  */
		/* in the future.					  */
	        GC_mark_stack_too_small = TRUE;
	        MARK_FROM_MARK_STACK();
	        scan_ptr = GC_push_next_marked_dirty(scan_ptr);
		if (GC_print_stats) {
		    GC_printf1("Marked from %lu dirty pages\n",
			       (unsigned long)GC_n_rescuing_pages);
		GC_push_roots(FALSE, cold_gc_frame);
		GC_objects_are_marked = TRUE;
		if (GC_mark_state != MS_INVALID) {
		    GC_mark_state = MS_ROOTS_PUSHED;
	case MS_PUSH_UNCOLLECTABLE:
	    if (GC_mark_stack_top
	        >= GC_mark_stack + GC_mark_stack_size/4) {
#	        ifdef PARALLEL_MARK
		  /* Avoid this, since we don't parallelize the marker */
		  if (GC_parallel) GC_mark_stack_too_small = TRUE;
	        MARK_FROM_MARK_STACK();
	        scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
		GC_push_roots(TRUE, cold_gc_frame);
		GC_objects_are_marked = TRUE;
		if (GC_mark_state != MS_INVALID) {
		    GC_mark_state = MS_ROOTS_PUSHED;
	case MS_ROOTS_PUSHED:
#	    ifdef PARALLEL_MARK
	      /* In the incremental GC case, this currently doesn't	*/
	      /* quite do the right thing, since it runs to		*/
	      /* completion.  On the other hand, starting a		*/
	      /* parallel marker is expensive, so perhaps it is		*/
	      /* the right thing?					*/
	      /* Eventually, incremental marking should run		*/
	      /* asynchronously in multiple threads, without grabbing	*/
	      /* the allocation lock.					*/
	        GC_do_parallel_mark();
	        GC_ASSERT(GC_mark_stack_top < GC_first_nonempty);
	        GC_mark_stack_top = GC_mark_stack - 1;
	        if (GC_mark_stack_too_small) {
	            alloc_mark_stack(2*GC_mark_stack_size);
	        if (GC_mark_state == MS_ROOTS_PUSHED) {
	            GC_mark_state = MS_NONE;
	    if (GC_mark_stack_top >= GC_mark_stack) {
	        MARK_FROM_MARK_STACK();
	        GC_mark_state = MS_NONE;
	        if (GC_mark_stack_too_small) {
	            alloc_mark_stack(2*GC_mark_stack_size);
	case MS_PARTIALLY_INVALID:
	    if (!GC_objects_are_marked) {
		GC_mark_state = MS_PUSH_UNCOLLECTABLE;
	    if (GC_mark_stack_top >= GC_mark_stack) {
		MARK_FROM_MARK_STACK();
	    if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
		/* About to start a heap scan for marked objects. */
		/* Mark stack is empty.  OK to reallocate.	  */
		if (GC_mark_stack_too_small) {
		    alloc_mark_stack(2*GC_mark_stack_size);
		GC_mark_state = MS_PARTIALLY_INVALID;
	    scan_ptr = GC_push_next_marked(scan_ptr);
	    if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
		GC_push_roots(TRUE, cold_gc_frame);
		GC_objects_are_marked = TRUE;
		if (GC_mark_state != MS_INVALID) {
		    GC_mark_state = MS_ROOTS_PUSHED;
	    ABORT("GC_mark_some: bad state");
    EXCEPTION_REGISTRATION ex_reg;

  static EXCEPTION_DISPOSITION mark_ex_handler(
      struct _EXCEPTION_RECORD *ex_rec,
      struct _CONTEXT *context,

    if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
        ext_ex_regn *xer = (ext_ex_regn *)est_frame;

        /* Unwind from the inner function assuming the standard	*/
        /* function prologue.						*/
        /* Assumes code has not been compiled with			*/
        /* -fomit-frame-pointer.					*/
        context->Esp = context->Ebp;
        context->Ebp = *((DWORD *)context->Esp);
        context->Esp = context->Esp - 8;

        /* Resume execution at the "real" handler within the		*/
        /* wrapper function.						*/
        context->Eip = (DWORD)(xer->alt_path);

        return ExceptionContinueExecution;

    return ExceptionContinueSearch;

# endif /* __GNUC__ */
GC_bool GC_mark_some(cold_gc_frame)
ptr_t cold_gc_frame;
{
      /* Windows 98 appears to asynchronously create and remove	*/
      /* writable memory mappings, for reasons we haven't yet		*/
      /* understood.  Since we look for writable regions to		*/
      /* determine the root set, we may try to mark from an		*/
      /* address range that disappeared since we started the		*/
      /* collection.  Thus we have to recover from faults here.	*/
      /* This code does not appear to be necessary for Windows		*/
      /* 95/NT/2000.  Note that this code should never generate	*/
      /* an incremental GC write fault.					*/

#   else /* __GNUC__ */

      /* Manually install an exception handler since GCC does		*/
      /* not yet support Structured Exception Handling (SEH) on	*/
      /* Win32.								*/

      er.alt_path = &&handle_ex;
      er.ex_reg.handler = mark_ex_handler;
      asm volatile ("movl %%fs:0, %0" : "=r" (er.ex_reg.prev));
      asm volatile ("movl %0, %%fs:0" : : "r" (&er));

#   endif /* __GNUC__ */

	  ret_val = GC_mark_some_inner(cold_gc_frame);

    } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
	        EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {

#   else /* __GNUC__ */

      /* Prevent GCC from considering the following code unreachable	*/
      /* and thus eliminating it.					*/
      if (er.alt_path != 0)

      /* Execution resumes from here on an access violation.	*/

#   endif /* __GNUC__ */

      if (GC_print_stats) {
	GC_printf0("Caught ACCESS_VIOLATION in marker. "
		   "Memory mapping disappeared.\n");
#   endif /* CONDPRINT */

      /* We have bad roots on the stack.  Discard mark stack.	*/
      /* Rescan from marked objects.  Redetermine roots.	*/
      GC_invalidate_mark_state();

#   else /* __GNUC__ */

      /* Uninstall the exception handler */
      asm volatile ("mov %0, %%fs:0" : : "r" (er.ex_reg.prev));

#   endif /* __GNUC__ */
GC_bool GC_mark_stack_empty()
{
    return(GC_mark_stack_top < GC_mark_stack);
}

  word GC_prof_array[10];
# define PROF(n) GC_prof_array[n]++
/* Given a pointer to someplace other than a small object page or the	*/
/* first page of a large object, either:				*/
/*	- return a pointer to somewhere in the first page of the large	*/
/*	  object, if current points to a large object.			*/
/*	  In this case *hhdr is replaced with a pointer to the header	*/
/*	  for the large object.						*/
/*	- just return current if it does not point to a large object.	*/
ptr_t GC_find_start(current, hhdr, new_hdr_p)
register ptr_t current;
register hdr *hhdr, **new_hdr_p;
{
    if (GC_all_interior_pointers) {
	register ptr_t orig = current;

	current = (ptr_t)HBLKPTR(current);
	  current = current - HBLKSIZE*(word)hhdr;
	} while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
	/* current points to near the start of the large object */
	if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(orig);
	if ((word *)orig - (word *)current
	     >= (ptrdiff_t)(hhdr->hb_sz)) {
	    /* Pointer past the end of the block */
void GC_invalidate_mark_state()
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_top = GC_mark_stack-1;
}

mse * GC_signal_mark_stack_overflow(msp)
mse * msp;
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_too_small = TRUE;
      if (GC_print_stats) {
	GC_printf1("Mark stack overflow; current size = %lu entries\n",
		   GC_mark_stack_size);
    return(msp - GC_MARK_STACK_DISCARDS);
}
/*
 * Mark objects pointed to by the regions described by
 * mark stack entries between GC_mark_stack and GC_mark_stack_top,
 * inclusive.  Assumes the upper limit of a mark stack entry
 * is never 0.  A mark stack entry never has size 0.
 * We try to traverse on the order of a hblk of memory before we return.
 * Caller is responsible for calling this until the mark stack is empty.
 * Note that this is the most performance critical routine in the
 * collector.  Hence it contains all sorts of ugly hacks to speed
 * things up.  In particular, we avoid procedure calls on the common
 * path, we take advantage of peculiarities of the mark descriptor
 * encoding, we optionally maintain a cache for the block address to
 * header mapping, we prefetch when an object is "grayed", etc.
 */
mse * GC_mark_from(mark_stack_top, mark_stack, mark_stack_limit)
mse * mark_stack_top;
mse * mark_stack_limit;
{
  int credit = HBLKSIZE;	/* Remaining credit for marking work	*/
  register word * current_p;	/* Pointer to current candidate ptr.	*/
  register word current;	/* Candidate pointer.			*/
  register word * limit;	/* (Incl) limit of current candidate	*/
				/* range.				*/
  register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  register ptr_t least_ha = GC_least_plausible_heap_addr;
# define SPLIT_RANGE_WORDS 128  /* Must be power of 2.		*/

  GC_objects_are_marked = TRUE;
# ifdef OS2 /* Use untweaked version to circumvent compiler problem */
  while (mark_stack_top >= mark_stack && credit >= 0) {
# else
  while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit)
	 >= 0) {
# endif
    current_p = mark_stack_top -> mse_start;
    descr = mark_stack_top -> mse_descr;
    /* current_p and descr describe the current object.		*/
    /* *mark_stack_top is vacant.					*/
    /* The following is 0 only for small objects described by a simple	*/
    /* length descriptor.  For many applications this is the common	*/
    /* case, so we try to detect it quickly.				*/
    if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
      word tag = descr & GC_DS_TAGS;
	  /* Process part of the range to avoid pushing too much on the	*/
	  /* stack.							*/
	  GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
		    - (word)GC_least_plausible_heap_addr);
#	  ifdef PARALLEL_MARK
#	    define SHARE_BYTES 2048
	    if (descr > SHARE_BYTES && GC_parallel
		&& mark_stack_top < mark_stack_limit - 1) {
	      int new_size = (descr/2) & ~(sizeof(word)-1);
	      mark_stack_top -> mse_start = current_p;
	      mark_stack_top -> mse_descr = new_size + sizeof(word);
					/* makes sure we handle		*/
					/* misaligned pointers.		*/
	      current_p = (word *) ((char *)current_p + new_size);
#	  endif /* PARALLEL_MARK */
	  mark_stack_top -> mse_start =
	     limit = current_p + SPLIT_RANGE_WORDS-1;
	  mark_stack_top -> mse_descr =
		    descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
	  /* Make sure that pointers overlapping the two ranges are	*/
	  /* considered.						*/
	  limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
	  descr &= ~GC_DS_TAGS;
	  credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
	    if ((signed_word)descr < 0) {
	      current = *current_p;
	      FIXUP_POINTER(current);
	      if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
		PREFETCH((ptr_t)current);
		HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
				 mark_stack_limit, current_p, exit1);
	  credit -= GC_PROC_BYTES;
			(current_p, mark_stack_top,
			 mark_stack_limit, ENV(descr));
	case GC_DS_PER_OBJECT:
	  if ((signed_word)descr >= 0) {
	    /* Descriptor is in the object.	*/
	    descr = *(word *)((ptr_t)current_p + descr - GC_DS_PER_OBJECT);
	    /* Descriptor is in type descriptor pointed to by first	*/
	    /* word in object.						*/
	    ptr_t type_descr = *(ptr_t *)current_p;
	    /* type_descr is either a valid pointer to the descriptor	*/
	    /* structure, or this object was on a free list.  If it	*/
	    /* was anything but the last object on the free list,	*/
	    /* we will misinterpret the next object on the free list as	*/
	    /* the type descriptor, and get a 0 GC descriptor, which	*/
	    /* is ideal.  Unfortunately, we need to check for the last	*/
	    /* object case explicitly.					*/
	    if (0 == type_descr) {
		/* Rarely executed.	*/
	    descr = *(word *)(type_descr
			      - (descr - (GC_DS_PER_OBJECT
					  - GC_INDIR_PER_OBJ_BIAS)));
	    /* Can happen either because we generated a 0 descriptor	*/
	    /* or we saw a pointer to a free object.			*/
      } else /* Small object with length descriptor */ {
	limit = (word *)(((ptr_t)current_p) + (word)descr);
      /* The simple case in which we're scanning a range.	*/
      GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
      credit -= (ptr_t)limit - (ptr_t)current_p;
#     ifndef SMALL_CONFIG
	/* Try to prefetch the next pointer to be examined asap.	*/
	/* Empirically, this also seems to help slightly without	*/
	/* prefetches, at least on linux/X86.  Presumably this loop	*/
	/* ends up with less register pressure, and gcc thus ends up	*/
	/* generating slightly better code.  Overall gcc code quality	*/
	/* for this loop is still not great.				*/
	  PREFETCH((ptr_t)limit - PREF_DIST*CACHE_LINE_SIZE);
	  GC_ASSERT(limit >= current_p);
	  FIXUP_POINTER(deferred);
	  limit = (word *)((char *)limit - ALIGNMENT);
	  if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
	    PREFETCH((ptr_t)deferred);
	  if (current_p > limit) goto next_object;
	  /* Unroll once, so we don't do too many of the prefetches	*/
	  /* based on limit.						*/
	  FIXUP_POINTER(deferred);
	  limit = (word *)((char *)limit - ALIGNMENT);
	  if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
	    PREFETCH((ptr_t)deferred);
	  if (current_p > limit) goto next_object;
      while (current_p <= limit) {
	/* Empirically, unrolling this loop doesn't help a lot.	*/
	/* Since HC_PUSH_CONTENTS expands to a lot of code,		*/
	/* we don't.							*/
	current = *current_p;
	FIXUP_POINTER(current);
	PREFETCH((ptr_t)current_p + PREF_DIST*CACHE_LINE_SIZE);
	if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
	  /* Prefetch the contents of the object we just pushed.  It's	*/
	  /* likely we will need them soon.				*/
	  PREFETCH((ptr_t)current);
	  HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
			   mark_stack_limit, current_p, exit2);
	current_p = (word *)((char *)current_p + ALIGNMENT);

#     ifndef SMALL_CONFIG
	/* We still need to mark the entry we previously prefetched.	*/
	/* We already know that it passes the preliminary pointer	*/
	/* validity test.						*/
	HC_PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
			 mark_stack_limit, current_p, exit4);
  return mark_stack_top;
}
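/* Illustrative sketch (not part of the collector): how the low two	*/
/* descriptor bits select the cases handled in GC_mark_from above.	*/
/* The tag values and names here are simplified stand-ins for the	*/
/* definitions in gc_mark.h.						*/
#if 0
  enum { EX_DS_TAGS = 0x3, EX_DS_LENGTH = 0, EX_DS_BITMAP = 1,
         EX_DS_PROC = 2, EX_DS_PER_OBJECT = 3 };

  static const char * example_descr_kind(unsigned long descr)
  {
      switch (descr & EX_DS_TAGS) {
        case EX_DS_LENGTH:     return "simple length, in bytes";
        case EX_DS_BITMAP:     return "bitmap of pointer-valued words";
        case EX_DS_PROC:       return "client-supplied mark procedure";
        case EX_DS_PER_OBJECT: return "descriptor stored in the object";
        default:               return "unreachable";
      }
  }
#endif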
/* We assume we have an ANSI C Compiler.	*/
GC_bool GC_help_wanted = FALSE;
unsigned GC_helper_count = 0;
unsigned GC_active_count = 0;
mse * VOLATILE GC_first_nonempty;

#define LOCAL_MARK_STACK_SIZE HBLKSIZE
	/* Under normal circumstances, this is big enough to guarantee	*/
	/* we don't overflow half of it in a single call to		*/
	/* GC_mark_from.						*/
/* Steal mark stack entries starting at mse low into mark stack local	*/
/* until we either steal mse high, or we have max entries.		*/
/* Return a pointer to the top of the local mark stack.		*/
/* *next is replaced by a pointer to the next unscanned mark stack	*/
/* entry.								*/
mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
			  unsigned max, mse **next)
{
    mse *top = local - 1;

    /* Make sure that prior writes to the mark stack are visible.	*/
    /* On some architectures, the fact that the reads are		*/
    /* volatile should suffice.						*/
#   if !defined(IA64) && !defined(HP_PA) && !defined(I386)
    GC_ASSERT(high >= low-1 && high - low + 1 <= GC_mark_stack_size);
    for (p = low; p <= high && i <= max; ++p) {
	word descr = *(volatile word *) &(p -> mse_descr);
	/* In the IA64 memory model, the following volatile store is	*/
	/* ordered after this read of descr.  Thus a thread must read	*/
	/* the original nonzero value.  HP_PA appears to be similar,	*/
	/* and if I'm reading the P4 spec correctly, X86 is probably	*/
	/* also OK.  In some other cases we need a barrier.		*/
#       if !defined(IA64) && !defined(HP_PA) && !defined(I386)
	*(volatile word *) &(p -> mse_descr) = 0;
	/* More than one thread may get this entry, but that's only	*/
	/* a minor performance problem.					*/
	top -> mse_descr = descr;
	top -> mse_start = p -> mse_start;
	GC_ASSERT( (top -> mse_descr & GC_DS_TAGS) != GC_DS_LENGTH ||
		   top -> mse_descr < (ptr_t)GC_greatest_plausible_heap_addr
				      - (ptr_t)GC_least_plausible_heap_addr);
	/* If this is a big object, count it as			*/
	/* size/256 + 1 objects.				*/
	if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (descr >> 8);
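/* Illustrative sketch (not part of the collector): the work-counting	*/
/* heuristic used above.  A length-tagged entry describing a large	*/
/* range counts as several units of work, so a thread stops stealing	*/
/* once it has grabbed roughly "max" units rather than "max" entries.	*/
/* The helper name below is hypothetical.				*/
#if 0
  static unsigned example_entry_weight(unsigned long descr_bytes)
  {
      /* One unit per entry, plus one unit per 256 bytes described. */
      return 1 + (unsigned)(descr_bytes >> 8);
  }
#endif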
/* Copy back a local mark stack.	*/
/* low and high are inclusive bounds.	*/
void GC_return_mark_stack(mse * low, mse * high)
{
    if (high < low) return;
    stack_size = high - low + 1;
    GC_acquire_mark_lock();
    my_top = GC_mark_stack_top;
    my_start = my_top + 1;
    if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
      if (GC_print_stats) {
	  GC_printf0("No room to copy back mark stack.");
      GC_mark_state = MS_INVALID;
      GC_mark_stack_too_small = TRUE;
      /* We drop the local mark stack.  We'll fix things later.	*/
      BCOPY(low, my_start, stack_size * sizeof(mse));
      GC_ASSERT(GC_mark_stack_top == my_top);
#     if !defined(IA64) && !defined(HP_PA)
      /* On IA64, the volatile write acts as a release barrier. */
      GC_mark_stack_top = my_top + stack_size;
    GC_release_mark_lock();
    GC_notify_all_marker();
}
/* Mark from the local mark stack.		*/
/* On return, the local mark stack is empty.	*/
/* But this may be achieved by copying the	*/
/* local mark stack back into the global one.	*/
void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
{
#   define N_LOCAL_ITERS 1

#   ifdef GC_ASSERTIONS
      /* Make sure we don't hold mark lock. */
      GC_acquire_mark_lock();
      GC_release_mark_lock();
	for (n = 0; n < N_LOCAL_ITERS; ++n) {
	    local_top = GC_mark_from(local_top, local_mark_stack,
				     local_mark_stack + LOCAL_MARK_STACK_SIZE);
	    if (local_top < local_mark_stack) return;
	    if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) {
		GC_return_mark_stack(local_mark_stack, local_top);
	if (GC_mark_stack_top < GC_first_nonempty &&
	    GC_active_count < GC_helper_count
	    && local_top > local_mark_stack + 1) {
	    /* Try to share the load, since the main stack is empty,	*/
	    /* and helper threads are waiting for a refill.		*/
	    /* The entries near the bottom of the stack are likely	*/
	    /* to require more work.  Thus we return those, even though	*/
	    /* it's harder.						*/
	    mse * new_bottom = local_mark_stack
				+ (local_top - local_mark_stack)/2;
	    GC_ASSERT(new_bottom > local_mark_stack
		      && new_bottom < local_top);
	    GC_return_mark_stack(local_mark_stack, new_bottom - 1);
	    memmove(local_mark_stack, new_bottom,
		    (local_top - new_bottom + 1) * sizeof(mse));
	    local_top -= (new_bottom - local_mark_stack);
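/* Illustrative sketch (not part of the collector): the splitting	*/
/* arithmetic used above, on a plain array.  The lower half of the	*/
/* local stack is handed back to the shared stack and the upper half	*/
/* is slid down to the base.  All names here are hypothetical.		*/
#if 0
  #include <string.h>

  typedef struct { void *start; unsigned long descr; } example_entry;

  /* Returns the new top index after giving away entries [0, half-1]. */
  static int example_share_lower_half(example_entry *stack,
                                      int top /* inclusive */)
  {
      int half = (top + 1) / 2;        /* number of entries to give away */
      /* ... hand stack[0 .. half-1] to the shared stack here ... */
      memmove(stack, stack + half,
              (size_t)(top - half + 1) * sizeof(example_entry));
      return top - half;
  }
#endif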
#define ENTRIES_TO_GET 5

long GC_markers = 2;		/* Normally changed by thread-library-	*/
				/* -specific code.			*/

/* Mark using the local mark stack until the global mark stack is empty	*/
/* and there are no active workers.  Update GC_first_nonempty to reflect	*/
/* progress.								*/
/* Caller does not hold mark lock.					*/
/* Caller has already incremented GC_helper_count.  We decrement it,	*/
/* and maintain GC_active_count.					*/
void GC_mark_local(mse *local_mark_stack, int id)
{
    mse * my_first_nonempty;

    GC_acquire_mark_lock();
    my_first_nonempty = GC_first_nonempty;
    GC_ASSERT(GC_first_nonempty >= GC_mark_stack &&
	      GC_first_nonempty <= GC_mark_stack_top + 1);
	GC_printf1("Starting mark helper %lu\n", (unsigned long)id);
    GC_release_mark_lock();
	mse * global_first_nonempty = GC_first_nonempty;

	GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
		  my_first_nonempty <= GC_mark_stack_top + 1);
	GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
		  global_first_nonempty <= GC_mark_stack_top + 1);
	if (my_first_nonempty < global_first_nonempty) {
	    my_first_nonempty = global_first_nonempty;
	} else if (global_first_nonempty < my_first_nonempty) {
	    GC_compare_and_exchange((word *)(&GC_first_nonempty),
				    (word) global_first_nonempty,
				    (word) my_first_nonempty);
	    /* If this fails, we just go ahead, without updating	*/
	    /* GC_first_nonempty.					*/
	/* Perhaps we should also update GC_first_nonempty, if it	*/
	/* is less.  But that would require using atomic updates.	*/
	my_top = GC_mark_stack_top;
	n_on_stack = my_top - my_first_nonempty + 1;
	if (0 == n_on_stack) {
	    GC_acquire_mark_lock();
	    my_top = GC_mark_stack_top;
	    n_on_stack = my_top - my_first_nonempty + 1;
	    if (0 == n_on_stack) {
		GC_ASSERT(GC_active_count <= GC_helper_count);
		/* Other markers may redeposit objects	*/
		if (0 == GC_active_count) GC_notify_all_marker();
		while (GC_active_count > 0
		       && GC_first_nonempty > GC_mark_stack_top) {
		    /* We will be notified if either GC_active_count	*/
		    /* reaches zero, or if more objects are pushed on	*/
		    /* the global mark stack.				*/
		if (GC_active_count == 0 &&
		    GC_first_nonempty > GC_mark_stack_top) {
		    GC_bool need_to_notify = FALSE;
		    /* The above conditions can't be falsified while we	*/
		    /* hold the mark lock, since neither			*/
		    /* GC_active_count nor GC_mark_stack_top can		*/
		    /* change.  GC_first_nonempty can only be		*/
		    /* incremented asynchronously.  Thus we know that	*/
		    /* both conditions actually held simultaneously.	*/
		    if (0 == GC_helper_count) need_to_notify = TRUE;
			"Finished mark helper %lu\n", (unsigned long)id);
		    GC_release_mark_lock();
		    if (need_to_notify) GC_notify_all_marker();
		/* else there's something on the stack again, or	*/
		/* another helper may push something.			*/
		GC_ASSERT(GC_active_count > 0);
		GC_release_mark_lock();
		GC_release_mark_lock();
	n_to_get = ENTRIES_TO_GET;
	if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
	local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
					local_mark_stack, n_to_get,
					&my_first_nonempty);
	GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
		  my_first_nonempty <= GC_mark_stack_top + 1);
	GC_do_local_mark(local_mark_stack, local_top);
/* Perform Parallel mark.			*/
/* We hold the GC lock, not the mark lock.	*/
/* Currently runs until the mark stack is	*/
/* empty.					*/
void GC_do_parallel_mark()
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];

    GC_acquire_mark_lock();
    GC_ASSERT(I_HOLD_LOCK());
    /* This could be a GC_ASSERT, but it seems safer to keep it on	*/
    /* all the time, especially since it's cheap.			*/
    if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
	ABORT("Tried to start parallel mark in bad state");
	GC_printf1("Starting marking for mark phase number %lu\n",
		   (unsigned long)GC_mark_no);
    GC_first_nonempty = GC_mark_stack;
    GC_active_count = 0;
    GC_helper_count = 1;
    GC_help_wanted = TRUE;
    GC_release_mark_lock();
    GC_notify_all_marker();
	/* Wake up potential helpers.	*/
    GC_mark_local(local_mark_stack, 0);
    GC_acquire_mark_lock();
    GC_help_wanted = FALSE;
    /* Done; clean up.	*/
    while (GC_helper_count > 0) GC_wait_marker();
    /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
	"Finished marking for mark phase number %lu\n",
	(unsigned long)GC_mark_no);
    GC_release_mark_lock();
    GC_notify_all_marker();
}
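/* Illustrative sketch (not part of the collector): the handshake	*/
/* performed above, reduced to a flag, a counter, a mutex and a	*/
/* condition variable.  All names are hypothetical; the real code	*/
/* uses GC_acquire_mark_lock / GC_notify_all_marker / GC_wait_marker.	*/
#if 0
  #include <pthread.h>

  static pthread_mutex_t ex_mark_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t ex_mark_cv = PTHREAD_COND_INITIALIZER;
  static int ex_help_wanted = 0;
  static int ex_helper_count = 0;

  /* Coordinator: open the helping window, do its own share of the	*/
  /* work, then close the window and wait for helpers to drain.	*/
  /* mark_local is assumed to decrement ex_helper_count and signal	*/
  /* ex_mark_cv when it finishes, as GC_mark_local does.		*/
  static void ex_coordinate(void (*mark_local)(int id))
  {
      pthread_mutex_lock(&ex_mark_lock);
      ex_helper_count = 1;                   /* the coordinator itself */
      ex_help_wanted = 1;
      pthread_mutex_unlock(&ex_mark_lock);
      pthread_cond_broadcast(&ex_mark_cv);   /* wake potential helpers */

      mark_local(0);

      pthread_mutex_lock(&ex_mark_lock);
      ex_help_wanted = 0;                    /* no new helpers may enter */
      while (ex_helper_count > 0)
          pthread_cond_wait(&ex_mark_cv, &ex_mark_lock);
      pthread_mutex_unlock(&ex_mark_lock);
  }
#endif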
/* Try to help out the marker, if it's running.		*/
/* We do not hold the GC lock, but the requestor does.	*/
void GC_help_marker(word my_mark_no)
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
    mse * my_first_nonempty;

    if (!GC_parallel) return;
    GC_acquire_mark_lock();
    while (GC_mark_no < my_mark_no
	   || !GC_help_wanted && GC_mark_no == my_mark_no) {
    my_id = GC_helper_count;
    if (GC_mark_no != my_mark_no || my_id >= GC_markers) {
	/* Second test is useful only if original threads can also	*/
	/* act as helpers.  Under Linux they can't.			*/
	GC_release_mark_lock();
    GC_helper_count = my_id + 1;
    GC_release_mark_lock();
    GC_mark_local(local_mark_stack, my_id);
    /* GC_mark_local decrements GC_helper_count. */
#endif /* PARALLEL_MARK */

/* Allocate or reallocate space for mark stack of size n words.	*/
/* May silently fail.							*/
static void alloc_mark_stack(n)
word n;
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));

    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
        if (new_stack != 0) {
          word displ = (word)GC_mark_stack & (GC_page_size - 1);
          signed_word size = GC_mark_stack_size * sizeof(struct GC_ms_entry);

          /* Recycle old space */
	  if (0 != displ) displ = GC_page_size - displ;
	  size = (size - displ) & ~(GC_page_size - 1);
	  GC_add_to_heap((struct hblk *)
			  ((word)GC_mark_stack + displ), (word)size);
          GC_mark_stack = new_stack;
          GC_mark_stack_size = n;
          GC_mark_stack_limit = new_stack + n;
	    if (GC_print_stats) {
	      GC_printf1("Grew mark stack to %lu frames\n",
			 (unsigned long) GC_mark_stack_size);
	    if (GC_print_stats) {
	      GC_printf1("Failed to grow mark stack to %lu frames\n",
			 (unsigned long) n);
        if (new_stack == 0) {
            GC_err_printf0("No space for mark stack\n");
        GC_mark_stack = new_stack;
        GC_mark_stack_size = n;
        GC_mark_stack_limit = new_stack + n;
    GC_mark_stack_top = GC_mark_stack-1;

    alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
/*
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked. t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
 */
void GC_push_all(bottom, top)
{
    register word length;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
	ABORT("unexpected mark stack overflow");
    length = top - bottom;
#   if GC_DS_TAGS > ALIGNMENT - 1
	length += GC_DS_TAGS;
	length &= ~GC_DS_TAGS;
    GC_mark_stack_top -> mse_start = (word *)bottom;
    GC_mark_stack_top -> mse_descr = length;
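/* Illustrative sketch (not part of the collector): the rounding done	*/
/* above.  A range descriptor is just its byte length, so the low tag	*/
/* bits must be clear; the length is rounded up to the next multiple	*/
/* of (tag mask + 1).  The constant below is a stand-in.		*/
#if 0
  enum { EX_DS_TAGS = 0x3 };	/* low bits reserved for the tag */

  static unsigned long example_length_descr(unsigned long nbytes)
  {
      return (nbytes + EX_DS_TAGS) & ~(unsigned long)EX_DS_TAGS;
  }
#endif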
/*
 * Analogous to the above, but push only those pages h with dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Used either to selectively push dirty pages, or to push a block
 * in piecemeal fashion, to allow for more marking concurrency.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
 */
void GC_push_selected(bottom, top, dirty_fn, push_fn)
int (*dirty_fn) GC_PROTO((struct hblk * h));
void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top));
{
    register struct hblk * h;

    bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));

    if (top == 0 || bottom == top) return;
    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
	if ((*dirty_fn)(h-1)) {
	    (*push_fn)(bottom, top);
    if ((*dirty_fn)(h-1)) {
	(*push_fn)(bottom, (ptr_t)h);
    while ((ptr_t)(h+1) <= top) {
	if ((*dirty_fn)(h)) {
	    if ((word)(GC_mark_stack_top - GC_mark_stack)
		> 3 * GC_mark_stack_size / 4) {
		/* Danger of mark stack overflow */
		(*push_fn)((ptr_t)h, top);
		(*push_fn)((ptr_t)h, (ptr_t)(h+1));
    if ((ptr_t)h != top) {
	if ((*dirty_fn)(h)) {
	    (*push_fn)((ptr_t)h, top);
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
	ABORT("unexpected mark stack overflow");
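/* Illustrative sketch (not part of the collector): the page-by-page	*/
/* walk performed above, on byte addresses with a stand-in page size.	*/
/* Only pages for which dirty_fn() returns nonzero are handed to	*/
/* push_fn(); dirty_fn is assumed to accept any address within the	*/
/* page.  All names and the page size are hypothetical.		*/
#if 0
  enum { EX_PAGE = 4096 };

  static void example_push_selected(char *bottom, char *top,
                                    int (*dirty_fn)(char *page),
                                    void (*push_fn)(char *lo, char *hi))
  {
      /* First page boundary strictly above bottom. */
      char *p = (char *)(((unsigned long)bottom + EX_PAGE)
                         & ~(unsigned long)(EX_PAGE - 1));

      if (top <= p) {                           /* range fits in one page */
          if (dirty_fn(bottom)) push_fn(bottom, top);
          return;
      }
      if (dirty_fn(bottom)) push_fn(bottom, p); /* leading partial page */
      for (; p + EX_PAGE <= top; p += EX_PAGE) {/* whole pages */
          if (dirty_fn(p)) push_fn(p, p + EX_PAGE);
      }
      if (p != top && dirty_fn(p)) push_fn(p, top); /* trailing partial page */
  }
#endif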
# ifndef SMALL_CONFIG

#ifdef PARALLEL_MARK
    /* Break up root sections into page size chunks to better spread	*/
    /* out work.							*/
    GC_bool GC_true_func(struct hblk *h) { return TRUE; }
#   define GC_PUSH_ALL(b,t) GC_push_selected(b,t,GC_true_func,GC_push_all);
# else
#   define GC_PUSH_ALL(b,t) GC_push_all(b,t);
# endif

void GC_push_conditional(bottom, top, all)
{
    if (GC_dirty_maintained) {
	    /* Pages that were never dirtied cannot contain pointers	*/
	    GC_push_selected(bottom, top, GC_page_was_ever_dirty, GC_push_all);
	    GC_push_all(bottom, top);
	GC_push_all(bottom, top);
	GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
# if defined(MSWIN32) || defined(MSWINCE)
  void __cdecl GC_push_one(p)
    GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);

struct GC_ms_entry *GC_mark_and_push(obj, mark_stack_ptr, mark_stack_limit, src)
struct GC_ms_entry * mark_stack_ptr;
struct GC_ms_entry * mark_stack_limit;
{
    PUSH_CONTENTS(obj, mark_stack_ptr /* modified */, mark_stack_limit, src,
		  was_marked /* internally generated exit label */);
    return mark_stack_ptr;
}
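/* Illustrative sketch (not part of the collector): a hypothetical	*/
/* client mark procedure for a two-pointer node, written in the style	*/
/* that GC_mark_and_push above supports.  The struct, the procedure	*/
/* name and its registration (not shown) are all assumptions.		*/
#if 0
  struct example_node { void *left; void *right; long datum; };

  static struct GC_ms_entry *example_mark_proc(word *addr,
					struct GC_ms_entry *mark_stack_ptr,
					struct GC_ms_entry *mark_stack_limit,
					word env)
  {
      struct example_node *n = (struct example_node *)addr;

      /* Push only the pointer-valued fields; datum is never scanned. */
      mark_stack_ptr = GC_mark_and_push(n -> left, mark_stack_ptr,
					mark_stack_limit, &(n -> left));
      mark_stack_ptr = GC_mark_and_push(n -> right, mark_stack_ptr,
					mark_stack_limit, &(n -> right));
      return mark_stack_ptr;
  }
#endif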
# define BASE(p) (word)GC_base((void *)(p))
# define BASE(p) (word)GC_base((char *)(p))

/* Mark and push (i.e. gray) a single object p onto the main	*/
/* mark stack.  Consider p to be valid if it is an interior	*/
/* pointer.							*/
/* The object p has passed a preliminary pointer validity	*/
/* test, but we do not definitely know whether it is valid.	*/
/* Mark bits are NOT atomically updated.  Thus this must be the	*/
/* only thread setting them.					*/
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
    void GC_mark_and_push_stack(p, source)
# else
    void GC_mark_and_push_stack(p)
# endif
{
    register hdr * hhdr;

    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
	 displ = BYTES_TO_WORDS(HBLKDISPL(r));
	 register map_entry_type map_entry;

	 displ = HBLKDISPL(p);
	 map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
	 if (map_entry >= MAX_OFFSET) {
	   if (map_entry == OFFSET_TOO_BIG || !GC_all_interior_pointers) {
	       displ = BYTES_TO_WORDS(HBLKDISPL(r));
	       if (r == 0) hhdr = 0;
	       /* Offset invalid, but map reflects interior pointers	*/
	 displ = BYTES_TO_WORDS(displ);
	 r = (word)((word *)(HBLKPTR(p)) + displ);
    /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
    /* displ is the word index within the block.		 */
#	ifdef PRINT_BLACK_LIST
	  GC_add_to_black_list_stack(p, source);
#	else
	  GC_add_to_black_list_stack(p);
#	endif
#	undef source  /* In case we had to define it. */
	if (!mark_bit_from_hdr(hhdr, displ)) {
	    set_mark_bit_from_hdr(hhdr, displ);
	    GC_STORE_BACK_PTR(source, (ptr_t)r);
	    PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
		     GC_mark_stack_limit);
# define TRACE_ENTRIES 1000

struct trace_entry {
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;

void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}

void GC_print_trace(word gc_no, GC_bool lock)
{
    struct trace_entry *p;

    for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
	if (i < 0) i = TRACE_ENTRIES-1;
	p = GC_trace_buf + i;
	if (p -> gc_no < gc_no || p -> kind == 0) return;
	printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
		p -> kind, p -> gc_no, p -> words_allocd,
		(p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
    printf("Trace incomplete\n");

# endif /* TRACE_BUF */
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the contents
 * change.
 */
void GC_push_all_eager(bottom, top)
{
    word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    if (top == 0) return;
    /* check all pointers in range and push if they appear	*/
    /* to be valid.						*/
      lim = t - 1 /* longword */;
      for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
	GC_PUSH_ONE_STACK(q, p);
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 */
void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
ptr_t cold_gc_frame;
{
  if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
#   define EAGER_BYTES 1024
    /* Push the hot end of the stack eagerly, so that register values	*/
    /* saved inside GC frames are marked before they disappear.	*/
    /* The rest of the marking can be deferred until later.		*/
    if (0 == cold_gc_frame) {
	GC_push_all_stack(bottom, top);
    GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
#   ifdef STACK_GROWS_DOWN
	GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
	GC_push_all_eager(bottom, cold_gc_frame);
#   else /* STACK_GROWS_UP */
	GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
	GC_push_all_eager(cold_gc_frame, top);
#   endif /* STACK_GROWS_UP */
    GC_push_all_eager(bottom, top);
    GC_add_trace_entry("GC_push_all_stack", bottom, top);
#endif /* !THREADS */
void GC_push_all_stack(bottom, top)
{
  if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
    GC_push_all(bottom, top);
    GC_push_all_eager(bottom, top);
#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
/* Push all objects reachable from marked objects in the given block	*/
/* of size 1 objects.							*/
void GC_push_marked1(h, hhdr)
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    register mse * mark_stack_top = GC_mark_stack_top;
    register mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
	mark_word = *mark_word_addr++;
	while(mark_word != 0) {
	  if (mark_word & 1) {
	      GC_PUSH_ONE_HEAP(q, p + i);
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;
}
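/* Illustrative sketch (not part of the collector): the bit walk used	*/
/* in GC_push_marked1/2/4.  Each set bit in a mark word selects one	*/
/* object in the block; the word is shifted right as the object	*/
/* pointer advances.  All names here are hypothetical.			*/
#if 0
  static void example_walk_mark_bits(unsigned long mark_word,
				     void **first_obj, int obj_words,
				     void (*push)(void **obj))
  {
      void **p = first_obj;

      while (mark_word != 0) {
	  if (mark_word & 1) push(p);
	  p += obj_words;
	  mark_word >>= 1;
      }
  }
#endif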
/* Push all objects reachable from marked objects in the given block	*/
/* of size 2 objects.							*/
void GC_push_marked2(h, hhdr)
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    register mse * mark_stack_top = GC_mark_stack_top;
    register mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
	mark_word = *mark_word_addr++;
	while(mark_word != 0) {
	  if (mark_word & 1) {
	      GC_PUSH_ONE_HEAP(q, p + i);
	      GC_PUSH_ONE_HEAP(q, p + i + 1);
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;
/* Push all objects reachable from marked objects in the given block	*/
/* of size 4 objects.							*/
/* There is a risk of mark stack overflow here.  But we handle that.	*/
/* And only unmarked objects get pushed, so it's not very likely.	*/
void GC_push_marked4(h, hhdr)
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    register mse * mark_stack_top = GC_mark_stack_top;
    register mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
	mark_word = *mark_word_addr++;
	while(mark_word != 0) {
	  if (mark_word & 1) {
	      GC_PUSH_ONE_HEAP(q, p + i);
	      GC_PUSH_ONE_HEAP(q, p + i + 1);
	      GC_PUSH_ONE_HEAP(q, p + i + 2);
	      GC_PUSH_ONE_HEAP(q, p + i + 3);
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;

#endif /* UNALIGNED */

#endif /* SMALL_CONFIG */
/* Push all objects reachable from marked objects in the given block */
void GC_push_marked(h, hhdr)
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;
    register int descr = hhdr -> hb_descr;
    register int word_no;
    register word * lim;
    register mse * GC_mark_stack_top_reg;
    register mse * mark_stack_limit = GC_mark_stack_limit;

    /* Some quick shortcuts: */
	if ((0 | GC_DS_LENGTH) == descr) return;
        if (GC_block_empty(hhdr)/* nothing marked */) return;
    GC_n_rescuing_pages++;
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJSZ) {
        lim = (word *)(h + 1) - sz;

#   if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
	GC_push_marked1(h, hhdr);
#   if !defined(SMALL_CONFIG) && !defined(UNALIGNED) && \
       !defined(USE_MARK_BYTES)
	GC_push_marked2(h, hhdr);
	GC_push_marked4(h, hhdr);
	GC_mark_stack_top_reg = GC_mark_stack_top;
	for (p = (word *)h, word_no = 0; p <= lim; p += sz, word_no += sz) {
	    if (mark_bit_from_hdr(hhdr, word_no)) {
		/* Mark from fields inside the object */
		PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
		/* Subtract this object from total, since it was	*/
		/* added in twice.					*/
		GC_composite_in_use -= sz;
	GC_mark_stack_top = GC_mark_stack_top_reg;
#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty	*/
GC_bool GC_block_was_dirty(h, hhdr)
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;

    if (sz <= MAXOBJSZ) {
	return(GC_page_was_dirty(h));
	register ptr_t p = (ptr_t)h;
	sz = WORDS_TO_BYTES(sz);
	while (p < (ptr_t)h + sz) {
	    if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
#endif /* SMALL_CONFIG */
/* Similar to GC_push_marked, but return address of next block	*/
struct hblk * GC_push_next_marked(h)
{
    register hdr * hhdr;

    h = GC_next_used_block(h);
    if (h == 0) return(0);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages	*/
struct hblk * GC_push_next_marked_dirty(h)
{
    register hdr * hhdr;

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    h = GC_next_used_block(h);
    if (h == 0) return(0);
#   ifdef STUBBORN_ALLOC
      if (hhdr -> hb_obj_kind == STUBBORN) {
	if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
	if (GC_block_was_dirty(h, hhdr)) break;
      if (GC_block_was_dirty(h, hhdr)) break;
    h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
/* Similar to above, but for uncollectable pages.  Needed since we	*/
/* do not clear marks for such pages, even for full collections.	*/
struct hblk * GC_push_next_marked_uncollectable(h)
{
    register hdr * hhdr = HDR(h);

    h = GC_next_used_block(h);
    if (h == 0) return(0);
    if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
    h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}