/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* We put this here to minimize the risk of inlining. */
void GC_noop(void *p, ...) {}
/* Single argument version, robust against whole program analysis. */
void GC_noop1(x)
word x;
{
    static VOLATILE word sink;

    sink = x;
}
/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */

word GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
/* Initialize GC_obj_kinds and the standard free lists properly.	*/
/* This must be done statically since they may be accessed before	*/
/* GC_init is called.							*/
/* It's done here, since we need to deal with mark descriptors.	*/
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
		0 | DS_LENGTH, FALSE, FALSE },
/* NORMAL  */ { &GC_objfreelist[0], 0,
#		if defined(ADD_BYTE_AT_END) && ALIGNMENT > DS_TAGS
		(word)(-ALIGNMENT) | DS_LENGTH,
#		else
		0 | DS_LENGTH,
#		endif
		TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
	      { &GC_uobjfreelist[0], 0,
		0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
   /* AUNCOLLECTABLE */
	      { &GC_auobjfreelist[0], 0,
		0 | DS_LENGTH, FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/*STUBBORN*/ { &GC_sobjfreelist[0], 0,
		0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};

# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 5;
#   else
      int GC_n_kinds = 4;
#   endif
# else
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 4;
#   else
      int GC_n_kinds = 3;
#   endif
# endif
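/* Illustration (not part of the collector): the third field of each	*/
/* entry above is the kind's default mark descriptor.  A descriptor	*/
/* whose low DS_TAGS bits are DS_LENGTH is just the object length in	*/
/* bytes, so building one from a size is trivial.  A hypothetical	*/
/* sketch, assuming DS_LENGTH == 0 as defined in gc_mark.h:		*/
#if 0
static word GC_example_length_descr(word nbytes)
{
    return(nbytes | DS_LENGTH);	/* tagging with DS_LENGTH is a no-op */
}
#endif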
# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
		/* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a	*/
		/* multiple of HBLKSIZE.				*/
		/* The incremental collector actually likes a larger	*/
		/* size, since it wants to push all marked dirty objs	*/
		/* before marking anything new.  Currently we let it	*/
		/* grow dynamically.					*/
# endif
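/* A hypothetical compile-time check of the constraint stated above;	*/
/* illustrative only, not in the original source:			*/
#if 0
typedef char GC_mark_stack_size_ok
	[(INITIAL_MARK_STACK_SIZE * sizeof(mse)) % HBLKSIZE == 0 ? 1 : -1];
#endif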
/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack (incl.) and GC_mark_stack_top (incl.)
 * still need to be marked from.
 */

word GC_n_rescuing_pages;	/* Number of dirty pages we marked from */
				/* excludes ptrfree pages, etc.		*/
mse * GC_mark_stack;

word GC_mark_stack_size = 0;

mse * GC_mark_stack_top;

static struct hblk * scan_ptr;

mark_state_t GC_mark_state = MS_NONE;

GC_bool GC_mark_stack_too_small = FALSE;

GC_bool GC_objects_are_marked = FALSE;	/* Are there collectable marked	*/
					/* objects in the heap?		*/
/* Is a collection in progress?  Note that this can return true in the	*/
/* nonincremental case, if a collection has been abandoned and the	*/
/* mark state is now MS_INVALID.					*/
GC_bool GC_collection_in_progress()
{
    return(GC_mark_state != MS_NONE);
}
/* Clear all mark bits in the header. */
void GC_clear_hdr_marks(hhdr)
register hdr * hhdr;
{
    BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
}
/* Set all mark bits in the header.  Used for uncollectable blocks. */
void GC_set_hdr_marks(hhdr)
register hdr * hhdr;
{
    register int i;

    for (i = 0; i < MARK_BITS_SZ; ++i) {
	hhdr -> hb_marks[i] = ONES;
    }
}
/*
 * Clear all mark bits associated with block h.
 */
/*ARGSUSED*/
static void clear_marks_for_block(h, dummy)
struct hblk *h;
word dummy;
{
    register hdr * hhdr = HDR(h);

    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bit for these is cleared only once the object is	*/
        /* explicitly deallocated.  This either frees the block, or	*/
        /* the bit is cleared once the object is on the free list.	*/
    GC_clear_hdr_marks(hhdr);
}
/* Slow but general routines for setting/clearing/asking about mark bits */
void GC_set_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    set_mark_bit_from_hdr(hhdr, word_no);
}
void GC_clear_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    clear_mark_bit_from_hdr(hhdr, word_no);
}
GC_bool GC_is_marked(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    return(mark_bit_from_hdr(hhdr, word_no));
}
/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
void GC_clear_marks()
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
    scan_ptr = 0;
#   ifdef GATHERSTATS
	/* Counters reflect currently marked objects: reset here */
	GC_composite_in_use = 0;
	GC_atomic_in_use = 0;
#   endif
}
/* Initiate a garbage collection.  Initiates a full collection if the	*/
/* mark state is invalid.						*/
void GC_initiate_gc()
{
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
	GC_read_changed();
#   endif
#   ifdef CHECKSUMS
	{
	    extern void GC_check_dirty();

	    if (GC_dirty_maintained) GC_check_dirty();
	}
#   endif
#   ifdef GATHERSTATS
	GC_n_rescuing_pages = 0;
#   endif
    if (GC_mark_state == MS_NONE) {
	GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
	ABORT("unexpected state");
    } /* else this is really a full collection, and mark	*/
      /* bits are invalid.					*/
    scan_ptr = 0;
}
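/* How the pieces above fit together: a caller typically clears marks,	*/
/* calls GC_initiate_gc(), then repeatedly calls GC_mark_some() until	*/
/* it returns TRUE (cf. the driver loop in alloc.c).  An illustrative	*/
/* sketch, not actual collector code:					*/
#if 0
    GC_initiate_gc();
    while (!GC_mark_some((ptr_t)0)) {
	/* each call performs roughly a page worth of marking work */
    }
#endif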
static void alloc_mark_stack();

/* Perform a small amount of marking.			*/
/* We try to touch roughly a page of memory.		*/
/* Return TRUE if we just finished a mark phase.	*/
/* Cold_gc_frame is an address inside a GC frame that	*/
/* remains valid until all marking is complete.		*/
/* A zero value indicates that it's OK to miss some	*/
/* register values.					*/
GC_bool GC_mark_some(cold_gc_frame)
ptr_t cold_gc_frame;
{
#ifdef MSWIN32
  /* Windows 98 appears to asynchronously create and remove writable	*/
  /* memory mappings, for reasons we haven't yet understood.  Since	*/
  /* we look for writable regions to determine the root set, we may	*/
  /* try to mark from an address range that disappeared since we	*/
  /* started the collection.  Thus we have to recover from faults here. */
  /* This code does not appear to be necessary for Windows 95/NT/2000.	*/
  /* Note that this code should never generate an incremental GC write	*/
  /* fault.								*/
  __try {
#endif /* MSWIN32 */
    switch(GC_mark_state) {
	case MS_NONE:
	    return(FALSE);

	case MS_PUSH_RESCUERS:
	    if (GC_mark_stack_top
		>= GC_mark_stack + GC_mark_stack_size
		   - INITIAL_MARK_STACK_SIZE/2) {
		/* Go ahead and mark, even though that might cause us to */
		/* see more marked dirty objects later on.  Avoid this	  */
		/* in the future.					  */
		GC_mark_stack_too_small = TRUE;
		GC_mark_from_mark_stack();
		return(FALSE);
	    } else {
		scan_ptr = GC_push_next_marked_dirty(scan_ptr);
		if (scan_ptr == 0) {
#		    ifdef PRINTSTATS
			GC_printf1("Marked from %lu dirty pages\n",
				   (unsigned long)GC_n_rescuing_pages);
#		    endif
		    GC_push_roots(FALSE, cold_gc_frame);
		    GC_objects_are_marked = TRUE;
		    if (GC_mark_state != MS_INVALID) {
			GC_mark_state = MS_ROOTS_PUSHED;
		    }
		}
	    }
	    return(FALSE);

	case MS_PUSH_UNCOLLECTABLE:
	    if (GC_mark_stack_top
		>= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
		GC_mark_from_mark_stack();
		return(FALSE);
	    } else {
		scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
		if (scan_ptr == 0) {
		    GC_push_roots(TRUE, cold_gc_frame);
		    GC_objects_are_marked = TRUE;
		    if (GC_mark_state != MS_INVALID) {
			GC_mark_state = MS_ROOTS_PUSHED;
		    }
		}
	    }
	    return(FALSE);

	case MS_ROOTS_PUSHED:
	    if (GC_mark_stack_top >= GC_mark_stack) {
		GC_mark_from_mark_stack();
		return(FALSE);
	    } else {
		GC_mark_state = MS_NONE;
		if (GC_mark_stack_too_small) {
		    alloc_mark_stack(2*GC_mark_stack_size);
		}
		return(TRUE);
	    }

	case MS_INVALID:
	case MS_PARTIALLY_INVALID:
	    if (!GC_objects_are_marked) {
		GC_mark_state = MS_PUSH_UNCOLLECTABLE;
		return(FALSE);
	    }
	    if (GC_mark_stack_top >= GC_mark_stack) {
		GC_mark_from_mark_stack();
		return(FALSE);
	    }
	    if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
		/* About to start a heap scan for marked objects. */
		/* Mark stack is empty.  OK to reallocate.	  */
		if (GC_mark_stack_too_small) {
		    alloc_mark_stack(2*GC_mark_stack_size);
		}
		GC_mark_state = MS_PARTIALLY_INVALID;
	    }
	    scan_ptr = GC_push_next_marked(scan_ptr);
	    if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
		GC_push_roots(TRUE, cold_gc_frame);
		GC_objects_are_marked = TRUE;
		if (GC_mark_state != MS_INVALID) {
		    GC_mark_state = MS_ROOTS_PUSHED;
		}
	    }
	    return(FALSE);

353 ABORT("GC_mark_some: bad state");
357 } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION
?
358 EXCEPTION_EXECUTE_HANDLER
: EXCEPTION_CONTINUE_SEARCH
) {
360 GC_printf0("Caught ACCESS_VIOLATION in marker. "
361 "Memory mapping disappeared.\n");
362 # endif /* PRINTSTATS */
363 /* We have bad roots on the stack. Discard mark stack. */
364 /* Rescan from marked objects. Redetermine roots. */
365 GC_invalidate_mark_state();
GC_bool GC_mark_stack_empty()
{
    return(GC_mark_stack_top < GC_mark_stack);
}
#ifdef PROF_MARKER
    word GC_prof_array[10];
#   define PROF(n) GC_prof_array[n]++
#else
#   define PROF(n)
#endif
/* Given a pointer to someplace other than a small object page or the	*/
/* first page of a large object, return a pointer either to the	*/
/* start of the large object or NIL.					*/
/* In the latter case black list the address current.			*/
/* Returns NIL without black listing if current points to a block	*/
/* with IGNORE_OFF_PAGE set.						*/
/*ARGSUSED*/
# ifdef PRINT_BLACK_LIST
  ptr_t GC_find_start(current, hhdr, source)
  word source;
# else
  ptr_t GC_find_start(current, hhdr)
# define source 0
# endif
register ptr_t current;
register hdr * hhdr;
{
#   ifdef ALL_INTERIOR_POINTERS
	if (hhdr != 0) {
	    register ptr_t orig = current;

	    current = (ptr_t)HBLKPTR(current) + HDR_BYTES;
	    do {
	      current = current - HBLKSIZE*(word)hhdr;
	      hhdr = HDR(current);
	    } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
	    /* current points to the start of the large object */
	    if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(0);
	    if ((word *)orig - (word *)current
		 >= (ptrdiff_t)(hhdr->hb_sz)) {
		/* Pointer past the end of the block */
		GC_ADD_TO_BLACK_LIST_NORMAL(orig, source);
		return(0);
	    }
	    return(current);
	} else {
	    GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
	    return(0);
	}
#   else
	GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
	return(0);
#   endif
#   undef source
}
void GC_invalidate_mark_state()
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_top = GC_mark_stack-1;
}
mse * GC_signal_mark_stack_overflow(msp)
mse * msp;
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_too_small = TRUE;
#   ifdef PRINTSTATS
	GC_printf1("Mark stack overflow; current size = %lu entries\n",
		   GC_mark_stack_size);
#   endif
    return(msp - INITIAL_MARK_STACK_SIZE/8);
}
/*
 * Mark objects pointed to by the regions described by
 * mark stack entries between GC_mark_stack and GC_mark_stack_top,
 * inclusive.  Assumes the upper limit of a mark stack entry
 * is never 0.  A mark stack entry never has size 0.
 * We try to traverse on the order of a hblk of memory before we return.
 * Caller is responsible for calling this until the mark stack is empty.
 * Note that this is the most performance critical routine in the
 * collector.  Hence it contains all sorts of ugly hacks to speed
 * things up.  In particular, we avoid procedure calls on the common
 * path, we take advantage of peculiarities of the mark descriptor
 * encoding, we optionally maintain a cache for the block address to
 * header mapping, we prefetch when an object is "grayed", etc.
 */
void GC_mark_from_mark_stack()
{
  mse * GC_mark_stack_reg = GC_mark_stack;
  mse * GC_mark_stack_top_reg = GC_mark_stack_top;
  mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);
  int credit = HBLKSIZE;	/* Remaining credit for marking work	*/
  register word * current_p;	/* Pointer to current candidate ptr.	*/
  register word current;	/* Candidate pointer.			*/
  register word * limit;	/* (Incl) limit of current candidate	*/
				/* range				*/
  register word descr;
  register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  register ptr_t least_ha = GC_least_plausible_heap_addr;
# define SPLIT_RANGE_WORDS 128	/* Must be power of 2.		*/
    GC_objects_are_marked = TRUE;

#   ifdef OS2 /* Use untweaked version to circumvent compiler problem */
	while (GC_mark_stack_top_reg >= GC_mark_stack_reg && credit >= 0) {
#   else
	while ((((ptr_t)GC_mark_stack_top_reg - (ptr_t)GC_mark_stack_reg)
		| credit) >= 0) {
#   endif
	current_p = GC_mark_stack_top_reg -> mse_start;
	descr = GC_mark_stack_top_reg -> mse_descr;
      retry:
	/* current_p and descr describe the current object.		*/
	/* *GC_mark_stack_top_reg is vacant.				*/
	/* The following is 0 only for small objects described by a simple */
	/* length descriptor.  For many applications this is the common */
	/* case, so we try to detect it quickly.			*/
	if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | DS_TAGS)) {
	  word tag = descr & DS_TAGS;

	  switch(tag) {
	    case DS_LENGTH:
	      /* Large length.						*/
	      /* Process part of the range to avoid pushing too much on the */
	      /* stack.							*/
	      GC_mark_stack_top_reg -> mse_start =
		 limit = current_p + SPLIT_RANGE_WORDS-1;
	      GC_mark_stack_top_reg -> mse_descr =
			descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
	      /* Make sure that pointers overlapping the two ranges are */
	      /* considered.						*/
	      limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
	      break;
	    case DS_BITMAP:
	      GC_mark_stack_top_reg--;
	      descr &= ~DS_TAGS;
	      credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
	      while (descr != 0) {
		if ((signed_word)descr < 0) {
		  current = *current_p;
		  if ((ptr_t)current >= least_ha
		      && (ptr_t)current < greatest_ha) {
		    HC_PUSH_CONTENTS((ptr_t)current, GC_mark_stack_top_reg,
				     mark_stack_limit, current_p, exit1);
		  }
		}
		descr <<= 1;
		++ current_p;
	      }
	      continue;
	    case DS_PROC:
	      GC_mark_stack_top_reg--;
	      credit -= PROC_BYTES;
	      GC_mark_stack_top_reg =
		  (*PROC(descr))
		      (current_p, GC_mark_stack_top_reg,
		       mark_stack_limit, ENV(descr));
	      continue;
	    case DS_PER_OBJECT:
	      if ((signed_word)descr >= 0) {
		/* Descriptor is in the object.	*/
		descr = *(word *)((ptr_t)current_p + descr - DS_PER_OBJECT);
	      } else {
		/* Descriptor is in type descriptor pointed to by first	*/
		/* word in object.					*/
		ptr_t type_descr = *(ptr_t *)current_p;
		/* type_descr is either a valid pointer to the descriptor */
		/* structure, or this object was on a free list.  If it   */
		/* was anything but the last object on the free list, we  */
		/* will misinterpret the next object on the free list as  */
		/* the type descriptor, and get a 0 GC descriptor, which  */
		/* is ideal.  Unfortunately, we need to check for the	   */
		/* last object case explicitly.			   */
		if (0 == type_descr) {
		    /* Rarely executed.	*/
		    GC_mark_stack_top_reg--;
		    continue;
		}
		descr = *(word *)(type_descr
			  - (descr - (DS_PER_OBJECT - INDIR_PER_OBJ_BIAS)));
	      }
	      goto retry;
	  }
	} else /* Small object with length descriptor */ {
	    GC_mark_stack_top_reg--;
	    limit = (word *)(((ptr_t)current_p) + (word)descr);
	}
	/* The simple case in which we're scanning a range.	*/
	credit -= (ptr_t)limit - (ptr_t)current_p;
	limit -= 1;
	{
#	  ifndef SMALL_CONFIG
	    word deferred;

	    /* Try to prefetch the next pointer to be examined asap.	*/
	    /* Empirically, this also seems to help slightly without	*/
	    /* prefetches, at least on linux/X86.  Presumably this loop	*/
	    /* ends up with less register pressure, and gcc thus ends up */
	    /* generating slightly better code.  Overall gcc code quality */
	    /* for this loop is still not great.			*/
	    for(;;) {
	      PREFETCH((ptr_t)limit - PREF_DIST*CACHE_LINE_SIZE);
	      deferred = *limit;
	      limit = (word *)((char *)limit - ALIGNMENT);
	      if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
		PREFETCH(deferred);
		break;
	      }
	      if (current_p > limit) goto next_object;
	      /* Unroll once, so we don't do too many of the prefetches	*/
	      /* based on limit.					*/
	      deferred = *limit;
	      limit = (word *)((char *)limit - ALIGNMENT);
	      if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
		PREFETCH(deferred);
		break;
	      }
	      if (current_p > limit) goto next_object;
	    }
#	  endif
	  while (current_p <= limit) {
	    /* Empirically, unrolling this loop doesn't help a lot.	*/
	    /* Since HC_PUSH_CONTENTS expands to a lot of code,		*/
	    /* we don't.						*/
	    current = *current_p;
	    PREFETCH((ptr_t)current_p + PREF_DIST*CACHE_LINE_SIZE);
	    if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
	      /* Prefetch the contents of the object we just pushed.  It's */
	      /* likely we will need them soon.				*/
	      PREFETCH(current);
	      HC_PUSH_CONTENTS((ptr_t)current, GC_mark_stack_top_reg,
			       mark_stack_limit, current_p, exit2);
	    }
	    current_p = (word *)((char *)current_p + ALIGNMENT);
	  }
#	  ifndef SMALL_CONFIG
	    /* We still need to mark the entry we previously prefetched.  */
	    /* We already know that it passes the preliminary pointer	  */
	    /* validity test.						  */
	    HC_PUSH_CONTENTS((ptr_t)deferred, GC_mark_stack_top_reg,
			     mark_stack_limit, current_p, exit4);
	    next_object:;
#	  endif
	}
    }
    GC_mark_stack_top = GC_mark_stack_top_reg;
}
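/* The caller's side of the contract stated above: keep invoking this	*/
/* routine until the mark stack drains.  An illustrative sketch only:	*/
#if 0
    while (!GC_mark_stack_empty()) {
	GC_mark_from_mark_stack();  /* ~HBLKSIZE bytes of work per call */
    }
#endif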
/* Allocate or reallocate space for mark stack of size n entries.  */
/* May silently fail.						*/
static void alloc_mark_stack(n)
word n;
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct ms_entry));

    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
	if (new_stack != 0) {
	  word displ = (word)GC_mark_stack & (GC_page_size - 1);
	  signed_word size = GC_mark_stack_size * sizeof(struct ms_entry);

	  /* Recycle old space */
	  if (0 != displ) displ = GC_page_size - displ;
	  size = (size - displ) & ~(GC_page_size - 1);
	  if (size > 0) {
	    GC_add_to_heap((struct hblk *)
			    ((word)GC_mark_stack + displ), (word)size);
	  }
	  GC_mark_stack = new_stack;
	  GC_mark_stack_size = n;
#	  ifdef PRINTSTATS
	      GC_printf1("Grew mark stack to %lu frames\n",
			 (unsigned long) GC_mark_stack_size);
#	  endif
	} else {
#	  ifdef PRINTSTATS
	      GC_printf1("Failed to grow mark stack to %lu frames\n",
			 (unsigned long) n);
#	  endif
	}
    } else {
	if (new_stack == 0) {
	    GC_err_printf0("No space for mark stack\n");
	    EXIT();
	}
	GC_mark_stack = new_stack;
	GC_mark_stack_size = n;
    }
    GC_mark_stack_top = GC_mark_stack-1;
}

void GC_mark_init()
{
    alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
}
/*
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked. t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
 */
void GC_push_all(bottom, top)
ptr_t bottom;
ptr_t top;
{
    register word length;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
	ABORT("unexpected mark stack overflow");
    }
    length = top - bottom;
#   if DS_TAGS > ALIGNMENT - 1
	length += DS_TAGS;
	length &= ~DS_TAGS;
#   endif
    GC_mark_stack_top -> mse_start = (word *)bottom;
    GC_mark_stack_top -> mse_descr = length;
}
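/* An illustrative call, pushing a hypothetical root segment delimited	*/
/* by region_start and region_end (names invented for this example):	*/
#if 0
    GC_push_all((ptr_t)region_start, (ptr_t)region_end);
#endif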
/*
 * Analogous to the above, but push only those pages that may have been
 * dirtied.  A block h is assumed dirty if dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
 */
void GC_push_dirty(bottom, top, dirty_fn, push_fn)
ptr_t bottom;
ptr_t top;
int (*dirty_fn)(/* struct hblk * h */);
void (*push_fn)(/* ptr_t bottom, ptr_t top */);
{
    register struct hblk * h;

    bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
	if ((*dirty_fn)(h-1)) {
	    (*push_fn)(bottom, top);
	}
	return;
    }
    if ((*dirty_fn)(h-1)) {
	(*push_fn)(bottom, (ptr_t)h);
    }
    while ((ptr_t)(h+1) <= top) {
	if ((*dirty_fn)(h)) {
	    if ((word)(GC_mark_stack_top - GC_mark_stack)
		> 3 * GC_mark_stack_size / 4) {
		/* Danger of mark stack overflow */
		(*push_fn)((ptr_t)h, top);
		return;
	    } else {
		(*push_fn)((ptr_t)h, (ptr_t)(h+1));
	    }
	}
	h++;
    }
    if ((ptr_t)h != top) {
	if ((*dirty_fn)(h)) {
	    (*push_fn)((ptr_t)h, top);
	}
    }
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
	ABORT("unexpected mark stack overflow");
    }
}
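/* The dirty_fn/push_fn protocol is generic: for instance, a predicate	*/
/* that reports every page dirty degrades GC_push_dirty to a blockwise	*/
/* GC_push_all.  A hypothetical sketch, not part of the collector:	*/
#if 0
static int GC_example_always_dirty(h)
struct hblk * h;
{
    return(TRUE);
}
/* ... GC_push_dirty(bottom, top, GC_example_always_dirty, GC_push_all); */
#endif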
# ifndef SMALL_CONFIG
void GC_push_conditional(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    if (all) {
      if (GC_dirty_maintained) {
#	ifdef PROC_VDB
	    /* Pages that were never dirtied cannot contain pointers	*/
	    GC_push_dirty(bottom, top, GC_page_was_ever_dirty, GC_push_all);
#	else
	    GC_push_all(bottom, top);
#	endif
      } else {
	GC_push_all(bottom, top);
      }
    } else {
	GC_push_dirty(bottom, top, GC_page_was_dirty, GC_push_all);
    }
}
# endif
void __cdecl GC_push_one(p)
word p;
{
#   ifdef NURSERY
      if (0 != GC_push_proc) {
	GC_push_proc(p);
	return;
      }
#   endif
    GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
}
# ifdef __STDC__
#   define BASE(p) (word)GC_base((void *)(p))
# else
#   define BASE(p) (word)GC_base((char *)(p))
# endif
/* As above, but argument passed preliminary test. */
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
    void GC_push_one_checked(p, interior_ptrs, source)
    ptr_t source;
# else
    void GC_push_one_checked(p, interior_ptrs)
#   define source 0
# endif
register word p;
register GC_bool interior_ptrs;
{
    register word r;
    register hdr * hhdr;
    register int displ;

    GET_HDR(p, hhdr);
    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
	if (hhdr != 0 && interior_ptrs) {
	  r = BASE(p);
	  hhdr = HDR(r);
	  displ = BYTES_TO_WORDS(HBLKDISPL(r));
	} else {
	  hhdr = 0;
	}
    } else {
	register map_entry_type map_entry;

	displ = HBLKDISPL(p);
	map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
	if (map_entry == OBJ_INVALID) {
#	  ifndef ALL_INTERIOR_POINTERS
	    if (interior_ptrs) {
	      r = BASE(p);
	      displ = BYTES_TO_WORDS(HBLKDISPL(r));
	      if (r == 0) hhdr = 0;
	    } else {
	      hhdr = 0;
	    }
#	  else
	    /* map already reflects interior pointers */
	    hhdr = 0;
#	  endif
	} else {
	  displ = BYTES_TO_WORDS(displ);
	  displ -= map_entry;
	  r = (word)((word *)(HBLKPTR(p)) + displ);
	}
    }
    /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
    /* displ is the word index within the block.		 */
    if (hhdr == 0) {
	if (interior_ptrs) {
#	    ifdef PRINT_BLACK_LIST
	      GC_add_to_black_list_stack(p, source);
#	    else
	      GC_add_to_black_list_stack(p);
#	    endif
	} else {
	    GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
#	    undef source  /* In case we had to define it. */
	}
    } else {
	if (!mark_bit_from_hdr(hhdr, displ)) {
	    set_mark_bit_from_hdr(hhdr, displ);
	    GC_STORE_BACK_PTR(source, (ptr_t)r);
	    PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
		     &(GC_mark_stack[GC_mark_stack_size]));
	}
    }
}
# ifdef TRACE_BUF

# define TRACE_ENTRIES 1000

struct trace_entry {
    char * kind;
    word gc_no;
    word words_allocd;
    word arg1;
    word arg2;
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;
void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    GC_trace_buf_ptr++;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}
void GC_print_trace(word gc_no, GC_bool lock)
{
    register int i;
    register struct trace_entry *p;

    if (lock) LOCK();
    for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
	if (i < 0) i = TRACE_ENTRIES-1;
	p = GC_trace_buf + i;
	if (p -> gc_no < gc_no || p -> kind == 0) return;
	printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
		p -> kind, p -> gc_no, p -> words_allocd,
		(p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
    }
    printf("Trace incomplete\n");
    if (lock) UNLOCK();
}

# endif /* TRACE_BUF */
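/* Illustrative use of the trace buffer in a TRACE_BUF build, dumping	*/
/* entries since the previous collection (sketch only):		*/
#if 0
    GC_print_trace(GC_gc_no - 1, TRUE);
#endif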
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the contents
 * change.
 */
void GC_push_all_eager(bottom, top)
ptr_t bottom;
ptr_t top;
{
    word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
    register word *p;
    register word q;
    register word *lim;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    if (top == 0) return;
    /* Check all pointers in range and push if they appear to be valid. */
    lim = t - 1 /* longword */;
    for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
	q = *p;
	GC_PUSH_ONE_STACK(q, p);
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
#ifndef THREADS
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 */
void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
ptr_t bottom;
ptr_t top;
ptr_t cold_gc_frame;
{
#   ifdef ALL_INTERIOR_POINTERS
#	define EAGER_BYTES 1024
	/* Push the hot end of the stack eagerly, so that register values */
	/* saved inside GC frames are marked before they disappear.	*/
	/* The rest of the marking can be deferred until later.	*/
	if (0 == cold_gc_frame) {
	    GC_push_all_stack(bottom, top);
	    return;
	}
#	ifdef STACK_GROWS_DOWN
	    GC_push_all_eager(bottom, cold_gc_frame);
	    GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
#	else /* STACK_GROWS_UP */
	    GC_push_all_eager(cold_gc_frame, top);
	    GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
#	endif /* STACK_GROWS_UP */
#   else
	GC_push_all_eager(bottom, top);
#   endif
#   ifdef TRACE_BUF
	GC_add_trace_entry("GC_push_all_stack", bottom, top);
#   endif
}
#endif /* !THREADS */
void GC_push_all_stack(bottom, top)
ptr_t bottom;
ptr_t top;
{
#   ifdef ALL_INTERIOR_POINTERS
	GC_push_all(bottom, top);
#   else
	GC_push_all_eager(bottom, top);
#   endif
}
#ifndef SMALL_CONFIG
/* Push all objects reachable from marked objects in the given block */
/* of size 1 objects.						      */
void GC_push_marked1(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
	while( p < plim )  {
	    mark_word = *mark_word_addr++;
	    i = 0;
	    while(mark_word != 0) {
	      if (mark_word & 1) {
		  q = p[i];
		  GC_PUSH_ONE_HEAP(q, p + i);
	      }
	      i++;
	      mark_word >>= 1;
	    }
	    p += WORDSZ;
	}
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
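/* The inner loop above scans one mark word at a time: bit i marks the	*/
/* object beginning at p[i], and shifting right visits each object in	*/
/* turn.  The idiom in isolation, as an illustrative sketch:		*/
#if 0
    for (i = 0; mark_word != 0; ++i, mark_word >>= 1) {
	if (mark_word & 1) {
	    /* the object at p + i is marked; push its contents */
	}
    }
#endif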
/* Push all objects reachable from marked objects in the given block */
/* of size 2 objects.						      */
void GC_push_marked2(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
	while( p < plim )  {
	    mark_word = *mark_word_addr++;
	    i = 0;
	    while(mark_word != 0) {
	      if (mark_word & 1) {
		  q = p[i];
		  GC_PUSH_ONE_HEAP(q, p + i);
		  q = p[i+1];
		  GC_PUSH_ONE_HEAP(q, p + i + 1);
	      }
	      i += 2;
	      mark_word >>= 2;
	    }
	    p += WORDSZ;
	}
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
# ifndef UNALIGNED
/* Push all objects reachable from marked objects in the given block */
/* of size 4 objects.						      */
/* There is a risk of mark stack overflow here.  But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely.    */
void GC_push_marked4(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
	while( p < plim )  {
	    mark_word = *mark_word_addr++;
	    i = 0;
	    while(mark_word != 0) {
	      if (mark_word & 1) {
		  q = p[i];
		  GC_PUSH_ONE_HEAP(q, p + i);
		  q = p[i+1];
		  GC_PUSH_ONE_HEAP(q, p + i + 1);
		  q = p[i+2];
		  GC_PUSH_ONE_HEAP(q, p + i + 2);
		  q = p[i+3];
		  GC_PUSH_ONE_HEAP(q, p + i + 3);
	      }
	      i += 4;
	      mark_word >>= 4;
	    }
	    p += WORDSZ;
	}
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}

#endif /* UNALIGNED */
#endif /* SMALL_CONFIG */
/* Push all objects reachable from marked objects in the given block */
void GC_push_marked(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;
    register int descr = hhdr -> hb_descr;
    register word * p;
    register int word_no;
    register word * lim;
    register mse * GC_mark_stack_top_reg;
    register mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);

    /* Some quick shortcuts: */
	if ((0 | DS_LENGTH) == descr) return;
	if (GC_block_empty(hhdr)/* nothing marked */) return;
#   ifdef GATHERSTATS
	GC_n_rescuing_pages++;
#   endif
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJSZ) {
	lim = (word *)h + HDR_WORDS;
    } else {
	lim = (word *)(h + 1) - sz;
    }

    switch(sz) {
#   if !defined(SMALL_CONFIG)
     case 1:
       GC_push_marked1(h, hhdr);
       break;
#   endif
#   if !defined(SMALL_CONFIG) && !defined(UNALIGNED)
     case 2:
       GC_push_marked2(h, hhdr);
       break;
     case 4:
       GC_push_marked4(h, hhdr);
       break;
#   endif
     default:
      GC_mark_stack_top_reg = GC_mark_stack_top;
      for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim;
	   p += sz, word_no += sz) {
	 if (mark_bit_from_hdr(hhdr, word_no)) {
	   /* Mark from fields inside the object */
	   PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
#	   ifdef GATHERSTATS
	     /* Subtract this object from total, since it was	*/
	     /* added in twice.					*/
	     GC_composite_in_use -= sz;
#	   endif
	 }
      }
      GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}
#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty	*/
GC_bool GC_block_was_dirty(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;

    if (sz < MAXOBJSZ) {
	 return(GC_page_was_dirty(h));
    } else {
	 register ptr_t p = (ptr_t)h;

	 sz = WORDS_TO_BYTES(sz);
	 while (p < (ptr_t)h + sz) {
	     if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
	     p += HBLKSIZE;
	 }
	 return(FALSE);
    }
}
#endif /* SMALL_CONFIG */
/* Similar to GC_push_marked, but return address of next block	*/
struct hblk * GC_push_next_marked(h)
struct hblk *h;
{
    register hdr * hhdr;

    h = GC_next_used_block(h);
    if (h == 0) return(0);
    hhdr = HDR(h);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages	*/
struct hblk * GC_push_next_marked_dirty(h)
struct hblk *h;
{
    register hdr * hhdr;

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    for (;;) {
	h = GC_next_used_block(h);
	if (h == 0) return(0);
	hhdr = HDR(h);
#	ifdef STUBBORN_ALLOC
	  if (hhdr -> hb_obj_kind == STUBBORN) {
	    if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
		break;
	    }
	  } else {
	    if (GC_block_was_dirty(h, hhdr)) break;
	  }
#	else
	  if (GC_block_was_dirty(h, hhdr)) break;
#	endif
	h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#endif /* SMALL_CONFIG */
/* Similar to above, but for uncollectable pages.  Needed since we	*/
/* do not clear marks for such pages, even for full collections.	*/
struct hblk * GC_push_next_marked_uncollectable(h)
struct hblk *h;
{
    register hdr * hhdr = HDR(h);

    for (;;) {
	h = GC_next_used_block(h);
	if (h == 0) return(0);
	hhdr = HDR(h);
	if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
	h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}