/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# include "private/gc_pmark.h"
/* We put this here to minimize the risk of inlining. */
void GC_noop(void *p, ...) {}

/* Single argument version, robust against whole program analysis. */
static VOLATILE word sink;
/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */

word GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
/* Initialize GC_obj_kinds properly and standard free lists properly.  */
/* This must be done statically since they may be accessed before      */
/* GC_init is called.                                                  */
/* It's done here, since we need to deal with mark descriptors.        */
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
                0 | GC_DS_LENGTH, FALSE, FALSE },
/* NORMAL  */ { &GC_objfreelist[0], 0,
                0 | GC_DS_LENGTH,  /* Adjusted in GC_init_inner for EXTRA_BYTES */
                TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
              { &GC_uobjfreelist[0], 0,
                0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
   /* AUNCOLLECTABLE */
              { &GC_auobjfreelist[0], 0,
                0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/*STUBBORN*/ { &GC_sobjfreelist[0], 0,
                0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};
# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 5;
#   else
      int GC_n_kinds = 4;
#   endif
# else
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 4;
#   else
      int GC_n_kinds = 3;
#   endif
# endif
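/* Each entry above supplies, in order: the kind's free list headers,  */
/* a reclaim list pointer filled in dynamically, a descriptor template */
/* tagged GC_DS_LENGTH, a flag saying whether the object length should */
/* be added to that template (so NORMAL objects are scanned over their */
/* whole length, while PTRFREE objects keep a zero descriptor and are  */
/* never scanned), and a flag saying whether objects of this kind are  */
/* cleared when placed on a free list.  See struct obj_kind in         */
/* gc_priv.h for the authoritative field definitions.                  */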
# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
                /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a   */
                /* multiple of HBLKSIZE.                               */
                /* The incremental collector actually likes a larger   */
                /* size, since it wants to push all marked dirty objs  */
                /* before marking anything new.  Currently we let it   */
                /* grow dynamically.                                   */
# endif
/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack (incl.) and GC_mark_stack_top (incl.) still
 * need to be marked from.
 */

word GC_n_rescuing_pages;       /* Number of dirty pages we marked from */
                                /* excludes ptrfree pages, etc.         */
mse * GC_mark_stack_limit;

word GC_mark_stack_size = 0;
#ifdef PARALLEL_MARK
  mse * VOLATILE GC_mark_stack_top;
#else
  mse * GC_mark_stack_top;
#endif
static struct hblk * scan_ptr;

mark_state_t GC_mark_state = MS_NONE;

GC_bool GC_mark_stack_too_small = FALSE;

GC_bool GC_objects_are_marked = FALSE;  /* Are there collectable marked */
                                        /* objects in the heap?         */
/* Is a collection in progress?  Note that this can return true in the */
/* nonincremental case, if a collection has been abandoned and the     */
/* mark state is now MS_INVALID.                                       */
GC_bool GC_collection_in_progress()
{
    return(GC_mark_state != MS_NONE);
}
/* clear all mark bits in the header */
void GC_clear_hdr_marks(hhdr)
register hdr * hhdr;
{
#   ifdef USE_MARK_BYTES
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
#   else
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
#   endif
}
/* Set all mark bits in the header.  Used for uncollectable blocks. */
void GC_set_hdr_marks(hhdr)
register hdr * hhdr;
{
    register int i;

    for (i = 0; i < MARK_BITS_SZ; ++i) {
#     ifdef USE_MARK_BYTES
        hhdr -> hb_marks[i] = 1;
#     else
        hhdr -> hb_marks[i] = ONES;
#     endif
    }
}
/*
 * Clear all mark bits associated with block h.
 */
# if defined(__STDC__) || defined(__cplusplus)
    static void clear_marks_for_block(struct hblk *h, word dummy)
# else
    static void clear_marks_for_block(h, dummy)
    struct hblk *h;
    word dummy;
# endif
{
    register hdr * hhdr = HDR(h);

    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bit for these is cleared only once the object is       */
        /* explicitly deallocated.  This either frees the block, or    */
        /* the bit is cleared once the object is on the free list.     */
    GC_clear_hdr_marks(hhdr);
}
/* Slow but general routines for setting/clearing/asking about mark bits */
void GC_set_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    set_mark_bit_from_hdr(hhdr, word_no);
}
void GC_clear_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    clear_mark_bit_from_hdr(hhdr, word_no);
}
GC_bool GC_is_marked(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    return(mark_bit_from_hdr(hhdr, word_no));
}
/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
void GC_clear_marks()
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
    /* Counters reflect currently marked objects: reset here */
    GC_composite_in_use = 0;
    GC_atomic_in_use = 0;
}
/* Initiate a garbage collection.  Initiates a full collection if the  */
/* mark state is invalid.                                               */
void GC_initiate_gc()
{
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
        GC_read_changed();
#   endif
#   ifdef CHECKSUMS
    {
        extern void GC_check_dirty();

        if (GC_dirty_maintained) GC_check_dirty();
    }
#   endif
    GC_n_rescuing_pages = 0;
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
        ABORT("unexpected state");
    } /* else this is really a full collection, and mark  */
      /* bits are invalid.                                */
}
static void alloc_mark_stack();
/* Perform a small amount of marking.                   */
/* We try to touch roughly a page of memory.            */
/* Return TRUE if we just finished a mark phase.        */
/* Cold_gc_frame is an address inside a GC frame that   */
/* remains valid until all marking is complete.         */
/* A zero value indicates that it's OK to miss some     */
/* register values.                                     */
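/* GC_mark_some is a small state machine driven by GC_mark_state:      */
/*   MS_PUSH_RESCUERS      -- push marked objects on dirty pages,      */
/*   MS_PUSH_UNCOLLECTABLE -- push marked uncollectable blocks,        */
/*   MS_ROOTS_PUSHED       -- roots pushed; drain the mark stack,      */
/*   MS_INVALID / MS_PARTIALLY_INVALID -- mark bits may be stale and   */
/*                            must be rebuilt by scanning the heap.    */
/* Each call performs one bounded step; the state returns to MS_NONE   */
/* when marking is complete.                                            */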
GC_bool GC_mark_some(cold_gc_frame)
ptr_t cold_gc_frame;
{
#if defined(MSWIN32) && !defined(__GNUC__)
  /* Windows 98 appears to asynchronously create and remove writable   */
  /* memory mappings, for reasons we haven't yet understood.  Since    */
  /* we look for writable regions to determine the root set, we may    */
  /* try to mark from an address range that disappeared since we       */
  /* started the collection.  Thus we have to recover from faults here. */
  /* This code does not appear to be necessary for Windows 95/NT/2000. */
  /* Note that this code should never generate an incremental GC write */
  /* fault.                                                             */
  __try {
#endif /* defined(MSWIN32) && !defined(__GNUC__) */
    switch(GC_mark_state) {
        case MS_PUSH_RESCUERS:
            if (GC_mark_stack_top
                >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
                /* Go ahead and mark, even though that might cause us to */
                /* see more marked dirty objects later on.  Avoid this   */
                /* in the future.                                        */
                GC_mark_stack_too_small = TRUE;
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_dirty(scan_ptr);
                if (scan_ptr == 0) {
                    if (GC_print_stats) {
                        GC_printf1("Marked from %lu dirty pages\n",
                                   (unsigned long)GC_n_rescuing_pages);
                    }
                    GC_push_roots(FALSE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);
        case MS_PUSH_UNCOLLECTABLE:
            if (GC_mark_stack_top
                >= GC_mark_stack + GC_mark_stack_size/4) {
#               ifdef PARALLEL_MARK
                  /* Avoid this, since we don't parallelize the marker  */
                  /* here.                                               */
                  if (GC_parallel) GC_mark_stack_too_small = TRUE;
#               endif
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
                if (scan_ptr == 0) {
                    GC_push_roots(TRUE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);
        case MS_ROOTS_PUSHED:
#           ifdef PARALLEL_MARK
              /* In the incremental GC case, this currently doesn't     */
              /* quite do the right thing, since it runs to             */
              /* completion.  On the other hand, starting a             */
              /* parallel marker is expensive, so perhaps it is         */
              /* the right thing?                                       */
              /* Eventually, incremental marking should run             */
              /* asynchronously in multiple threads, without grabbing   */
              /* the allocation lock.                                   */
              if (GC_parallel) {
                GC_do_parallel_mark();
                GC_ASSERT(GC_mark_stack_top < GC_first_nonempty);
                GC_mark_stack_top = GC_mark_stack - 1;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                if (GC_mark_state == MS_ROOTS_PUSHED) {
                    GC_mark_state = MS_NONE;
                    return(TRUE);
                } else {
                    return(FALSE);
                }
              }
#           endif
            if (GC_mark_stack_top >= GC_mark_stack) {
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                GC_mark_state = MS_NONE;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                return(TRUE);
            }
        case MS_INVALID:
        case MS_PARTIALLY_INVALID:
            if (!GC_objects_are_marked) {
                GC_mark_state = MS_PUSH_UNCOLLECTABLE;
                return(FALSE);
            }
            if (GC_mark_stack_top >= GC_mark_stack) {
                MARK_FROM_MARK_STACK();
                return(FALSE);
            }
            if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
                /* About to start a heap scan for marked objects. */
                /* Mark stack is empty.  OK to reallocate.        */
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                GC_mark_state = MS_PARTIALLY_INVALID;
            }
            scan_ptr = GC_push_next_marked(scan_ptr);
            if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
                GC_push_roots(TRUE, cold_gc_frame);
                GC_objects_are_marked = TRUE;
                if (GC_mark_state != MS_INVALID) {
                    GC_mark_state = MS_ROOTS_PUSHED;
                }
            }
            return(FALSE);
395 ABORT("GC_mark_some: bad state");
#if defined(MSWIN32) && !defined(__GNUC__)
    } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
                EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf0("Caught ACCESS_VIOLATION in marker. "
                     "Memory mapping disappeared.\n");
        }
#     endif /* CONDPRINT */
      /* We have bad roots on the stack.  Discard mark stack.   */
      /* Rescan from marked objects.  Redetermine roots.        */
      GC_invalidate_mark_state();
      scan_ptr = 0;
      return FALSE;
    }
#endif /* defined(MSWIN32) && !defined(__GNUC__) */
}
GC_bool GC_mark_stack_empty()
{
    return(GC_mark_stack_top < GC_mark_stack);
}
word GC_prof_array[10];
# define PROF(n) GC_prof_array[n]++
/* Given a pointer to someplace other than a small object page or the  */
/* first page of a large object, either:                               */
/*  - return a pointer to somewhere in the first page of the large     */
/*    object, if current points to a large object.                     */
/*    In this case *hhdr is replaced with a pointer to the header      */
/*    for the large object.                                             */
/*  - just return current if it does not point to a large object.      */
# ifdef PRINT_BLACK_LIST
  ptr_t GC_find_start(current, hhdr, new_hdr_p, source)
  ptr_t source;
# else
  ptr_t GC_find_start(current, hhdr, new_hdr_p)
# endif
register ptr_t current;
register hdr *hhdr, **new_hdr_p;
{
    if (GC_all_interior_pointers) {
        if (hhdr != 0) {
            register ptr_t orig = current;

            current = (ptr_t)HBLKPTR(current);
            do {
              hhdr = HDR(current);
              current = current - HBLKSIZE*(word)hhdr;
            } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
            /* current points to the start of the large object */
            if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(0);
            if ((word *)orig - (word *)current
                 >= (ptrdiff_t)(hhdr->hb_sz)) {
                /* Pointer past the end of the block */
                return(orig);
            }
            *new_hdr_p = hhdr;
            return(current);
        } else {
            return(current);
        }
    } else {
        return(current);
    }
}
void GC_invalidate_mark_state()
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_top = GC_mark_stack-1;
}
mse * GC_signal_mark_stack_overflow(msp)
mse * msp;
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_too_small = TRUE;
    if (GC_print_stats) {
        GC_printf1("Mark stack overflow; current size = %lu entries\n",
                   GC_mark_stack_size);
    }
    return(msp - GC_MARK_STACK_DISCARDS);
}
/*
 * Mark objects pointed to by the regions described by
 * mark stack entries between GC_mark_stack and GC_mark_stack_top,
 * inclusive.  Assumes the upper limit of a mark stack entry
 * is never 0.  A mark stack entry never has size 0.
 * We try to traverse on the order of a hblk of memory before we return.
 * Caller is responsible for calling this until the mark stack is empty.
 * Note that this is the most performance critical routine in the
 * collector.  Hence it contains all sorts of ugly hacks to speed
 * things up.  In particular, we avoid procedure calls on the common
 * path, we take advantage of peculiarities of the mark descriptor
 * encoding, we optionally maintain a cache for the block address to
 * header mapping, we prefetch when an object is "grayed", etc.
 */
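/* Each mark stack entry (mse) pairs a start address (mse_start) with  */
/* a descriptor (mse_descr).  The low GC_DS_TAGS bits select how the   */
/* descriptor is interpreted: GC_DS_LENGTH means the remainder is just */
/* the byte length of the range, GC_DS_PROC invokes a registered mark  */
/* procedure, and GC_DS_PER_OBJECT fetches the real descriptor from    */
/* the object itself.  Plain ranges longer than SPLIT_RANGE_WORDS are  */
/* processed a piece at a time, with the rest pushed back, so each     */
/* call does a bounded amount of work.                                  */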
mse * GC_mark_from(mark_stack_top, mark_stack, mark_stack_limit)
mse * mark_stack_top;
mse * mark_stack;
mse * mark_stack_limit;
{
  int credit = HBLKSIZE;        /* Remaining credit for marking work    */
  register word * current_p;    /* Pointer to current candidate ptr.    */
  register word current;        /* Candidate pointer.                   */
  register word * limit;        /* (Incl) limit of current candidate    */
                                /* range.                                */
  register word descr;
  register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  register ptr_t least_ha = GC_least_plausible_heap_addr;
# define SPLIT_RANGE_WORDS 128  /* Must be power of 2.          */

  GC_objects_are_marked = TRUE;
# ifdef OS2 /* Use untweaked version to circumvent compiler problem */
  while (mark_stack_top >= mark_stack && credit >= 0) {
# else
  while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit)
         >= 0) {
# endif
    current_p = mark_stack_top -> mse_start;
    descr = mark_stack_top -> mse_descr;
  retry:
    /* current_p and descr describe the current object.                */
    /* *mark_stack_top is vacant.                                       */
    /* The following is 0 only for small objects described by a simple */
    /* length descriptor.  For many applications this is the common    */
    /* case, so we try to detect it quickly.                            */
    if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
      word tag = descr & GC_DS_TAGS;

      switch(tag) {
        case GC_DS_LENGTH:
          /* Large length.                                               */
          /* Process part of the range to avoid pushing too much on the */
          /* stack.                                                      */
          GC_ASSERT(descr < GC_greatest_plausible_heap_addr
                            - GC_least_plausible_heap_addr);
#         ifdef PARALLEL_MARK
#           define SHARE_BYTES 2048
            if (descr > SHARE_BYTES && GC_parallel
                && mark_stack_top < mark_stack_limit - 1) {
              int new_size = (descr/2) & ~(sizeof(word)-1);
              mark_stack_top -> mse_start = current_p;
              mark_stack_top -> mse_descr = new_size + sizeof(word);
                                        /* makes sure we handle         */
                                        /* misaligned pointers.         */
              mark_stack_top++;
              current_p = (word *) ((char *)current_p + new_size);
              descr -= new_size;
              goto retry;
            }
#         endif /* PARALLEL_MARK */
          mark_stack_top -> mse_start =
                limit = current_p + SPLIT_RANGE_WORDS-1;
          mark_stack_top -> mse_descr =
                descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
          /* Make sure that pointers overlapping the two ranges are     */
          /* considered.                                                 */
          limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
          break;
        case GC_DS_BITMAP:
          mark_stack_top--;
          descr &= ~GC_DS_TAGS;
          credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
          while (descr != 0) {
            if ((signed_word)descr < 0) {
              current = *current_p;
              if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
                HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
                                 mark_stack_limit, current_p, exit1);
              }
            }
            descr <<= 1;
            ++ current_p;
          }
          continue;
        case GC_DS_PROC:
          mark_stack_top--;
          credit -= GC_PROC_BYTES;
          mark_stack_top =
              (*PROC(descr))
                    (current_p, mark_stack_top,
                     mark_stack_limit, ENV(descr));
          continue;
        case GC_DS_PER_OBJECT:
          if ((signed_word)descr >= 0) {
            /* Descriptor is in the object.     */
            descr = *(word *)((ptr_t)current_p + descr - GC_DS_PER_OBJECT);
          } else {
            /* Descriptor is in type descriptor pointed to by first     */
            /* word in object.                                           */
            ptr_t type_descr = *(ptr_t *)current_p;
            /* type_descr is either a valid pointer to the descriptor   */
            /* structure, or this object was on a free list.  If it     */
            /* was anything but the last object on the free list, we    */
            /* will misinterpret the next object on the free list as    */
            /* the type descriptor, and get a 0 GC descriptor, which    */
            /* is ideal.  Unfortunately, we need to check for the last  */
            /* object case explicitly.                                   */
            if (0 == type_descr) {
                /* Rarely executed.     */
                mark_stack_top--;
                continue;
            }
            descr = *(word *)(type_descr
                              - (descr - (GC_DS_PER_OBJECT
                                          - GC_INDIR_PER_OBJ_BIAS)));
          }
          if (0 == descr) {
              /* Can happen either because we generated a 0 descriptor */
              /* or we saw a pointer to a free object.                  */
              mark_stack_top--;
              continue;
          }
          goto retry;
      }
    } else /* Small object with length descriptor */ {
      mark_stack_top--;
      limit = (word *)(((ptr_t)current_p) + (word)descr);
    }
    /* The simple case in which we're scanning a range. */
    GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
    credit -= (ptr_t)limit - (ptr_t)current_p;
    limit -= 1;
    {
#     ifndef SMALL_CONFIG
        word deferred;

        /* Try to prefetch the next pointer to be examined asap.       */
        /* Empirically, this also seems to help slightly without       */
        /* prefetches, at least on linux/X86.  Presumably this loop    */
        /* ends up with less register pressure, and gcc thus ends up   */
        /* generating slightly better code.  Overall gcc code quality  */
        /* for this loop is still not great.                            */
        for(;;) {
          PREFETCH((ptr_t)limit - PREF_DIST*CACHE_LINE_SIZE);
          GC_ASSERT(limit >= current_p);
          deferred = *limit;
          limit = (word *)((char *)limit - ALIGNMENT);
          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
            PREFETCH(deferred);
            break;
          }
          if (current_p > limit) goto next_object;
          /* Unroll once, so we don't do too many of the prefetches    */
          /* based on limit.                                            */
          deferred = *limit;
          limit = (word *)((char *)limit - ALIGNMENT);
          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
            PREFETCH(deferred);
            break;
          }
          if (current_p > limit) goto next_object;
        }
#     endif
      while (current_p <= limit) {
        /* Empirically, unrolling this loop doesn't help a lot.        */
        /* Since HC_PUSH_CONTENTS expands to a lot of code,            */
        /* we don't.                                                    */
        current = *current_p;
        PREFETCH((ptr_t)current_p + PREF_DIST*CACHE_LINE_SIZE);
        if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
          /* Prefetch the contents of the object we just pushed.  It's */
          /* likely we will need them soon.                             */
          PREFETCH(current);
          HC_PUSH_CONTENTS((ptr_t)current, mark_stack_top,
                           mark_stack_limit, current_p, exit2);
        }
        current_p = (word *)((char *)current_p + ALIGNMENT);
      }
#     ifndef SMALL_CONFIG
        /* We still need to mark the entry we previously prefetched.   */
        /* We already know that it passes the preliminary pointer      */
        /* validity test.                                               */
        HC_PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
                         mark_stack_limit, current_p, exit4);
      next_object:;
#     endif
    }
  }
  return mark_stack_top;
}
#ifdef PARALLEL_MARK

/* We assume we have an ANSI C Compiler.        */
GC_bool GC_help_wanted = FALSE;
unsigned GC_helper_count = 0;
unsigned GC_active_count = 0;
mse * VOLATILE GC_first_nonempty;
#define LOCAL_MARK_STACK_SIZE HBLKSIZE
        /* Under normal circumstances, this is big enough to guarantee */
        /* we don't overflow half of it in a single call to            */
        /* GC_mark_from.                                                */

/* Steal mark stack entries starting at mse low into mark stack local  */
/* until we either steal mse high, or we have max entries.             */
/* Return a pointer to the top of the local mark stack.                */
/* *next is replaced by a pointer to the next unscanned mark stack     */
/* entry.                                                               */
mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
                          unsigned max, mse **next)
{
    mse *p;
    mse *top = local - 1;
    unsigned i = 0;

    GC_ASSERT(high >= low-1 && high - low + 1 <= GC_mark_stack_size);
    for (p = low; p <= high && i <= max; ++p) {
        word descr = *(volatile word *) &(p -> mse_descr);
        if (descr != 0) {
            *(volatile word *) &(p -> mse_descr) = 0;
            ++top;
            top -> mse_descr = descr;
            top -> mse_start = p -> mse_start;
            GC_ASSERT((top -> mse_descr & GC_DS_TAGS) != GC_DS_LENGTH ||
                      top -> mse_descr < GC_greatest_plausible_heap_addr
                                         - GC_least_plausible_heap_addr);
            /* There is no synchronization here.  We assume that at     */
            /* least one thread will see the original descriptor.       */
            /* Otherwise we need a barrier.                              */
            /* More than one thread may get this entry, but that's only */
            /* a minor performance problem.                              */
            /* If this is a big object, count it as                     */
            /* size/256 + 1 objects.                                     */
            ++i;
            if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (descr >> 8);
        }
    }
    *next = p;
    return top;
}
/* Copy back a local mark stack.        */
/* low and high are inclusive bounds.   */
void GC_return_mark_stack(mse * low, mse * high)
{
    mse * my_top;
    mse * my_start;
    size_t stack_size;

    if (high < low) return;
    stack_size = high - low + 1;
    GC_acquire_mark_lock();
    my_top = GC_mark_stack_top;
    my_start = my_top + 1;
    if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
      if (GC_print_stats) {
          GC_printf0("No room to copy back mark stack.");
      }
      GC_mark_state = MS_INVALID;
      GC_mark_stack_too_small = TRUE;
      /* We drop the local mark stack.  We'll fix things later. */
    } else {
      BCOPY(low, my_start, stack_size * sizeof(mse));
      GC_ASSERT(GC_mark_stack_top == my_top);
#     if !defined(IA64) && !defined(HP_PA)
        GC_memory_write_barrier();
#     endif
        /* On IA64, the volatile write acts as a release barrier. */
      GC_mark_stack_top = my_top + stack_size;
    }
    GC_release_mark_lock();
    GC_notify_all_marker();
}
/* Mark from the local mark stack.              */
/* On return, the local mark stack is empty.    */
/* But this may be achieved by copying the      */
/* local mark stack back into the global one.   */
void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
{
    unsigned n;
#   define N_LOCAL_ITERS 1

#   ifdef GC_ASSERTIONS
      /* Make sure we don't hold mark lock. */
      GC_acquire_mark_lock();
      GC_release_mark_lock();
#   endif
    for (;;) {
        for (n = 0; n < N_LOCAL_ITERS; ++n) {
            local_top = GC_mark_from(local_top, local_mark_stack,
                                     local_mark_stack + LOCAL_MARK_STACK_SIZE);
            if (local_top < local_mark_stack) return;
            if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) {
                GC_return_mark_stack(local_mark_stack, local_top);
                return;
            }
        }
        if (GC_mark_stack_top < GC_first_nonempty &&
            GC_active_count < GC_helper_count
            && local_top > local_mark_stack + 1) {
            /* Try to share the load, since the main stack is empty,   */
            /* and helper threads are waiting for a refill.            */
            /* The entries near the bottom of the stack are likely     */
            /* to require more work.  Thus we return those, even though */
            /* it's harder.                                             */
            mse * new_bottom = local_mark_stack
                                + (local_top - local_mark_stack)/2;
            GC_ASSERT(new_bottom > local_mark_stack
                      && new_bottom < local_top);
            GC_return_mark_stack(local_mark_stack, new_bottom - 1);
            memmove(local_mark_stack, new_bottom,
                    (local_top - new_bottom + 1) * sizeof(mse));
            local_top -= (new_bottom - local_mark_stack);
        }
    }
}
#define ENTRIES_TO_GET 5

long GC_markers = 2;            /* Normally changed by thread-library- */
                                /* -specific code.                      */
/* Mark using the local mark stack until the global mark stack is empty */
/* and there are no active workers.  Update GC_first_nonempty to reflect */
/* progress.                                                             */
/* Caller does not hold mark lock.                                       */
/* Caller has already incremented GC_helper_count.  We decrement it,     */
/* and maintain GC_active_count.                                          */
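/* Each iteration steals a few entries from the global stack into the   */
/* local one and drains them with GC_do_local_mark.  A helper exits     */
/* only once GC_first_nonempty has passed GC_mark_stack_top while       */
/* GC_active_count is zero, i.e. the global stack is empty and no other */
/* marker can still push to it.                                          */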
void GC_mark_local(mse *local_mark_stack, int id)
{
    mse * my_first_nonempty;

    GC_acquire_mark_lock();
    GC_active_count++;
    my_first_nonempty = GC_first_nonempty;
    GC_ASSERT(GC_first_nonempty >= GC_mark_stack &&
              GC_first_nonempty <= GC_mark_stack_top + 1);
#   ifdef PRINTSTATS
        GC_printf1("Starting mark helper %lu\n", (unsigned long)id);
#   endif
    GC_release_mark_lock();
    for (;;) {
        size_t n_on_stack;
        size_t n_to_get;
        mse * my_top;
        mse * local_top;
        mse * global_first_nonempty = GC_first_nonempty;

        GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
                  my_first_nonempty <= GC_mark_stack_top + 1);
        GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
                  global_first_nonempty <= GC_mark_stack_top + 1);
        if (my_first_nonempty < global_first_nonempty) {
            my_first_nonempty = global_first_nonempty;
        } else if (global_first_nonempty < my_first_nonempty) {
            GC_compare_and_exchange((word *)(&GC_first_nonempty),
                                    (word) global_first_nonempty,
                                    (word) my_first_nonempty);
            /* If this fails, we just go ahead, without updating        */
            /* GC_first_nonempty.                                        */
        }
        /* Perhaps we should also update GC_first_nonempty, if it */
        /* is less.  But that would require using atomic updates. */
        my_top = GC_mark_stack_top;
        n_on_stack = my_top - my_first_nonempty + 1;
        if (0 == n_on_stack) {
            GC_acquire_mark_lock();
            my_top = GC_mark_stack_top;
            n_on_stack = my_top - my_first_nonempty + 1;
            if (0 == n_on_stack) {
                GC_active_count--;
                GC_ASSERT(GC_active_count <= GC_helper_count);
                /* Other markers may redeposit objects */
                /* on the stack.                        */
                if (0 == GC_active_count) GC_notify_all_marker();
                while (GC_active_count > 0
                       && GC_first_nonempty > GC_mark_stack_top) {
                    /* We will be notified if either GC_active_count   */
                    /* reaches zero, or if more objects are pushed on  */
                    /* the global mark stack.                           */
                    GC_wait_marker();
                }
                if (GC_active_count == 0 &&
                    GC_first_nonempty > GC_mark_stack_top) {
                    GC_bool need_to_notify = FALSE;
                    /* The above conditions can't be falsified while we */
                    /* hold the mark lock, since neither                */
                    /* GC_active_count nor GC_mark_stack_top can        */
                    /* change.  GC_first_nonempty can only be           */
                    /* incremented asynchronously.  Thus we know that   */
                    /* both conditions actually held simultaneously.    */
                    GC_helper_count--;
                    if (0 == GC_helper_count) need_to_notify = TRUE;
#                   ifdef PRINTSTATS
                      GC_printf1(
                        "Finished mark helper %lu\n", (unsigned long)id);
#                   endif
                    GC_release_mark_lock();
                    if (need_to_notify) GC_notify_all_marker();
                    return;
                }
                /* else there's something on the stack again, or        */
                /* another helper may push something.                   */
                GC_active_count++;
                GC_ASSERT(GC_active_count > 0);
                GC_release_mark_lock();
                continue;
            } else {
                GC_release_mark_lock();
            }
        }
        n_to_get = ENTRIES_TO_GET;
        if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
        local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
                                        local_mark_stack, n_to_get,
                                        &my_first_nonempty);
        GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
                  my_first_nonempty <= GC_mark_stack_top + 1);
        GC_do_local_mark(local_mark_stack, local_top);
    }
}
/* Perform Parallel mark.                       */
/* We hold the GC lock, not the mark lock.      */
/* Currently runs until the mark stack is       */
/* empty.                                        */
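/* The initiating thread sets GC_help_wanted, wakes any helpers blocked */
/* in GC_help_marker, and then marks itself via GC_mark_local.  When    */
/* that returns, the stack has been drained; we clear GC_help_wanted    */
/* and wait for the remaining helpers to deregister before declaring    */
/* the mark phase finished.                                              */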
void GC_do_parallel_mark()
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];

    GC_acquire_mark_lock();
    GC_ASSERT(I_HOLD_LOCK());
    /* This could be a GC_ASSERT, but it seems safer to keep it on      */
    /* all the time, especially since it's cheap.                        */
    if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
        ABORT("Tried to start parallel mark in bad state");
#   ifdef PRINTSTATS
        GC_printf1("Starting marking for mark phase number %lu\n",
                   (unsigned long)GC_mark_no);
#   endif
    GC_first_nonempty = GC_mark_stack;
    GC_active_count = 0;
    GC_helper_count = 1;
    GC_help_wanted = TRUE;
    GC_release_mark_lock();
    GC_notify_all_marker();
        /* Wake up potential helpers.   */
    GC_mark_local(local_mark_stack, 0);
    GC_acquire_mark_lock();
    GC_help_wanted = FALSE;
    /* Done; clean up.  */
    while (GC_helper_count > 0) GC_wait_marker();
    /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
#   ifdef PRINTSTATS
        GC_printf1(
          "Finished marking for mark phase number %lu\n",
          (unsigned long)GC_mark_no);
#   endif
    GC_mark_no++;
    GC_release_mark_lock();
    GC_notify_all_marker();
}
/* Try to help out the marker, if it's running.         */
/* We do not hold the GC lock, but the requestor does.  */
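/* A would-be helper waits until the collector announces mark phase    */
/* my_mark_no (or a later one), registers by bumping GC_helper_count   */
/* under the mark lock, and then runs GC_mark_local, which decrements  */
/* GC_helper_count again when the helper finishes.                      */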
void GC_help_marker(word my_mark_no)
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
    unsigned my_id;
    mse * my_first_nonempty;

    if (!GC_parallel) return;
    GC_acquire_mark_lock();
    while (GC_mark_no < my_mark_no
           || !GC_help_wanted && GC_mark_no == my_mark_no) {
      GC_wait_marker();
    }
    my_id = GC_helper_count;
    if (GC_mark_no != my_mark_no || my_id >= GC_markers) {
      /* Second test is useful only if original threads can also        */
      /* act as helpers.  Under Linux they can't.                        */
      GC_release_mark_lock();
      return;
    }
    GC_helper_count = my_id + 1;
    GC_release_mark_lock();
    GC_mark_local(local_mark_stack, my_id);
    /* GC_mark_local decrements GC_helper_count. */
}

#endif /* PARALLEL_MARK */
/* Allocate or reallocate space for mark stack of size n words. */
/* May silently fail.                                            */
static void alloc_mark_stack(n)
word n;
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));

    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
        if (new_stack != 0) {
          word displ = (word)GC_mark_stack & (GC_page_size - 1);
          signed_word size = GC_mark_stack_size * sizeof(struct GC_ms_entry);

          /* Recycle old space */
          if (0 != displ) displ = GC_page_size - displ;
          size = (size - displ) & ~(GC_page_size - 1);
          GC_add_to_heap((struct hblk *)
                         ((word)GC_mark_stack + displ), (word)size);
          GC_mark_stack = new_stack;
          GC_mark_stack_size = n;
          GC_mark_stack_limit = new_stack + n;
#         ifdef CONDPRINT
            if (GC_print_stats) {
              GC_printf1("Grew mark stack to %lu frames\n",
                         (unsigned long) GC_mark_stack_size);
            }
#         endif
        } else {
#         ifdef CONDPRINT
            if (GC_print_stats) {
              GC_printf1("Failed to grow mark stack to %lu frames\n",
                         (unsigned long) n);
            }
#         endif
        }
    } else {
        if (new_stack == 0) {
            GC_err_printf0("No space for mark stack\n");
            EXIT();
        }
        GC_mark_stack = new_stack;
        GC_mark_stack_size = n;
        GC_mark_stack_limit = new_stack + n;
    }
    GC_mark_stack_top = GC_mark_stack-1;
}

void GC_mark_init()
{
    alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
}
/*
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked. t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
 */
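/* The pushed entry covers [bottom, top) after both bounds are word    */
/* aligned; its descriptor is just the byte length, rounded so that    */
/* the low GC_DS_TAGS bits still read as a GC_DS_LENGTH tag.           */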
void GC_push_all(bottom, top)
ptr_t bottom;
ptr_t top;
{
    register word length;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
        ABORT("unexpected mark stack overflow");
    }
    length = top - bottom;
#   if GC_DS_TAGS > ALIGNMENT - 1
        length += GC_DS_TAGS;
        length &= ~GC_DS_TAGS;
#   endif
    GC_mark_stack_top -> mse_start = (word *)bottom;
    GC_mark_stack_top -> mse_descr = length;
}
/*
 * Analogous to the above, but push only those pages h with dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Used either to selectively push dirty pages, or to push a block
 * in piecemeal fashion, to allow for more marking concurrency.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
 */
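/* The range is handled one heap block at a time: a possible partial   */
/* block at each end and whole blocks in between, so dirty_fn can be   */
/* consulted per page.  If the mark stack is already three quarters    */
/* full, the remainder of the range is pushed as a single entry to     */
/* limit further growth.                                                */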
void GC_push_selected(bottom, top, dirty_fn, push_fn)
ptr_t bottom;
ptr_t top;
int (*dirty_fn) GC_PROTO((struct hblk * h));
void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top));
{
    register struct hblk * h;

    bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));

    if (top == 0 || bottom == top) return;
    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
        if ((*dirty_fn)(h-1)) {
            (*push_fn)(bottom, top);
        }
        return;
    }
    if ((*dirty_fn)(h-1)) {
        (*push_fn)(bottom, (ptr_t)h);
    }
    while ((ptr_t)(h+1) <= top) {
        if ((*dirty_fn)(h)) {
            if ((word)(GC_mark_stack_top - GC_mark_stack)
                > 3 * GC_mark_stack_size / 4) {
                /* Danger of mark stack overflow */
                (*push_fn)((ptr_t)h, top);
                return;
            } else {
                (*push_fn)((ptr_t)h, (ptr_t)(h+1));
            }
        }
        h++;
    }
    if ((ptr_t)h != top) {
        if ((*dirty_fn)(h)) {
            (*push_fn)((ptr_t)h, top);
        }
    }
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
        ABORT("unexpected mark stack overflow");
    }
}
# ifndef SMALL_CONFIG

#ifdef PARALLEL_MARK
    /* Break up root sections into page size chunks to better spread   */
    /* out work.                                                         */
    GC_bool GC_true_func(struct hblk *h) { return TRUE; }
#   define GC_PUSH_ALL(b,t) GC_push_selected(b,t,GC_true_func,GC_push_all);
#else
#   define GC_PUSH_ALL(b,t) GC_push_all(b,t);
#endif
void GC_push_conditional(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    if (all) {
      if (GC_dirty_maintained) {
#       ifdef PROC_VDB
            /* Pages that were never dirtied cannot contain pointers    */
            GC_push_selected(bottom, top, GC_page_was_ever_dirty, GC_push_all);
#       else
            GC_push_all(bottom, top);
#       endif
      } else {
        GC_push_all(bottom, top);
      }
    } else {
        GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
    }
}
# if defined(MSWIN32) || defined(MSWINCE)
  void __cdecl GC_push_one(p)
# else
  void GC_push_one(p)
# endif
word p;
{
    GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
}
struct GC_ms_entry *GC_mark_and_push(obj, mark_stack_ptr, mark_stack_limit, src)
GC_PTR obj;
struct GC_ms_entry * mark_stack_ptr;
struct GC_ms_entry * mark_stack_limit;
GC_PTR *src;
{
    PUSH_CONTENTS(obj, mark_stack_ptr /* modified */, mark_stack_limit, src,
                  was_marked /* internally generated exit label */);
    return mark_stack_ptr;
}
# ifdef __STDC__
#   define BASE(p) (word)GC_base((void *)(p))
# else
#   define BASE(p) (word)GC_base((char *)(p))
# endif
/* Mark and push (i.e. gray) a single object p onto the main   */
/* mark stack.  Consider p to be valid if it is an interior    */
/* pointer.                                                     */
/* The object p has passed a preliminary pointer validity      */
/* test, but we do not definitely know whether it is valid.    */
/* Mark bits are NOT atomically updated.  Thus this must be the */
/* only thread setting them.                                    */
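/* The candidate p is mapped back to an object base in two steps:      */
/* forwarding headers locate the first block of a large object, and    */
/* the block's hb_map translates the offset within the block into a    */
/* valid object displacement.  Candidates that fail are recorded on    */
/* the black list rather than marked.                                   */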
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
    void GC_mark_and_push_stack(p, source)
    ptr_t source;
# else
    void GC_mark_and_push_stack(p)
#   define source 0
# endif
register word p;
{
    register word r;
    register hdr * hhdr;
    register int displ;

    GET_HDR(p, hhdr);
    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
        if (hhdr != 0) {
          r = BASE(p);
          hhdr = HDR(r);
          displ = BYTES_TO_WORDS(HBLKDISPL(r));
        }
    } else {
        register map_entry_type map_entry;

        displ = HBLKDISPL(p);
        map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
        if (map_entry >= MAX_OFFSET) {
          if (map_entry == OFFSET_TOO_BIG || !GC_all_interior_pointers) {
              r = BASE(p);
              displ = BYTES_TO_WORDS(HBLKDISPL(r));
              if (r == 0) hhdr = 0;
          } else {
              /* Offset invalid, but map reflects interior pointers     */
          }
        } else {
          displ = BYTES_TO_WORDS(displ);
          r = (word)((word *)(HBLKPTR(p)) + displ);
        }
    }
    /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
    /* displ is the word index within the block.                 */
    if (hhdr == 0) {
#       ifdef PRINT_BLACK_LIST
          GC_add_to_black_list_stack(p, source);
#       else
          GC_add_to_black_list_stack(p);
#       endif
#       undef source  /* In case we had to define it. */
    } else {
        if (!mark_bit_from_hdr(hhdr, displ)) {
            set_mark_bit_from_hdr(hhdr, displ);
            GC_STORE_BACK_PTR(source, (ptr_t)r);
            PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
                     GC_mark_stack_limit);
        }
    }
}
#ifdef TRACE_BUF

# define TRACE_ENTRIES 1000

struct trace_entry {
    char * kind;
    word gc_no;
    word words_allocd;
    word arg1;
    word arg2;
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;
void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    GC_trace_buf_ptr++;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}
, GC_bool lock
)
1308 struct trace_entry
*p
;
1311 for (i
= GC_trace_buf_ptr
-1; i
!= GC_trace_buf_ptr
; i
--) {
1312 if (i
< 0) i
= TRACE_ENTRIES
-1;
1313 p
= GC_trace_buf
+ i
;
1314 if (p
-> gc_no
< gc_no
|| p
-> kind
== 0) return;
1315 printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
1316 p
-> kind
, p
-> gc_no
, p
-> words_allocd
,
1317 (p
-> arg1
) ^ 0x80000000, (p
-> arg2
) ^ 0x80000000);
1319 printf("Trace incomplete\n");
1323 # endif /* TRACE_BUF */
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the contents
 * change.
 */
void GC_push_all_eager(bottom, top)
ptr_t bottom;
ptr_t top;
{
    word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
    register word *p;
    register word q;
    register word *lim;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    if (top == 0) return;
    /* Check all pointers in range and push them if they appear        */
    /* to be valid.                                                      */
      lim = t - 1 /* longword */;
      for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
        q = *p;
        GC_PUSH_ONE_STACK(q, p);
      }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 */
void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
ptr_t bottom;
ptr_t top;
ptr_t cold_gc_frame;
{
  if (GC_all_interior_pointers) {
#   define EAGER_BYTES 1024
    /* Push the hot end of the stack eagerly, so that register values  */
    /* saved inside GC frames are marked before they disappear.        */
    /* The rest of the marking can be deferred until later.            */
    if (0 == cold_gc_frame) {
        GC_push_all_stack(bottom, top);
        return;
    }
#   ifdef STACK_GROWS_DOWN
      GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
      GC_push_all_eager(bottom, cold_gc_frame);
#   else /* STACK_GROWS_UP */
      GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
      GC_push_all_eager(cold_gc_frame, top);
#   endif /* STACK_GROWS_UP */
  } else {
    GC_push_all_eager(bottom, top);
  }
# ifdef TRACE_BUF
    GC_add_trace_entry("GC_push_all_stack", bottom, top);
# endif
}
#endif /* !THREADS */
void GC_push_all_stack(bottom, top)
ptr_t bottom;
ptr_t top;
{
  if (GC_all_interior_pointers) {
    GC_push_all(bottom, top);
  } else {
    GC_push_all_eager(bottom, top);
  }
}
#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
/* Push all objects reachable from marked objects in the given block */
/* of size 1 objects.                                                 */
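/* One mark bit per word: bit i of a mark word corresponds to the      */
/* one-word object at p + i, so each set bit yields one candidate      */
/* pointer to push.  GC_push_marked2/4 below do the same for two- and  */
/* four-word objects, pushing every word of each marked object.        */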
void GC_push_marked1(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    register mse * mark_stack_top = GC_mark_stack_top;
    register mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
        while( p < plim ) {
            mark_word = *mark_word_addr++;
            i = 0;
            while(mark_word != 0) {
              if (mark_word & 1) {
                  q = p[i];
                  GC_PUSH_ONE_HEAP(q, p + i);
              }
              i++;
              mark_word >>= 1;
            }
            p += WORDSZ;
        }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;
}
#ifndef UNALIGNED

/* Push all objects reachable from marked objects in the given block */
/* of size 2 objects.                                                 */
void GC_push_marked2(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    register mse * mark_stack_top = GC_mark_stack_top;
    register mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
        while( p < plim ) {
            mark_word = *mark_word_addr++;
            i = 0;
            while(mark_word != 0) {
              if (mark_word & 1) {
                  q = p[i];
                  GC_PUSH_ONE_HEAP(q, p + i);
                  q = p[i+1];
                  GC_PUSH_ONE_HEAP(q, p + i + 1);
              }
              i += 2;
              mark_word >>= 2;
            }
            p += WORDSZ;
        }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;
}
/* Push all objects reachable from marked objects in the given block */
/* of size 4 objects.                                                 */
/* There is a risk of mark stack overflow here.  But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely.    */
void GC_push_marked4(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    register mse * mark_stack_top = GC_mark_stack_top;
    register mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
        while( p < plim ) {
            mark_word = *mark_word_addr++;
            i = 0;
            while(mark_word != 0) {
              if (mark_word & 1) {
                  q = p[i];
                  GC_PUSH_ONE_HEAP(q, p + i);
                  q = p[i+1];
                  GC_PUSH_ONE_HEAP(q, p + i + 1);
                  q = p[i+2];
                  GC_PUSH_ONE_HEAP(q, p + i + 2);
                  q = p[i+3];
                  GC_PUSH_ONE_HEAP(q, p + i + 3);
              }
              i += 4;
              mark_word >>= 4;
            }
            p += WORDSZ;
        }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;
}
#endif /* UNALIGNED */

#endif /* SMALL_CONFIG */
/* Push all objects reachable from marked objects in the given block */
void GC_push_marked(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;
    register int descr = hhdr -> hb_descr;
    register word * p;
    register int word_no;
    register word * lim;
    register mse * GC_mark_stack_top_reg;
    register mse * mark_stack_limit = GC_mark_stack_limit;

    /* Some quick shortcuts: */
        if ((0 | GC_DS_LENGTH) == descr) return;
        if (GC_block_empty(hhdr)/* nothing marked */) return;
    GC_n_rescuing_pages++;
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJSZ) {
        lim = (word *)h;
    } else {
        lim = (word *)(h + 1) - sz;
    }

    switch(sz) {
#   if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
     case 1:
       GC_push_marked1(h, hhdr);
       break;
#   endif
#   if !defined(SMALL_CONFIG) && !defined(UNALIGNED) && \
       !defined(USE_MARK_BYTES)
     case 2:
       GC_push_marked2(h, hhdr);
       break;
     case 4:
       GC_push_marked4(h, hhdr);
       break;
#   endif
     default:
      GC_mark_stack_top_reg = GC_mark_stack_top;
      for (p = (word *)h, word_no = 0; p <= lim; p += sz, word_no += sz) {
         if (mark_bit_from_hdr(hhdr, word_no)) {
           /* Mark from fields inside the object */
             PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
#            ifdef GATHERSTATS
                /* Subtract this object from total, since it was       */
                /* added in twice.                                       */
                GC_composite_in_use -= sz;
#            endif
         }
      }
      GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}
#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty    */
GC_bool GC_block_was_dirty(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;

    if (sz < MAXOBJSZ) {
         return(GC_page_was_dirty(h));
    } else {
         register ptr_t p = (ptr_t)h;
         sz = WORDS_TO_BYTES(sz);
         while (p < (ptr_t)h + sz) {
             if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
             p += HBLKSIZE;
         }
         return(FALSE);
    }
}
#endif /* SMALL_CONFIG */
/* Similar to GC_push_marked, but return address of next block */
struct hblk * GC_push_next_marked(h)
struct hblk *h;
{
    register hdr * hhdr;

    h = GC_next_used_block(h);
    if (h == 0) return(0);
    hhdr = HDR(h);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages   */
struct hblk * GC_push_next_marked_dirty(h)
struct hblk *h;
{
    register hdr * hhdr;

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    for (;;) {
        h = GC_next_used_block(h);
        if (h == 0) return(0);
        hhdr = HDR(h);
#       ifdef STUBBORN_ALLOC
          if (hhdr -> hb_obj_kind == STUBBORN) {
            if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
                break;
            }
          } else {
            if (GC_block_was_dirty(h, hhdr)) break;
          }
#       else
          if (GC_block_was_dirty(h, hhdr)) break;
#       endif
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#endif
/* Similar to above, but for uncollectable pages.  Needed since we     */
/* do not clear marks for such pages, even for full collections.       */
struct hblk * GC_push_next_marked_uncollectable(h)
struct hblk *h;
{
    register hdr * hhdr = HDR(h);

    for (;;) {
        h = GC_next_used_block(h);
        if (h == 0) return(0);
        hhdr = HDR(h);
        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}