/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# include <stdio.h>
# include "gc_priv.h"
# include "gc_mark.h"
/* We put this here to minimize the risk of inlining. */
/*VARARGS*/
#ifdef __WATCOMC__
  void GC_noop(void *p, ...) {}
#else
  void GC_noop() {}
#endif
/* Single argument version, robust against whole program analysis. */
void GC_noop1(x)
word x;
{
    static VOLATILE word sink;

    sink = x;
}
/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */

word GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
/* Initialize GC_obj_kinds and the standard free lists properly.  */
/* This must be done statically since they may be accessed before */
/* GC_init is called.                                              */
/* It's done here, since we need to deal with mark descriptors.    */
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
                0 | DS_LENGTH, FALSE, FALSE },
/* NORMAL  */ { &GC_objfreelist[0], 0,
#               if defined(ADD_BYTE_AT_END) && ALIGNMENT > DS_TAGS
                (word)(-ALIGNMENT) | DS_LENGTH,
#               else
                0 | DS_LENGTH,
#               endif
                TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
              { &GC_uobjfreelist[0], 0,
                0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
   /* AUNCOLLECTABLE */
              { &GC_auobjfreelist[0], 0,
                0 | DS_LENGTH, FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/* STUBBORN */ { &GC_sobjfreelist[0], 0,
                0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};
# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 5;
#   else
      int GC_n_kinds = 4;
#   endif
# else
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 4;
#   else
      int GC_n_kinds = 3;
#   endif
# endif
# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
        /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a   */
        /* multiple of HBLKSIZE.                                */
        /* The incremental collector actually likes a larger    */
        /* size, since it wants to push all marked dirty objs   */
        /* before marking anything new.  Currently we let it    */
        /* grow dynamically.                                    */
# endif
/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack (incl.) and GC_mark_stack_top (incl.)
 * still need to be marked from.
 */
word GC_n_rescuing_pages;       /* Number of dirty pages we marked from; */
                                /* excludes ptrfree pages, etc.          */

mse * GC_mark_stack;

word GC_mark_stack_size = 0;

mse * GC_mark_stack_top;

static struct hblk * scan_ptr;

mark_state_t GC_mark_state = MS_NONE;

GC_bool GC_mark_stack_too_small = FALSE;

GC_bool GC_objects_are_marked = FALSE;  /* Are there collectable marked */
                                        /* objects in the heap?         */
/* Is a collection in progress?  Note that this can return true in the */
/* nonincremental case, if a collection has been abandoned and the     */
/* mark state is now MS_INVALID.                                       */
GC_bool GC_collection_in_progress()
{
    return(GC_mark_state != MS_NONE);
}
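/* Illustrative only (a minimal sketch, not code from this file): a    */
/* caller that must finish marking before proceeding can drive the     */
/* state machine with GC_mark_some, defined below; passing 0 says it   */
/* is OK to miss register values:                                      */
#if 0
    while (GC_collection_in_progress()) {
        if (GC_mark_some((ptr_t)0)) break;  /* TRUE: mark phase done */
    }
#endif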
/* Clear all mark bits in the header. */
void GC_clear_hdr_marks(hhdr)
register hdr * hhdr;
{
    BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
}
/* Set all mark bits in the header.  Used for uncollectable blocks. */
void GC_set_hdr_marks(hhdr)
register hdr * hhdr;
{
    register int i;

    for (i = 0; i < MARK_BITS_SZ; ++i) {
        hhdr -> hb_marks[i] = ONES;
    }
}
/*
 * Clear all mark bits associated with block h.
 */
/*ARGSUSED*/
static void clear_marks_for_block(h, dummy)
struct hblk *h;
word dummy;
{
    register hdr * hhdr = HDR(h);

    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bit for these is cleared only once the object is   */
        /* explicitly deallocated.  This either frees the block, or */
        /* the bit is cleared once the object is on the free list.  */
    GC_clear_hdr_marks(hhdr);
}
/* Slow but general routines for setting/clearing/asking about mark bits */
void GC_set_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    set_mark_bit_from_hdr(hhdr, word_no);
}

void GC_clear_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    clear_mark_bit_from_hdr(hhdr, word_no);
}

GC_bool GC_is_marked(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;

    return(mark_bit_from_hdr(hhdr, word_no));
}
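/* Illustrative only (a hypothetical helper, not part of this file):   */
/* the slow routines above make one-off debugging checks easy to       */
/* write.  Note word_no is the word index of p within its block, so p  */
/* must point at the start of an object:                               */
#if 0
    static void assert_reachable(ptr_t p)
    {
        if (!GC_is_marked(p)) ABORT("object unexpectedly unmarked");
    }
#endif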
/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
void GC_clear_marks()
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
    scan_ptr = 0;
#   ifdef GATHERSTATS
        /* Counters reflect currently marked objects: reset here */
        GC_composite_in_use = 0;
        GC_atomic_in_use = 0;
#   endif
}
/* Initiate a garbage collection.  Initiates a full collection if the */
/* mark state is invalid.                                             */
/*ARGSUSED*/
void GC_initiate_gc()
{
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
        GC_read_changed();
#   endif
#   ifdef CHECKSUMS
        {
            extern void GC_check_dirty();

            if (GC_dirty_maintained) GC_check_dirty();
        }
#   endif
#   ifdef GATHERSTATS
        GC_n_rescuing_pages = 0;
#   endif
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
        ABORT("unexpected state");
    } /* else this is really a full collection, and mark */
      /* bits are invalid.                               */
    scan_ptr = 0;
}
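/* Informal summary of the state machine driven by GC_mark_some()      */
/* below (derived from the code; documentation only):                  */
/*   MS_NONE    -> MS_PUSH_RESCUERS        incremental collection start */
/*   MS_INVALID -> MS_PUSH_UNCOLLECTABLE   full collection start        */
/*   MS_INVALID -> MS_PARTIALLY_INVALID    rescan from marked objects   */
/*   MS_PUSH_* / MS_PARTIALLY_INVALID -> MS_ROOTS_PUSHED                */
/*                once scan_ptr reaches 0 and the roots are pushed      */
/*   MS_ROOTS_PUSHED -> MS_NONE            mark stack drained; done     */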
static void alloc_mark_stack();
/* Perform a small amount of marking.                  */
/* We try to touch roughly a page of memory.           */
/* Return TRUE if we just finished a mark phase.       */
/* Cold_gc_frame is an address inside a GC frame that  */
/* remains valid until all marking is complete.        */
/* A zero value indicates that it's OK to miss some    */
/* register values.                                    */
GC_bool GC_mark_some(cold_gc_frame)
ptr_t cold_gc_frame;
{
#ifdef MSWIN32
  /* Windows 98 appears to asynchronously create and remove writable    */
  /* memory mappings, for reasons we haven't yet understood.  Since     */
  /* we look for writable regions to determine the root set, we may     */
  /* try to mark from an address range that disappeared since we        */
  /* started the collection.  Thus we have to recover from faults here. */
  /* This code does not appear to be necessary for Windows 95/NT/2000.  */
  /* Note that this code should never generate an incremental GC write  */
  /* fault.                                                             */
  __try {
#endif
    switch(GC_mark_state) {
        case MS_NONE:
            return(FALSE);

        case MS_PUSH_RESCUERS:
            if (GC_mark_stack_top
                >= GC_mark_stack + GC_mark_stack_size
                   - INITIAL_MARK_STACK_SIZE/2) {
                /* Go ahead and mark, even though that might cause us to */
                /* see more marked dirty objects later on.  Avoid this   */
                /* in the future.                                        */
                GC_mark_stack_too_small = TRUE;
                GC_mark_from_mark_stack();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_dirty(scan_ptr);
                if (scan_ptr == 0) {
#                   ifdef PRINTSTATS
                        GC_printf1("Marked from %lu dirty pages\n",
                                   (unsigned long)GC_n_rescuing_pages);
#                   endif
                    GC_push_roots(FALSE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);
        case MS_PUSH_UNCOLLECTABLE:
            if (GC_mark_stack_top
                >= GC_mark_stack + INITIAL_MARK_STACK_SIZE/4) {
                GC_mark_from_mark_stack();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
                if (scan_ptr == 0) {
                    GC_push_roots(TRUE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);
        case MS_ROOTS_PUSHED:
            if (GC_mark_stack_top >= GC_mark_stack) {
                GC_mark_from_mark_stack();
                return(FALSE);
            } else {
                GC_mark_state = MS_NONE;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                return(TRUE);
            }
        case MS_INVALID:
        case MS_PARTIALLY_INVALID:
            if (!GC_objects_are_marked) {
                GC_mark_state = MS_PUSH_UNCOLLECTABLE;
                return(FALSE);
            }
            if (GC_mark_stack_top >= GC_mark_stack) {
                GC_mark_from_mark_stack();
                return(FALSE);
            }
            if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
                /* About to start a heap scan for marked objects. */
                /* Mark stack is empty.  OK to reallocate.        */
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                GC_mark_state = MS_PARTIALLY_INVALID;
            }
            scan_ptr = GC_push_next_marked(scan_ptr);
            if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
                GC_push_roots(TRUE, cold_gc_frame);
                GC_objects_are_marked = TRUE;
                if (GC_mark_state != MS_INVALID) {
                    GC_mark_state = MS_ROOTS_PUSHED;
                }
            }
            return(FALSE);

        default:
            ABORT("GC_mark_some: bad state");
            return(FALSE);
    }
#ifdef MSWIN32
  } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
            EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
#   ifdef PRINTSTATS
        GC_printf0("Caught ACCESS_VIOLATION in marker. "
                   "Memory mapping disappeared.\n");
#   endif /* PRINTSTATS */
    /* We have bad roots on the stack.  Discard mark stack. */
    /* Rescan from marked objects.  Redetermine roots.      */
    GC_invalidate_mark_state();
    scan_ptr = 0;
    return FALSE;
  }
#endif /* MSWIN32 */
}
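/* Usage sketch (hypothetical; the collector's real caller lives in    */
/* alloc.c): a caller typically passes the address of a local as       */
/* cold_gc_frame, so that register values saved below that frame are   */
/* scanned eagerly:                                                    */
#if 0
    static GC_bool mark_slice(void)
    {
        ptr_t dummy;    /* its address approximately bounds the GC frame */
        return GC_mark_some((ptr_t)(&dummy));
    }
#endif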
GC_bool GC_mark_stack_empty()
{
    return(GC_mark_stack_top < GC_mark_stack);
}
#ifdef PROF_MARKER
    word GC_prof_array[10];
#   define PROF(n) GC_prof_array[n]++
#else
#   define PROF(n)
#endif
/* Given a pointer to someplace other than a small object page or the */
/* first page of a large object, return a pointer either to the       */
/* start of the large object or NIL.                                  */
/* In the latter case black list the address current.                 */
/* Returns NIL without black listing if current points to a block     */
/* with IGNORE_OFF_PAGE set.                                           */
/*ARGSUSED*/
# ifdef PRINT_BLACK_LIST
  ptr_t GC_find_start(current, hhdr, source)
  word source;
# else
  ptr_t GC_find_start(current, hhdr)
# define source 0
# endif
register ptr_t current;
register hdr * hhdr;
{
#   ifdef ALL_INTERIOR_POINTERS
        if (hhdr != 0) {
            register ptr_t orig = current;

            current = (ptr_t)HBLKPTR(current) + HDR_BYTES;
            do {
                current = current - HBLKSIZE*(word)hhdr;
                hhdr = HDR(current);
            } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
            /* current points to the start of the large object */
            if (hhdr -> hb_flags & IGNORE_OFF_PAGE) return(0);
            if ((word *)orig - (word *)current
                >= (ptrdiff_t)(hhdr->hb_sz)) {
                /* Pointer past the end of the block */
                GC_ADD_TO_BLACK_LIST_NORMAL(orig, source);
                return(0);
            }
            return(current);
        } else {
            GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
            return(0);
        }
#   else
        GC_ADD_TO_BLACK_LIST_NORMAL(current, source);
        return(0);
#   endif
#   undef source
}
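/* Note on the loop above (explanatory; the encoding is defined in the */
/* header machinery of gc_priv.h): headers for the second and later    */
/* pages of a large object are "forwarding addresses", i.e. small      */
/* counts rather than real header pointers, telling us how many blocks */
/* to step back toward the object start.  Iterating until              */
/* IS_FORWARDING_ADDR_OR_NIL fails therefore lands on the first page,  */
/* whose header is genuine.                                            */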
void GC_invalidate_mark_state()
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_top = GC_mark_stack-1;
}
mse * GC_signal_mark_stack_overflow(msp)
mse * msp;
{
    GC_mark_state = MS_INVALID;
    GC_mark_stack_too_small = TRUE;
#   ifdef PRINTSTATS
        GC_printf1("Mark stack overflow; current size = %lu entries\n",
                   GC_mark_stack_size);
#   endif
    return(msp-INITIAL_MARK_STACK_SIZE/8);
}
/*
 * Mark objects pointed to by the regions described by
 * mark stack entries between GC_mark_stack and GC_mark_stack_top,
 * inclusive.  Assumes the upper limit of a mark stack entry
 * is never 0.  A mark stack entry never has size 0.
 * We try to traverse on the order of a hblk of memory before we return.
 * Caller is responsible for calling this until the mark stack is empty.
 * Note that this is the most performance critical routine in the
 * collector.  Hence it contains all sorts of ugly hacks to speed
 * things up.  In particular, we avoid procedure calls on the common
 * path, we take advantage of peculiarities of the mark descriptor
 * encoding, we optionally maintain a cache for the block address to
 * header mapping, we prefetch when an object is "grayed", etc.
 */
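/* For orientation (an informal gloss on the descriptor encoding used  */
/* below; the authoritative definitions live in gc_mark.h): the low    */
/* DS_TAGS bits of mse_descr select the interpretation of the rest:    */
/*   DS_LENGTH     - descr is simply the length of the range in bytes. */
/*   DS_BITMAP     - the high bits form a bitmap; a set bit means the  */
/*                   corresponding word may be a pointer.              */
/*   DS_PROC       - descr encodes an index into GC_mark_procs plus an */
/*                   environment; the procedure does the marking.      */
/*   DS_PER_OBJECT - descr gives the displacement of the real          */
/*                   descriptor within (or via) the object itself.     */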
void GC_mark_from_mark_stack()
{
  mse * GC_mark_stack_reg = GC_mark_stack;
  mse * GC_mark_stack_top_reg = GC_mark_stack_top;
  mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);
  int credit = HBLKSIZE;        /* Remaining credit for marking work    */
  register word * current_p;    /* Pointer to current candidate ptr.    */
  register word current;        /* Candidate pointer.                   */
  register word * limit;        /* (Incl) limit of current candidate    */
                                /* range.                               */
  register word descr;
  register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
  register ptr_t least_ha = GC_least_plausible_heap_addr;
  DECLARE_HDR_CACHE;

# define SPLIT_RANGE_WORDS 128  /* Must be power of 2. */

  GC_objects_are_marked = TRUE;
  INIT_HDR_CACHE;
# ifdef OS2 /* Use untweaked version to circumvent compiler problem */
  while (GC_mark_stack_top_reg >= GC_mark_stack_reg && credit >= 0) {
# else
  while ((((ptr_t)GC_mark_stack_top_reg - (ptr_t)GC_mark_stack_reg) | credit)
        >= 0) {
# endif
    current_p = GC_mark_stack_top_reg -> mse_start;
    descr = GC_mark_stack_top_reg -> mse_descr;
  retry:
    /* current_p and descr describe the current object.                 */
    /* *GC_mark_stack_top_reg is vacant.                                */
    /* The following is 0 only for small objects described by a simple  */
    /* length descriptor.  For many applications this is the common     */
    /* case, so we try to detect it quickly.                            */
    if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | DS_TAGS)) {
      word tag = descr & DS_TAGS;

      switch(tag) {
        case DS_LENGTH:
          /* Large length.                                              */
          /* Process part of the range to avoid pushing too much on the */
          /* stack.                                                     */
          GC_mark_stack_top_reg -> mse_start =
                limit = current_p + SPLIT_RANGE_WORDS-1;
          GC_mark_stack_top_reg -> mse_descr =
                descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
          /* Make sure that pointers overlapping the two ranges are */
          /* considered.                                            */
          limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
          break;
        case DS_BITMAP:
          GC_mark_stack_top_reg--;
          descr &= ~DS_TAGS;
          credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
          while (descr != 0) {
            if ((signed_word)descr < 0) {
              current = *current_p;
              if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
                PREFETCH(current);
                HC_PUSH_CONTENTS((ptr_t)current, GC_mark_stack_top_reg,
                                 mark_stack_limit, current_p, exit1);
              }
            }
            descr <<= 1;
            ++ current_p;
          }
          continue;
        case DS_PROC:
          GC_mark_stack_top_reg--;
          credit -= PROC_BYTES;
#ifdef GC_DEBUG
          current_p = GC_debug_object_start(current_p);
#endif
          GC_mark_stack_top_reg =
              (*PROC(descr))
                    (current_p, GC_mark_stack_top_reg,
                     mark_stack_limit, ENV(descr));
          continue;
        case DS_PER_OBJECT:
          if ((signed_word)descr >= 0) {
            /* Descriptor is in the object. */
            descr = *(word *)((ptr_t)current_p + descr - DS_PER_OBJECT);
          } else {
            /* Descriptor is in type descriptor pointed to by first     */
            /* word in object.                                          */
            ptr_t type_descr = *(ptr_t *)current_p;
            /* type_descr is either a valid pointer to the descriptor   */
            /* structure, or this object was on a free list.  If it     */
            /* was anything but the last object on the free list, we    */
            /* will misinterpret the next object on the free list as    */
            /* the type descriptor, and get a 0 GC descriptor, which    */
            /* is ideal.  Unfortunately, we need to check for the last  */
            /* object case explicitly.                                  */
            if (0 == type_descr) {
                /* Rarely executed. */
                GC_mark_stack_top_reg--;
                continue;
            }
            descr = *(word *)(type_descr
                              - (descr - (DS_PER_OBJECT - INDIR_PER_OBJ_BIAS)));
          }
          goto retry;
      }
    } else /* Small object with length descriptor */ {
      GC_mark_stack_top_reg--;
      limit = (word *)(((ptr_t)current_p) + (word)descr);
    }
    /* The simple case in which we're scanning a range. */
    credit -= (ptr_t)limit - (ptr_t)current_p;
    limit -= 1;
    {
#     define PREF_DIST 4
#     ifndef SMALL_CONFIG
        word deferred;

        /* Try to prefetch the next pointer to be examined asap.      */
        /* Empirically, this also seems to help slightly without      */
        /* prefetches, at least on linux/X86.  Presumably this loop   */
        /* ends up with less register pressure, and gcc thus ends up  */
        /* generating slightly better code.  Overall gcc code quality */
        /* for this loop is still not great.                          */
        for(;;) {
          PREFETCH((ptr_t)limit - PREF_DIST*CACHE_LINE_SIZE);
          deferred = *limit;
          limit = (word *)((char *)limit - ALIGNMENT);
          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
            PREFETCH(deferred);
            break;
          }
          if (current_p > limit) goto next_object;
          /* Unroll once, so we don't do too many of the prefetches */
          /* based on limit.                                        */
          deferred = *limit;
          limit = (word *)((char *)limit - ALIGNMENT);
          if ((ptr_t)deferred >= least_ha && (ptr_t)deferred < greatest_ha) {
            PREFETCH(deferred);
            break;
          }
          if (current_p > limit) goto next_object;
        }
#     endif
      while (current_p <= limit) {
        /* Empirically, unrolling this loop doesn't help a lot. */
        /* Since HC_PUSH_CONTENTS expands to a lot of code,     */
        /* we don't.                                            */
        current = *current_p;
        PREFETCH((ptr_t)current_p + PREF_DIST*CACHE_LINE_SIZE);
        if ((ptr_t)current >= least_ha && (ptr_t)current < greatest_ha) {
          /* Prefetch the contents of the object we just pushed.  It's */
          /* likely we will need them soon.                            */
          PREFETCH(current);
          HC_PUSH_CONTENTS((ptr_t)current, GC_mark_stack_top_reg,
                           mark_stack_limit, current_p, exit2);
        }
        current_p = (word *)((char *)current_p + ALIGNMENT);
      }
#     ifndef SMALL_CONFIG
        /* We still need to mark the entry we previously prefetched. */
        /* We already know that it passes the preliminary pointer    */
        /* validity test.                                             */
        HC_PUSH_CONTENTS((ptr_t)deferred, GC_mark_stack_top_reg,
                         mark_stack_limit, current_p, exit4);
      next_object:;
#     endif
    }
  }
  GC_mark_stack_top = GC_mark_stack_top_reg;
}
/* Allocate or reallocate space for a mark stack of n entries. */
/* May silently fail.                                          */
static void alloc_mark_stack(n)
word n;
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct ms_entry));

    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
        if (new_stack != 0) {
            word displ = (word)GC_mark_stack & (GC_page_size - 1);
            signed_word size = GC_mark_stack_size * sizeof(struct ms_entry);

            /* Recycle old space */
            if (0 != displ) displ = GC_page_size - displ;
            size = (size - displ) & ~(GC_page_size - 1);
            if (size > 0) {
                GC_add_to_heap((struct hblk *)
                        ((word)GC_mark_stack + displ), (word)size);
            }
            GC_mark_stack = new_stack;
            GC_mark_stack_size = n;
#           ifdef PRINTSTATS
                GC_printf1("Grew mark stack to %lu frames\n",
                           (unsigned long) GC_mark_stack_size);
#           endif
        } else {
#           ifdef PRINTSTATS
                GC_printf1("Failed to grow mark stack to %lu frames\n",
                           (unsigned long) n);
#           endif
        }
    } else {
        if (new_stack == 0) {
            GC_err_printf0("No space for mark stack\n");
            EXIT();
        }
        GC_mark_stack = new_stack;
        GC_mark_stack_size = n;
    }
    GC_mark_stack_top = GC_mark_stack-1;
}
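/* Worked example for the recycling arithmetic above (hypothetical     */
/* numbers, assuming GC_page_size == 4096): if the old stack starts at */
/* 0x10100 and occupies 0x3000 bytes, displ becomes 4096 - 0x100 =     */
/* 0xF00, and size rounds down to (0x3000 - 0xF00) & ~0xFFF = 0x2000,  */
/* so the two whole pages starting at 0x11000 are returned to the      */
/* heap; the partial pages at either end are simply dropped.           */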
void GC_mark_init()
{
    alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
}
/*
 * Push all locations between bottom and top onto the mark stack.
 * bottom is the first location to be checked; top is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
 */
void GC_push_all(bottom, top)
ptr_t bottom;
ptr_t top;
{
    register word length;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
        ABORT("unexpected mark stack overflow");
    }
    length = top - bottom;
#   if DS_TAGS > ALIGNMENT - 1
        length += DS_TAGS;
        length &= ~DS_TAGS;
#   endif
    GC_mark_stack_top -> mse_start = (word *)bottom;
    GC_mark_stack_top -> mse_descr = length;
}
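/* Worked example (hypothetical numbers, ALIGNMENT == 4): the range    */
/* [0x1002, 0x1013) is rounded inward to [0x1004, 0x1010), so length   */
/* is 0xC.  Its low DS_TAGS bits are 0, i.e. the entry is a DS_LENGTH  */
/* descriptor, which is what GC_mark_from_mark_stack expects; the      */
/* extra rounding under the #if is only needed when ALIGNMENT alone    */
/* does not clear the tag bits.                                        */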
/*
 * Analogous to the above, but push only those pages that may have been
 * dirtied.  A block h is assumed dirty if dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
 */
void GC_push_dirty(bottom, top, dirty_fn, push_fn)
ptr_t bottom;
ptr_t top;
int (*dirty_fn)(/* struct hblk * h */);
void (*push_fn)(/* ptr_t bottom, ptr_t top */);
{
    register struct hblk * h;

    bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));

    if (top == 0 || bottom == top) return;
    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
        if ((*dirty_fn)(h-1)) {
            (*push_fn)(bottom, top);
        }
        return;
    }
    if ((*dirty_fn)(h-1)) {
        (*push_fn)(bottom, (ptr_t)h);
    }
    while ((ptr_t)(h+1) <= top) {
        if ((*dirty_fn)(h)) {
            if ((word)(GC_mark_stack_top - GC_mark_stack)
                > 3 * GC_mark_stack_size / 4) {
                /* Danger of mark stack overflow */
                (*push_fn)((ptr_t)h, top);
                return;
            } else {
                (*push_fn)((ptr_t)h, (ptr_t)(h+1));
            }
        }
        h++;
    }
    if ((ptr_t)h != top) {
        if ((*dirty_fn)(h)) {
            (*push_fn)((ptr_t)h, top);
        }
    }
    if (GC_mark_stack_top >= GC_mark_stack + GC_mark_stack_size) {
        ABORT("unexpected mark stack overflow");
    }
}
# ifndef SMALL_CONFIG
void GC_push_conditional(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    if (all) {
        if (GC_dirty_maintained) {
#           ifdef PROC_VDB
                /* Pages that were never dirtied cannot contain pointers */
                GC_push_dirty(bottom, top, GC_page_was_ever_dirty, GC_push_all);
#           else
                GC_push_all(bottom, top);
#           endif
        } else {
            GC_push_all(bottom, top);
        }
    } else {
        GC_push_dirty(bottom, top, GC_page_was_dirty, GC_push_all);
    }
}
#endif
# ifdef MSWIN32
  void __cdecl GC_push_one(p)
# else
  void GC_push_one(p)
# endif
word p;
{
#   ifdef NURSERY
      if (0 != GC_push_proc) {
        GC_push_proc(p);
        return;
      }
#   endif
    GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
}
# ifdef __STDC__
#   define BASE(p) (word)GC_base((void *)(p))
# else
#   define BASE(p) (word)GC_base((char *)(p))
# endif
/* As above, but argument passed preliminary test. */
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
    void GC_push_one_checked(p, interior_ptrs, source)
    ptr_t source;
# else
    void GC_push_one_checked(p, interior_ptrs)
#   define source 0
# endif
register word p;
register GC_bool interior_ptrs;
{
    register word r;
    register hdr * hhdr;
    register int displ;

    GET_HDR(p, hhdr);
    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
        if (hhdr != 0 && interior_ptrs) {
            r = BASE(p);
            hhdr = HDR(r);
            displ = BYTES_TO_WORDS(HBLKDISPL(r));
        } else {
            hhdr = 0;
        }
    } else {
        register map_entry_type map_entry;

        displ = HBLKDISPL(p);
        map_entry = MAP_ENTRY((hhdr -> hb_map), displ);
        if (map_entry == OBJ_INVALID) {
#           ifndef ALL_INTERIOR_POINTERS
                if (interior_ptrs) {
                    r = BASE(p);
                    displ = BYTES_TO_WORDS(HBLKDISPL(r));
                    if (r == 0) hhdr = 0;
                } else {
                    hhdr = 0;
                }
#           else
                /* map already reflects interior pointers */
                hhdr = 0;
#           endif
        } else {
            displ = BYTES_TO_WORDS(displ);
            displ -= map_entry;
            r = (word)((word *)(HBLKPTR(p)) + displ);
        }
    }
    /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
    /* displ is the word index within the block.                 */
    if (hhdr == 0) {
        if (interior_ptrs) {
#           ifdef PRINT_BLACK_LIST
                GC_add_to_black_list_stack(p, source);
#           else
                GC_add_to_black_list_stack(p);
#           endif
        } else {
            GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
#           undef source  /* In case we had to define it. */
        }
    } else {
        if (!mark_bit_from_hdr(hhdr, displ)) {
            set_mark_bit_from_hdr(hhdr, displ);
            GC_STORE_BACK_PTR(source, (ptr_t)r);
            PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
                     &(GC_mark_stack[GC_mark_stack_size]));
        }
    }
}
# ifdef TRACE_BUF

# define TRACE_ENTRIES 1000

struct trace_entry {
    char * kind;
    word gc_no;
    word words_allocd;
    word arg1;
    word arg2;
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;

void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    GC_trace_buf_ptr++;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}

void GC_print_trace(word gc_no, GC_bool lock)
{
    int i;
    struct trace_entry *p;

    if (lock) LOCK();
    for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
        if (i < 0) i = TRACE_ENTRIES-1;
        p = GC_trace_buf + i;
        if (p -> gc_no < gc_no || p -> kind == 0) return;
        printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
                p -> kind, p -> gc_no, p -> words_allocd,
                (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
    }
    printf("Trace incomplete\n");
    if (lock) UNLOCK();
}

# endif /* TRACE_BUF */
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the contents
 * change.
 */
void GC_push_all_eager(bottom, top)
ptr_t bottom;
ptr_t top;
{
    word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
    register word *p;
    register word q;
    register word *lim;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    if (top == 0) return;
    /* Check all pointers in range and push them if they appear */
    /* to be valid.                                             */
    lim = t - 1 /* longword */;
    for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
        q = *p;
        GC_PUSH_ONE_STACK(q, p);
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
#ifndef THREADS
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 */
void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
ptr_t bottom;
ptr_t top;
ptr_t cold_gc_frame;
{
# ifdef ALL_INTERIOR_POINTERS
#   define EAGER_BYTES 1024
    /* Push the hot end of the stack eagerly, so that register values */
    /* saved inside GC frames are marked before they disappear.       */
    /* The rest of the marking can be deferred until later.           */
    if (0 == cold_gc_frame) {
        GC_push_all_stack(bottom, top);
        return;
    }
#   ifdef STACK_GROWS_DOWN
        GC_push_all_eager(bottom, cold_gc_frame);
        GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
#   else /* STACK_GROWS_UP */
        GC_push_all_eager(cold_gc_frame, top);
        GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
#   endif /* STACK_GROWS_UP */
# else
    GC_push_all_eager(bottom, top);
# endif
# ifdef TRACE_BUF
    GC_add_trace_entry("GC_push_all_stack", bottom, top);
# endif
}
#endif /* !THREADS */
void GC_push_all_stack(bottom, top)
ptr_t bottom;
ptr_t top;
{
# ifdef ALL_INTERIOR_POINTERS
    GC_push_all(bottom, top);
# else
    GC_push_all_eager(bottom, top);
# endif
}
#ifndef SMALL_CONFIG
/* Push all objects reachable from marked objects in the given block */
/* of size 1 objects.                                                */
void GC_push_marked1(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
    while( p < plim ) {
        mark_word = *mark_word_addr++;
        i = 0;
        while(mark_word != 0) {
            if (mark_word & 1) {
                q = p[i];
                GC_PUSH_ONE_HEAP(q, p + i);
            }
            i++;
            mark_word >>= 1;
        }
        p += WORDSZ;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
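/* Worked example for the bit scan above (hypothetical mark word): if  */
/* mark_word is 0x5, bits 0 and 2 are set, so the one-word objects at  */
/* p[0] and p[2] are treated as marked and their single words are      */
/* pushed.  The inner loop exits as soon as the shifted residue is 0,  */
/* so sparse mark words are cheap to process.                          */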
#ifndef UNALIGNED

/* Push all objects reachable from marked objects in the given block */
/* of size 2 objects.                                                */
void GC_push_marked2(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
    while( p < plim ) {
        mark_word = *mark_word_addr++;
        i = 0;
        while(mark_word != 0) {
            if (mark_word & 1) {
                q = p[i];
                GC_PUSH_ONE_HEAP(q, p + i);
                q = p[i+1];
                GC_PUSH_ONE_HEAP(q, p + i + 1);  /* source is the second word */
            }
            i += 2;
            mark_word >>= 2;
        }
        p += WORDSZ;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
/* Push all objects reachable from marked objects in the given block */
/* of size 4 objects.                                                */
/* There is a risk of mark stack overflow here.  But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely.    */
void GC_push_marked4(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[divWORDSZ(HDR_WORDS)]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
    while( p < plim ) {
        mark_word = *mark_word_addr++;
        i = 0;
        while(mark_word != 0) {
            if (mark_word & 1) {
                q = p[i];
                GC_PUSH_ONE_HEAP(q, p + i);
                q = p[i+1];
                GC_PUSH_ONE_HEAP(q, p + i + 1);
                q = p[i+2];
                GC_PUSH_ONE_HEAP(q, p + i + 2);
                q = p[i+3];
                GC_PUSH_ONE_HEAP(q, p + i + 3);
            }
            i += 4;
            mark_word >>= 4;
        }
        p += WORDSZ;
    }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}

#endif /* UNALIGNED */

#endif /* SMALL_CONFIG */
/* Push all objects reachable from marked objects in the given block */
void GC_push_marked(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;
    register int descr = hhdr -> hb_descr;
    register word * p;
    register int word_no;
    register word * lim;
    register mse * GC_mark_stack_top_reg;
    register mse * mark_stack_limit = &(GC_mark_stack[GC_mark_stack_size]);

    /* Some quick shortcuts: */
    if ((0 | DS_LENGTH) == descr) return;
    if (GC_block_empty(hhdr)/* nothing marked */) return;
#   ifdef GATHERSTATS
        GC_n_rescuing_pages++;
#   endif
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJSZ) {
        lim = (word *)h + HDR_WORDS;
    } else {
        lim = (word *)(h + 1) - sz;
    }

    switch(sz) {
#   if !defined(SMALL_CONFIG)
     case 1:
        GC_push_marked1(h, hhdr);
        break;
#   endif
#   if !defined(SMALL_CONFIG) && !defined(UNALIGNED)
     case 2:
        GC_push_marked2(h, hhdr);
        break;
     case 4:
        GC_push_marked4(h, hhdr);
        break;
#   endif
     default:
        GC_mark_stack_top_reg = GC_mark_stack_top;
        for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim;
             p += sz, word_no += sz) {
            if (mark_bit_from_hdr(hhdr, word_no)) {
                /* Mark from fields inside the object */
                PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg,
                         mark_stack_limit);
#               ifdef GATHERSTATS
                    /* Subtract this object from total, since it was */
                    /* added in twice.                               */
                    GC_composite_in_use -= sz;
#               endif
            }
        }
        GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}
#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty */
GC_bool GC_block_was_dirty(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;

    if (sz < MAXOBJSZ) {
        return(GC_page_was_dirty(h));
    } else {
        register ptr_t p = (ptr_t)h;

        sz += HDR_WORDS;
        sz = WORDS_TO_BYTES(sz);
        while (p < (ptr_t)h + sz) {
            if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
            p += HBLKSIZE;
        }
        return(FALSE);
    }
}
#endif /* SMALL_CONFIG */
/* Similar to GC_push_marked, but skip over unallocated blocks and */
/* return the address of the next plausible block.                 */
struct hblk * GC_push_next_marked(h)
struct hblk *h;
{
    register hdr * hhdr;

    h = GC_next_used_block(h);
    if (h == 0) return(0);
    hhdr = HDR(h);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages */
struct hblk * GC_push_next_marked_dirty(h)
struct hblk *h;
{
    register hdr * hhdr;

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    for (;;) {
        h = GC_next_used_block(h);
        if (h == 0) return(0);
        hhdr = HDR(h);
#       ifdef STUBBORN_ALLOC
            if (hhdr -> hb_obj_kind == STUBBORN) {
                if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
                    break;
                }
            } else {
                if (GC_block_was_dirty(h, hhdr)) break;
            }
#       else
            if (GC_block_was_dirty(h, hhdr)) break;
#       endif
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#endif
/* Similar to above, but for uncollectable pages.  Needed since we */
/* do not clear marks for such pages, even for full collections.   */
struct hblk * GC_push_next_marked_uncollectable(h)
struct hblk *h;
{
    register hdr * hhdr = HDR(h);

    for (;;) {
        h = GC_next_used_block(h);
        if (h == 0) return(0);
        hhdr = HDR(h);
        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}