/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
GC_bool GC_use_entire_heap = 0;
/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* largest block we will allocate starting on a black   */
        /* listed block.  Must be >= HBLKSIZE.                   */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list    */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a      */
        /* single free list.                                            */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single    */
        /* free list.                                                   */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                 + UNIQUE_THRESHOLD
struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                        + UNIQUE_THRESHOLD;
}
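/*
 * For illustration (added comment, not in the original source), with the
 * thresholds defined above the mapping works out as follows:
 *   N_HBLK_FLS = (256 - 32)/8 + 32 = 60, so there are 61 lists (0..60).
 *   blocks_needed = 10  -> index 10  (its own list, <= UNIQUE_THRESHOLD)
 *   blocks_needed = 40  -> index (40 - 32)/8 + 32 = 33
 *   blocks_needed = 300 -> index 60  (the single list for huge blocks)
 */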
# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)
# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
             GC_printf0("partially black listed\n");
        } else {
             GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}
void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;

    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
          while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
          }
        GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf1("Missing header!!\n", hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}
# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and        */
/* kind of objects.                                                    */
/* Return FALSE on failure.                                            */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
#define FL_UNKNOWN -1

/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    if (hhdr -> hb_prev == 0) {
        int index;
        if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
        } else {
            index = n;
        }
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr * phdr;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}
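/*
 * Note (added): callers that know which list the header is on pass its
 * index n; during coalescing, where a neighbor's list is not tracked,
 * they pass FL_UNKNOWN and the index is recomputed from hb_sz.  The
 * index is only needed when the block is at the head of its list.
 */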
/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if (HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
        phdr = HDR(p);
        if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
            return p;
        }
    }
    return 0;
}
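/*
 * Note (added): the search above proceeds in two steps.  Following
 * forwarding addresses backward from h-1 cheaply locates the header of
 * the block containing h-1 when one exists; only if no valid header is
 * found do we fall back on GC_prev_block, and then we must also check
 * that the candidate block actually ends at h.
 */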
/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    GC_invalidate_map(hhdr);
}
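/*
 * Illustration (added): freeing a 3-block chunk at address h links it in
 * at the head of GC_hblkfreelist[3]; the previous head becomes its
 * hb_next, and its header's object map is invalidated so that
 * HBLK_IS_FREE holds for it.
 */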
#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only  */
/* way blocks are ever unmapped.                                       */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if (last_rec > GC_gc_no
            || last_rec < threshold && threshold < GC_gc_no
               /* not recently wrapped */) {
          sz = hhdr -> hb_sz;
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
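/*
 * Example (added): with UNMAP_THRESHOLD 6 and GC_gc_no == 100, the
 * threshold is 94, so a block last reclaimed at GC 90 is unmapped while
 * one reclaimed at GC 97 is kept.  The extra comparisons against
 * GC_gc_no guard against wraparound of the (typically 16-bit)
 * hb_last_reclaimed counter.
 */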
/* Merge all unmapped blocks that are adjacent to other free           */
/* blocks.  This may involve remapping, since all blocks are either    */
/* fully mapped or fully unmapped.                                     */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
              GC_ASSERT(!IS_MAPPED(nexthdr));
              /* make both consistent, so that we can merge */
                if (size > nextsize) {
                  GC_remap((ptr_t)next, nextsize);
                } else {
                  GC_unmap((ptr_t)h, size);
                  hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr)) {
              GC_ASSERT(!IS_MAPPED(hhdr));
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
              }
            } else {
              /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergable with successor */ {
            h = hhdr -> hb_next;
          }
      } /* while (h != 0) ... */
    }
}
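/*
 * Illustration (added): suppose a mapped 1-block free chunk is followed
 * by an unmapped 4-block free chunk.  Since 1 < 4, the code above unmaps
 * the single block rather than remapping four, i.e. it always converts
 * the smaller of the two neighbors, and the merged 5-block chunk stays
 * unmapped.
 */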
#endif /* USE_MUNMAP */
/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) return(0);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
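/*
 * Example (added): asking for bytes == 2*HBLKSIZE out of an 8-block free
 * chunk removes the chunk from its list, returns its first two blocks to
 * the caller, and pushes the remaining 6-block tail onto
 * GC_hblkfreelist[6].
 */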
/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
#     ifdef GC_ASSERTIONS
        nhdr -> hb_map = 0;     /* Don't fail test for consecutive      */
                                /* free blocks in GC_add_to_fl.         */
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}
struct hblk * GC_allochblk_nth();
/*
 * Allocate (and return pointer to) a heap block
 *   for objects of size sz words, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * We clear the block if it is destined for large objects, and if
 * kind requires that newly allocated objects be cleared.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
{
    int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
    int i;

    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) return result;
    }
    return 0;
}
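/*
 * Note (added): the search starts at the free list whose blocks are just
 * large enough and climbs toward N_HBLK_FLS, so a request is satisfied
 * from the smallest size class that has a usable block before any larger
 * chunk is split.
 */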
/*
 * The same, but with search restricted to nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp          */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to thishbp      */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
        for(; 0 != hbp; hbp = hhdr -> hb_next) {
            GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (!GC_use_entire_heap) {
                if (size_avail != size_needed
                    && USED_HEAP_SIZE >= GC_requested_heapsize
                    && !GC_incremental && GC_should_collect()) {
                    continue;
                }
            }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                      */
            {
              signed_word next_size;

              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
              }
            }
            if ( !IS_UNCOLLECTABLE(kind) &&
                 (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                                      (word)eff_size_needed))) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                        GC_remap((ptr_t)hbp, size_avail);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                  /* Split the block at thishbp */
                      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                      hbp = thishbp;
                      hhdr = thishdr;
                      /* We must now allocate thishbp, since it may     */
                      /* be on the wrong free list.                     */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                WARN("Needed to allocate blacklisted block at 0x%lx\n",
                     (word)hbp);
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                static unsigned count = 0;

                /* The block is completely blacklisted.  We need        */
                /* to drop some such blocks, since otherwise we spend   */
                /* all our time traversing them if pointerfree          */
                /* blocks are unpopular.                                */
                /* A dropped block will be reconsidered at next GC.     */
                if ((++count & 3) == 0) {
                  /* Allocate and drop the block in small chunks, to    */
                  /* maximize the chance that we will recover some      */
                  /* later.                                             */
                    word total_size = hhdr -> hb_sz;
                    struct hblk * limit = hbp + divHBLKSZ(total_size);
                    struct hblk * h;
                    struct hblk * prev = hhdr -> hb_prev;

                    GC_words_wasted += total_size;
                    GC_large_free_bytes -= total_size;
                    GC_remove_from_fl(hhdr, n);
                    for (h = hbp; h < limit; h++) {
                      if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                        (void) setup_header(
                                hhdr,
                                BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
                                PTRFREE, 0); /* Cant fail */
                        if (GC_debugging_started) {
                          BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
                        }
                      }
                    }
                  /* Restore hbp to point at free block */
                    hbp = prev;
                    if (0 == hbp) {
                      return GC_allochblk_nth(sz, kind, flags, n);
                    }
                    hhdr = HDR(hbp);
                }
              }
            }
            if( size_avail >= size_needed ) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, size_avail);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n    */
                /* is important.                                        */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

    if (0 == hbp) return 0;

    /* Notify virtual dirty bit implementation that we are about to write. */
        GC_write_hint(hbp);

    /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

        if (!setup_header(hhdr, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Clear block if necessary */
        if (GC_debugging_started
            || sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {
            BZERO(hbp + HDR_BYTES, size_needed - HDR_BYTES);
        }

    /* We just successfully allocated a block.  Restart count of        */
    /* consecutive failures.                                            */
        {
            extern unsigned GC_fail_count;

            GC_fail_count = 0;
        }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}
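/*
 * Example (added): if a 4-block request finds an 8-block free chunk whose
 * first two blocks are black listed, the scan above advances lasthbp past
 * the listed pages, leaving size_avail equal to 6 blocks; the chunk is
 * then split at the clean boundary and the request is satisfied from the
 * clean part, while the listed prefix stays on a free list.
 */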
struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
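/*
 * Example (added): freeing a block that sits between two mapped free
 * neighbors first absorbs the successor (its header is removed and its
 * size folded into hb_sz), then is itself absorbed into the predecessor,
 * so the three chunks end up as a single free-list entry with one header.
 */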