/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* largest block we will allocate starting on a black   */
        /* listed block.  Must be >= HBLKSIZE.                   */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list. */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a    */
        /* single free list.                                          */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single  */
        /* bin.                                                       */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                 + UNIQUE_THRESHOLD
struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                + UNIQUE_THRESHOLD;
}
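/*
 * A worked example, assuming the default thresholds above:
 * N_HBLK_FLS = (256 - 32)/8 + 32 = 60, so indices run from 0 to 60.
 *   blocks_needed = 17  -> list 17                 (unique range)
 *   blocks_needed = 40  -> (40 - 32)/8 + 32 = 33   (compressed range)
 *   blocks_needed = 300 -> list 60                 (huge range)
 */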
# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)
# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
            GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
            GC_printf0("partially black listed\n");
        } else {
            GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}
void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;

    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        }
        GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf1("Missing header at 0x%lx!!\n",
                           (unsigned long)hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               (unsigned long)correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               (unsigned long)actual_index,
                               (unsigned long)correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}
# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and */
/* kind of objects.                                             */
/* Return FALSE on failure.                                     */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
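/*
 * An illustration of the descriptor computation above (a sketch based
 * on the standard object kinds, not original commentary): a kind whose
 * ok_descriptor is a length-style descriptor with ok_relocate_descr
 * set, such as the default NORMAL kind, ends up with hb_descr equal to
 * the object size in bytes, so the marker scans entire objects.
 * Pointer-free kinds keep a zero descriptor and are never scanned.
 */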
#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size-appropriate
 * free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    if (hhdr -> hb_prev == 0) {
        int index;
        if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
        } else {
            index = n;
        }
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        PHDR(hhdr) -> hb_next = hhdr -> hb_next;
    }
    if (0 != hhdr -> hb_next) {
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        NHDR(hhdr) -> hb_prev = hhdr -> hb_prev;
    }
}
/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr = HDR(p);

    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr && HBLK_IS_FREE(phdr)) return p;
    p = GC_prev_block(h - 1);
    if (0 != p) {
        phdr = HDR(p);
        if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
            return p;
        }
    }
    return 0;
}
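/*
 * A note on the forwarding loop above (an illustration of the header
 * scheme, not original commentary): blocks interior to a large object
 * carry small-integer forwarding pseudo-headers, so starting at h - 1
 * and repeatedly applying FORWARDED_ADDR steps back until the true
 * header of the run of blocks ending just before h is found.
 */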
/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) HDR(second) -> hb_prev = h;
    GC_invalidate_map(hhdr);
}
#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only */
/* way blocks are ever unmapped.                                      */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if (last_rec > GC_gc_no
            || (last_rec < threshold && threshold < GC_gc_no)
               /* not recently wrapped */) {
            sz = hhdr -> hb_sz;
            GC_unmap((ptr_t)h, sz);
            hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
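/*
 * A worked example of the age test above: with GC_gc_no == 100 and
 * UNMAP_THRESHOLD == 6, threshold == 94, so blocks last reclaimed
 * before collection 94 are unmapped.  The comparisons also handle the
 * 16-bit counters wrapping: last_rec > GC_gc_no means last_rec
 * predates a wrap (hence is old), while threshold < GC_gc_no rejects
 * a threshold that itself wrapped, which would make every block look
 * stale.
 */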
/* Merge all unmapped blocks that are adjacent to other free       */
/* blocks.  This may involve remapping, since all blocks are either */
/* fully mapped or fully unmapped.                                  */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        nexthdr = HDR(next);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
              GC_ASSERT(!IS_MAPPED(nexthdr));
              /* make both consistent, so that we can merge */
                if (size > nextsize) {
                  GC_remap((ptr_t)next, nextsize);
                } else {
                  GC_unmap((ptr_t)h, size);
                  hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr)) {
              GC_ASSERT(!IS_MAPPED(hhdr));
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
              }
            } else {
              /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergeable with successor */ {
            h = hhdr -> hb_next;
          }
      } /* while (h != 0) ... */
    } /* for ... */
}
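/*
 * A note on the consistency step above (a reading of the code, not
 * original commentary): when exactly one of two adjacent free blocks
 * is mapped, the smaller one is converted, so at most
 * min(size, nextsize) bytes are remapped or unmapped per merge.  For
 * example, a 3-block mapped run followed by a 1-block unmapped run
 * gets its successor remapped, and the merged 4-block run stays
 * mapped.
 */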
#endif /* USE_MUNMAP */
/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    if (!GC_install_header(rest)) return(0);
    rest_hdr = HDR(rest);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split; move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
#     ifdef GC_ASSERTIONS
        nhdr -> hb_map = 0;     /* Don't fail test for consecutive */
                                /* free blocks in GC_add_to_fl.    */
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}
struct hblk * GC_allochblk_nth();
/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz words, searching the nth free list.
 *
 * NOTE: We set the obj_map field in the header correctly.
 *       Caller is responsible for building an object freelist in the block.
 *
 * We clear the block if it is destined for large objects, and if
 * kind requires that newly allocated objects be cleared.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned char flags;    /* IGNORE_OFF_PAGE or 0 */
{
    int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
    int i;

    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) return result;
    }
    return 0;
}
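/*
 * Usage sketch (a hypothetical caller, not part of this file); sz is
 * in words, and building the object free list within the returned
 * block is the caller's job, as noted above:
 *
 *     struct hblk * h = GC_allochblk(BYTES_TO_WORDS(10000), NORMAL, 0);
 *     if (0 == h) { ... collect or expand the heap, then retry ... }
 */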
/*
 * The same, but with search restricted to the nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;    /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp     */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
        hhdr = HDR(hbp);
        for (; 0 != hbp; hbp = hhdr -> hb_next, hhdr = HDR(hbp)) {
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
#           ifdef PRESERVE_LAST
                if (size_avail != size_needed
                    && !GC_incremental && GC_should_collect()) {
                    continue;
                }
#           endif
            /* If the next heap block is obviously better, go on.        */
            /* This prevents us from disassembling a single large block  */
            /* to get tiny blocks.                                       */
            {
              signed_word next_size;

              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                thishdr = HDR(thishbp);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
              }
            }
            if ( !IS_UNCOLLECTABLE(kind) &&
                 (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                                      (word)eff_size_needed))) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp && GC_install_header(thishbp)) {
                  /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                        GC_remap((ptr_t)hbp, size_avail);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                  /* Split the block at thishbp */
                      thishdr = HDR(thishbp);
                      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                      hbp = thishbp;
                      hhdr = thishdr;
                      /* We must now allocate thishbp, since it may */
                      /* be on the wrong free list.                 */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                WARN("Needed to allocate blacklisted block at 0x%lx\n",
                     (word)hbp);
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                static unsigned count = 0;

                /* The block is completely blacklisted.  We need       */
                /* to drop some such blocks, since otherwise we spend  */
                /* all our time traversing them if pointerfree         */
                /* blocks are unpopular.                               */
                /* A dropped block will be reconsidered at next GC.    */
                if ((++count & 3) == 0) {
                  /* Allocate and drop the block in small chunks, to  */
                  /* maximize the chance that we will recover some    */
                  /* later.                                           */
                    word total_size = hhdr -> hb_sz;
                    struct hblk * limit = hbp + divHBLKSZ(total_size);
                    struct hblk * h;
                    struct hblk * prev = hhdr -> hb_prev;

                    GC_words_wasted += total_size;
                    GC_large_free_bytes -= total_size;
                    GC_remove_from_fl(hhdr, n);
                    for (h = hbp; h < limit; h++) {
                      if (h == hbp || GC_install_header(h)) {
                        hhdr = HDR(h);
                        (void) setup_header(
                                hhdr,
                                BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
                                PTRFREE, 0); /* Can't fail */
                        if (GC_debugging_started) {
                          BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
                        }
                      }
                    }
                  /* Restore hbp to point at free block */
                    hbp = prev;
                    if (0 == hbp) {
                      return GC_allochblk_nth(sz, kind, flags, n);
                    }
                    hhdr = HDR(hbp);
                }
              }
            }
            if (size_avail >= size_needed) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, size_avail);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n */
                /* is important.                                     */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

    if (0 == hbp) return 0;

    /* Notify virtual dirty bit implementation that we are about to write. */
        GC_write_hint(hbp);

    /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Clear block if necessary */
        if (GC_debugging_started
            || (sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init)) {
            BZERO(hbp + HDR_BYTES, size_needed - HDR_BYTES);
        }

    /* We just successfully allocated a block.  Restart count of */
    /* consecutive failures.                                     */
        {
            extern unsigned GC_fail_count;

            GC_fail_count = 0;
        }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}
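/*
 * A note on the blacklist walk above (an illustration, not original
 * commentary): for collectable kinds (and only for large pointer-free
 * requests), GC_is_black_listed slides the candidate start (lasthbp)
 * past blacklisted pages.  E.g. if a 4-block free run has a
 * blacklisted first block and a 3-block request follows, lasthbp
 * advances one block, size_avail drops to 3 blocks, and
 * GC_split_block peels the clean tail off for the allocation.
 */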
struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */
/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    hhdr = HDR(hbp);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    nexthdr = HDR(next);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible. */
      if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
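/*
 * An example of the coalescing above (hypothetical layout): freeing a
 * 2-block run whose successor is a free 3-block run and whose
 * predecessor is a free 1-block run leaves a single 6-block entry on
 * free list 6; the neighbors' headers are removed so one header
 * describes the whole merged run.
 */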