/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;
/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */

# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
                /* largest block we will allocate starting on a black   */
                /* listed block.  Must be >= HBLKSIZE.                  */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list    */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a      */
        /* single free list.                                            */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single    */
        /* bin.                                                         */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                 + UNIQUE_THRESHOLD + 1
struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
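/* Illustrative sketch only, not part of the collector's code: walking  */
/* free list i follows the header links, roughly                        */
/*                                                                      */
/*      struct hblk * h;                                                */
/*      hdr * hhdr;                                                     */
/*      for (h = GC_hblkfreelist[i]; h != 0; h = hhdr -> hb_next) {     */
/*          hhdr = HDR(h);                                              */
/*          ... hhdr -> hb_sz is the free block size in bytes ...       */
/*      }                                                               */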
#ifndef USE_MUNMAP

  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list.   */
  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS      */
  /* > GC_max_large_allocd_bytes?                                      */
  GC_bool GC_enough_large_bytes_left(bytes,n)
  word bytes;
  int n;
  {
    int i;

    for (i = N_HBLK_FLS; i >= n; --i) {
        bytes += GC_free_bytes[i];
        if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }
# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */
/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                        + UNIQUE_THRESHOLD + 1;
}
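/* Worked example, for illustration only: with the thresholds defined   */
/* above, a request for 1..32 blocks maps to its own list (index equals */
/* the block count), 33..255 blocks map to (blocks - 32)/8 + 33, e.g.   */
/* 40 blocks to index 34, and 256 or more blocks to index N_HBLK_FLS.   */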
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        if (0 != h) GC_printf1("Free list %ld:\n",
                               (unsigned long)i);
#     else
        if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
                               (unsigned long)i,
                               (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
             GC_printf0("partially black listed\n");
        } else {
             GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}
void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    word bytes;
    hdr *hhdr;

    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections.        */
          while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
          }
        GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf1("Missing header!! (0x%lx)\n", (unsigned long)hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}
# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and        */
/* kind of objects.                                                    */
/* Return FALSE on failure.                                            */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts.  */
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
          index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#       ifdef USE_MUNMAP
          if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
          } else {
            index = n;
          }
#       endif
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    FREE_ASSERT(GC_free_bytes[index] >= 0);
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}
/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if(HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
      phdr = HDR(p);
      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
        return p;
      }
    }
    return 0;
}
/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    GC_invalidate_map(hhdr);
}
#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only   */
/* way blocks are ever unmapped.                                        */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if (last_rec > GC_gc_no
            || (last_rec < threshold && threshold < GC_gc_no
                                        /* not recently wrapped */)) {
          sz = hhdr -> hb_sz;
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
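/* Note on the wraparound test above, for illustration: threshold is a  */
/* 16 bit value, so early on, with e.g. GC_gc_no == 3, it wraps to      */
/* 65533.  The clause threshold < GC_gc_no then fails, so blocks        */
/* reclaimed shortly before the wrap are not mistaken for old ones and  */
/* unmapped prematurely.                                                */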
/* Merge all unmapped blocks that are adjacent to other free            */
/* blocks.  This may involve remapping, since all blocks are either     */
/* fully mapped or fully unmapped.                                      */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
              GC_ASSERT(!IS_MAPPED(nexthdr));
              /* make both consistent, so that we can merge */
                if (size > nextsize) {
                  GC_remap((ptr_t)next, nextsize);
                } else {
                  GC_unmap((ptr_t)h, size);
                  hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr)) {
              GC_ASSERT(!IS_MAPPED(hhdr));
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
              }
            } else {
              /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergable with successor */ {
            h = hhdr -> hb_next;
          }
      } /* while (h != 0) ... */
    }
}
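/* For illustration: if h is a mapped 2 block chunk and next is an      */
/* unmapped 3 block chunk, size < nextsize, so the code above unmaps h  */
/* (the smaller side) rather than remapping next, then merges the two   */
/* into a single 5 block unmapped free block.                           */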
#endif /* USE_MUNMAP */
/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) return(0);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
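/* For illustration: asking GC_get_first_part for HBLKSIZE bytes out of */
/* a 4 block free block returns the first block; the remaining 3 block  */
/* tail gets a freshly installed header and goes back on free list      */
/* GC_hblk_fl_from_blocks(3) == 3.                                      */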
/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef GC_ASSERTIONS
        nhdr -> hb_map = 0;     /* Don't fail test for consecutive      */
                                /* free blocks in GC_add_to_fl.         */
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}
struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 *   for objects of size sz words, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * Unlike older versions of the collectors, the client is responsible
 * for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
{
    word blocks = OBJ_SZ_TO_BLOCKS(sz);
    int start_list = GC_hblk_fl_from_blocks(blocks);
    int i;

    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) return result;
    }
    return 0;
}
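/* Illustrative (hypothetical) call, not taken from this file: an       */
/* internal caller wanting a block for 16 word objects of the ordinary  */
/* kind would do                                                        */
/*                                                                      */
/*      struct hblk * h = GC_allochblk(16, NORMAL, 0);                  */
/*                                                                      */
/* and must then build the object free list inside *h itself.           */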
/*
 * The same, but with search restricted to nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
    /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
        for(; 0 != hbp; hbp = hhdr -> hb_next) {
            GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (!GC_use_entire_heap
                && size_avail != size_needed
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !GC_incremental && GC_should_collect()) {
#               ifdef USE_MUNMAP
                    continue;
#               else
                  /* If we have enough large blocks left to cover any  */
                  /* previous request for large blocks, we go ahead    */
                  /* and split.  Assuming a steady state, that should  */
                  /* be safe.  It means that we can use the full       */
                  /* heap if we allocate only small objects.           */
                  if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                    continue;
                  }
#               endif /* !USE_MUNMAP */
            }
            /* If the next heap block is obviously better, go on.      */
            /* This prevents us from disassembling a single large      */
            /* block to get tiny blocks.                               */
            {
              signed_word next_size;

              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                  continue;
                }
              }
            }
            if ( !IS_UNCOLLECTABLE(kind) &&
                 (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                        (word)eff_size_needed)) != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                        GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                  /* Split the block at thishbp */
                      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                      hbp = thishbp;
                      hhdr = thishdr;
                      /* We must now allocate thishbp, since it may    */
                      /* be on the wrong free list.                    */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                if (0 == GETENV("GC_NO_BLACKLIST_WARNING")) {
                  WARN("Needed to allocate blacklisted block at 0x%lx\n",
                       (word) hbp);
                }
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted.  We need     */
                  /* to drop some such blocks, since otherwise we spend */
                  /* all our time traversing them if pointerfree       */
                  /* blocks are unpopular.                             */
                  /* A dropped block will be reconsidered at next GC.  */
                  if ((++count & 3) == 0) {
                    /* Allocate and drop the block in small chunks, to */
                    /* maximize the chance that we will recover some   */
                    /* later.                                          */
                      word total_size = hhdr -> hb_sz;
                      struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;

                      GC_words_wasted += total_size;
                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
                        if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                          (void) setup_header(
                                    hhdr,
                                    BYTES_TO_WORDS(HBLKSIZE),
                                    PTRFREE, 0); /* Can't fail */
                          if (GC_debugging_started) {
                            BZERO(h, HBLKSIZE);
                          }
                        }
                      }
                    /* Restore hbp to point at free block */
                      hbp = prev;
                      if (0 == hbp) {
                        return GC_allochblk_nth(sz, kind, flags, n);
                      }
                      hhdr = HDR(hbp);
                  }
                }
              }
            }
            if( size_avail >= size_needed ) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n   */
                /* is important.                                       */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }
    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Notify virtual dirty bit implementation that we are about to write.  */
    /* Ensure that pointerfree objects are not protected if it's avoidable. */
        GC_remove_protection(hbp, divHBLKSZ(size_needed),
                             (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block.  Restart count of       */
    /* consecutive failures.                                           */
        {
            extern unsigned GC_fail_count;

            GC_fail_count = 0;
        }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}
struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
struct hblk *next, *prev;
hdr *hhdr, *prevhdr, *nexthdr;
signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
        ABORT("Duplicate large block deallocation");
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
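/* For illustration: freeing two adjacent one block chunks in sequence  */
/* leaves a single 2*HBLKSIZE free block.  The second GC_freehblk call  */
/* finds the earlier block via GC_free_block_ending_at, pulls it off    */
/* its free list, adds the sizes, and reinserts the merged block on     */
/* the two block free list.                                             */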