/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#define DEBUG
#undef DEBUG

#include <stdio.h>
#include "gc_priv.h"

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */

# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* largest block we will allocate starting on a black   */
        /* listed block.  Must be >= HBLKSIZE.                   */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list.  */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a     */
        /* single free list.                                           */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single   */
        /* bin.                                                        */

# define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                     + UNIQUE_THRESHOLD)

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
           + UNIQUE_THRESHOLD;
}
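
/* A minimal sketch (not compiled into the collector; the helper name  */
/* is hypothetical) of how the binning above behaves with the default  */
/* thresholds: sizes of 1..32 blocks get (nearly) individual lists,    */
/* sizes of 33..255 blocks share one list per FL_COMPRESSION (8)       */
/* sizes, and anything of 256 blocks or more lands on list             */
/* N_HBLK_FLS (60).                                                    */
#if 0
static void example_fl_binning()
{
    GC_ASSERT(GC_hblk_fl_from_blocks(1) == 1);
    GC_ASSERT(GC_hblk_fl_from_blocks(32) == 32);   /* UNIQUE_THRESHOLD */
    GC_ASSERT(GC_hblk_fl_from_blocks(33) == 32);   /* (33-32)/8 + 32   */
    GC_ASSERT(GC_hblk_fl_from_blocks(40) == 33);   /* (40-32)/8 + 32   */
    GC_ASSERT(GC_hblk_fl_from_blocks(255) == 59);  /* (255-32)/8 + 32  */
    GC_ASSERT(GC_hblk_fl_from_blocks(256) == 60);  /* HUGE_THRESHOLD   */
}
#endif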

# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
            GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
            GC_printf0("partially black listed\n");
        } else {
            GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        }
        GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf0("Missing header!!\n");
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and */
/* kind of objects.                                              */
/* Return FALSE on failure.                                      */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
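
/* Illustrative sketch (not compiled; assumes 4-byte words and a kind  */
/* whose ok_descriptor is a plain length tag of 0 with                 */
/* ok_relocate_descr set, which is the common simple case):            */
#if 0
static word example_descr_for(word sz)  /* sz in words */
{
    word descr = 0;                 /* ok_descriptor: length tag        */
    descr += WORDS_TO_BYTES(sz);    /* relocated by the object size     */
    return descr;                   /* sz == 4  =>  descr == 16: a      */
                                    /* 16 byte object, any word of      */
                                    /* which may hold a pointer         */
}
#endif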

#define FL_UNKNOWN -1

/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    if (hhdr -> hb_prev == 0) {
        int index;
        if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
        } else {
            index = n;
        }
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        PHDR(hhdr) -> hb_next = hhdr -> hb_next;
    }
    if (0 != hhdr -> hb_next) {
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        NHDR(hhdr) -> hb_prev = hhdr -> hb_prev;
    }
}

/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr = HDR(p);

    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr && HBLK_IS_FREE(phdr)) return p;
    p = GC_prev_block(h - 1);
    if (0 != p) {
      phdr = HDR(p);
      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
        return p;
      }
    }
    return 0;
}

/*
 * Add hhdr to the appropriate free list.
 * The block is inserted at the head of the list for its size class.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) HDR(second) -> hb_prev = h;
    GC_invalidate_map(hhdr);
}
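
/* A minimal sketch (not compiled; the helper name is hypothetical) of */
/* the doubly-linked-list invariant GC_add_to_fl and                   */
/* GC_remove_from_fl maintain: a list head has a null hb_prev, and     */
/* each node's successor points back to it through hb_prev.            */
#if 0
static void example_check_fl_links(int index)
{
    struct hblk * h = GC_hblkfreelist[index];

    GC_ASSERT(h == 0 || HDR(h) -> hb_prev == 0);
    while (h != 0) {
        hdr * hhdr = HDR(h);

        if (hhdr -> hb_next != 0) {
            GC_ASSERT(HDR(hhdr -> hb_next) -> hb_prev == h);
        }
        h = hhdr -> hb_next;
    }
}
#endif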

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only */
/* way blocks are ever unmapped.                                       */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if (last_rec > GC_gc_no
            || (last_rec < threshold && threshold < GC_gc_no)
               /* not recently wrapped */) {
          sz = hhdr -> hb_sz;
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
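
/* Worked example of the age test above (illustrative): with           */
/* UNMAP_THRESHOLD == 6 and GC_gc_no == 100, threshold == 94, so a     */
/* block last reclaimed at collection 93 or earlier is unmapped,       */
/* while one reclaimed at collection 95 stays mapped.  Because         */
/* hb_last_reclaimed is only 16 bits, the extra guards                 */
/* (last_rec > GC_gc_no, threshold < GC_gc_no) skip blocks whose       */
/* stamp may have wrapped, erring on the side of keeping them mapped.  */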

/* Merge all unmapped blocks that are adjacent to other free   */
/* blocks.  This may involve remapping, since all blocks are   */
/* either fully mapped or fully unmapped.                      */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        nexthdr = HDR(next);
        /* Coalesce with successor, if possible */
        if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
          nextsize = nexthdr -> hb_sz;
          if (IS_MAPPED(hhdr)) {
            GC_ASSERT(!IS_MAPPED(nexthdr));
            /* make both consistent, so that we can merge */
            if (size > nextsize) {
              GC_remap((ptr_t)next, nextsize);
            } else {
              GC_unmap((ptr_t)h, size);
              hhdr -> hb_flags |= WAS_UNMAPPED;
            }
          } else if (IS_MAPPED(nexthdr)) {
            GC_ASSERT(!IS_MAPPED(hhdr));
            if (size > nextsize) {
              GC_unmap((ptr_t)next, nextsize);
            } else {
              GC_remap((ptr_t)h, size);
              hhdr -> hb_flags &= ~WAS_UNMAPPED;
            }
          } else {
            /* Unmap any gap in the middle */
            GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
          }
          /* If they are both unmapped, we merge, but leave unmapped. */
          GC_remove_from_fl(hhdr, i);
          GC_remove_from_fl(nexthdr, FL_UNKNOWN);
          hhdr -> hb_sz += nexthdr -> hb_sz;
          GC_remove_header(next);
          GC_add_to_fl(h, hhdr);
          /* Start over at beginning of list */
          h = GC_hblkfreelist[i];
        } else /* not mergable with successor */ {
          h = hhdr -> hb_next;
        }
      } /* while (h != 0) ... */
    } /* for ... */
}
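
/* Summary of the cases above (a reading of the code, not from the     */
/* original source): when exactly one of the two adjacent free blocks  */
/* is mapped, the smaller one is converted to the state of the larger, */
/* since that touches the least memory:                                */
/*   h mapped, next unmapped: if h is larger, remap next; else unmap h */
/*   h unmapped, next mapped: if h is larger, unmap next; else remap h */
/*   both unmapped:           unmap the alignment gap, merge unmapped  */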

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    if (!GC_install_header(rest)) return(0);
    rest_hdr = HDR(rest);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
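
/* Worked example (illustrative, not from the original source): if h   */
/* heads a free block of 5*HBLKSIZE bytes sitting on list 5 and        */
/* 2*HBLKSIZE bytes are requested, the 3*HBLKSIZE byte remainder       */
/* starting at h+2 receives a fresh header and is pushed onto list 3,  */
/* while h itself is returned for the caller to set up.                */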

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
#   ifdef GC_ASSERTIONS
      nhdr -> hb_map = 0;       /* Don't fail test for consecutive */
                                /* free blocks in GC_add_to_fl.    */
#   endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}

struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz words, searching the appropriate
 * free lists in order of increasing size class.
 *
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * We clear the block if it is destined for large objects, and if
 * kind requires that newly allocated objects be cleared.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
{
    int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
    int i;
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) return result;
    }
    return 0;
}
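
/* Hypothetical caller sketch (not from this file): allocation sites   */
/* in the collector typically trigger a collection or heap expansion   */
/* and retry when GC_allochblk fails.  The helper below is an          */
/* illustration only.                                                  */
#if 0
static struct hblk * example_alloc_large(word sz /* in words */, int kind)
{
    struct hblk * h = GC_allochblk(sz, kind, 0);

    if (0 == h) {
        /* e.g. collect or grow the heap here, then retry once. */
        h = GC_allochblk(sz, kind, 0);
    }
    return h;
}
#endif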

/*
 * The same, but with search restricted to nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;            /* Header corr. to hbp */
    register struct hblk *thishbp;
    register hdr * thishdr;         /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
    hbp = GC_hblkfreelist[n];
    hhdr = HDR(hbp);
    for(; 0 != hbp; hbp = hhdr -> hb_next, hhdr = HDR(hbp)) {
        size_avail = hhdr->hb_sz;
        if (size_avail < size_needed) continue;
#       ifdef PRESERVE_LAST
            if (size_avail != size_needed
                && !GC_incremental && GC_should_collect()) {
                continue;
            }
#       endif
        /* If the next heap block is obviously better, go on.       */
        /* This prevents us from disassembling a single large block */
        /* to get tiny blocks.                                      */
        {
          signed_word next_size;

          thishbp = hhdr -> hb_next;
          if (thishbp != 0) {
            thishdr = HDR(thishbp);
            next_size = (signed_word)(thishdr -> hb_sz);
            if (next_size < size_avail
                && next_size >= size_needed
                && !GC_is_black_listed(thishbp, (word)size_needed)) {
                continue;
            }
          }
        }
        if ( !IS_UNCOLLECTABLE(kind) &&
             (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
          struct hblk * lasthbp = hbp;
          ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
          signed_word orig_avail = size_avail;
          signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                          HBLKSIZE
                                          : size_needed);

          /* Skip over any black listed prefix of the block. */
          while ((ptr_t)lasthbp <= search_end
                 && (thishbp = GC_is_black_listed(lasthbp,
                                                  (word)eff_size_needed))) {
            lasthbp = thishbp;
          }
          size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
          thishbp = lasthbp;
          if (size_avail >= size_needed) {
            if (thishbp != hbp && GC_install_header(thishbp)) {
              /* Make sure it's mapped before we mangle it. */
#             ifdef USE_MUNMAP
                if (!IS_MAPPED(hhdr)) {
                  GC_remap((ptr_t)hbp, size_avail);
                  hhdr -> hb_flags &= ~WAS_UNMAPPED;
                }
#             endif
              /* Split the block at thishbp */
              thishdr = HDR(thishbp);
              GC_split_block(hbp, hhdr, thishbp, thishdr, n);
              /* Advance to thishbp */
              hbp = thishbp;
              hhdr = thishdr;
              /* We must now allocate thishbp, since it may */
              /* be on the wrong free list.                 */
            }
          } else if (size_needed > (signed_word)BL_LIMIT
                     && orig_avail - size_needed
                        > (signed_word)BL_LIMIT) {
            /* Punt, since anything else risks unreasonable heap growth. */
            WARN("Needed to allocate blacklisted block at 0x%lx\n",
                 (word)hbp);
            size_avail = orig_avail;
          } else if (size_avail == 0 && size_needed == HBLKSIZE
                     && IS_MAPPED(hhdr)) {
            if (!GC_find_leak) {
              static unsigned count = 0;

              /* The block is completely blacklisted.  We need      */
              /* to drop some such blocks, since otherwise we spend */
              /* all our time traversing them if pointerfree        */
              /* blocks are unpopular.                              */
              /* A dropped block will be reconsidered at next GC.   */
              if ((++count & 3) == 0) {
                /* Allocate and drop the block in small chunks, to */
                /* maximize the chance that we will recover some   */
                /* later.                                          */
                word total_size = hhdr -> hb_sz;
                struct hblk * limit = hbp + divHBLKSZ(total_size);
                struct hblk * h;
                struct hblk * prev = hhdr -> hb_prev;

                GC_words_wasted += total_size;
                GC_large_free_bytes -= total_size;
                GC_remove_from_fl(hhdr, n);
                for (h = hbp; h < limit; h++) {
                  if (h == hbp || GC_install_header(h)) {
                    hhdr = HDR(h);
                    (void) setup_header(
                              hhdr,
                              BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
                              PTRFREE, 0); /* Can't fail */
                    if (GC_debugging_started) {
                      BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
                    }
                  }
                }
                /* Restore hbp to point at free block */
                hbp = prev;
                if (0 == hbp) {
                  /* The dropped block was the list head; */
                  /* restart the search from the new head. */
                  return GC_allochblk_nth(sz, kind, flags, n);
                }
                hhdr = HDR(hbp);
              }
            }
          }
        }
        if( size_avail >= size_needed ) {
#           ifdef USE_MUNMAP
              if (!IS_MAPPED(hhdr)) {
                GC_remap((ptr_t)hbp, size_avail);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
              }
#           endif
            /* hbp may be on the wrong freelist; the parameter n */
            /* is important.                                     */
            hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
            break;
        }
    }

    if (0 == hbp) return 0;

    /* Notify virtual dirty bit implementation that we are about to write. */
    GC_write_hint(hbp);

    /* Add it to map of valid blocks */
      if (!GC_install_counts(hbp, (word)size_needed)) return(0);
      /* This leaks memory under very rare conditions. */

    /* Set up header */
      if (!setup_header(hhdr, sz, kind, flags)) {
          GC_remove_counts(hbp, (word)size_needed);
          return(0); /* ditto */
      }

    /* Clear block if necessary */
      if (GC_debugging_started
          || (sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init)) {
          BZERO(hbp + HDR_BYTES, size_needed - HDR_BYTES);
      }

    /* We just successfully allocated a block.  Restart count of */
    /* consecutive failures.                                     */
      {
          extern unsigned GC_fail_count;

          GC_fail_count = 0;
      }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    hhdr = HDR(hbp);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
          GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                     (unsigned long) hbp);
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    nexthdr = HDR(next);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
          GC_remove_from_fl(nexthdr, FL_UNKNOWN);
          hhdr -> hb_sz += nexthdr -> hb_sz;
          GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
          prevhdr = HDR(prev);
          if (IS_MAPPED(prevhdr)) {
              GC_remove_from_fl(prevhdr, FL_UNKNOWN);
              prevhdr -> hb_sz += hhdr -> hb_sz;
              GC_remove_header(hbp);
              hbp = prev;
              hhdr = prevhdr;
          }
      }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
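
/* Worked example (illustrative): freeing B when the heap layout is    */
/* [A free][B][C free], all mapped, first folds C into B (removing C   */
/* from its free list and discarding its header), then folds the       */
/* combined block into A, so a single free block spanning A, B and C   */
/* is re-added with GC_add_to_fl.  Unmapped neighbors are skipped      */
/* here and merged later by GC_merge_unmapped.                         */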