/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* #define DEBUG */
#include <stdio.h>
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;
/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
	/* largest block we will allocate starting on a black	*/
	/* listed block.  Must be >= HBLKSIZE.			*/

# define UNIQUE_THRESHOLD 32
	/* Sizes up to this many HBLKs each have their own free list.	*/
# define HUGE_THRESHOLD 256
	/* Sizes of at least this many heap blocks are mapped to a	*/
	/* single free list.						*/
# define FL_COMPRESSION 8
	/* In between sizes map this many distinct sizes to a single	*/
	/* bin.								*/

# define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
				 + UNIQUE_THRESHOLD)
struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
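/* Illustrative arithmetic, assuming the default parameters above	*/
/* (editorial sketch): N_HBLK_FLS = (256 - 32)/8 + 32 = 60, so list	*/
/* indices run from 0 to 60.  Sizes of up to 32 HBLKs get a list of	*/
/* their own; sizes between the two thresholds share one list per	*/
/* FL_COMPRESSION = 8 consecutive sizes; everything of 256 HBLKs or	*/
/* more lands on list 60.						*/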
#ifndef USE_MUNMAP

  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
	/* Number of free bytes on each list.	*/

  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS	*/
  /* > GC_max_large_allocd_bytes?					*/
  GC_bool GC_enough_large_bytes_left(bytes,n)
  word bytes;
  int n;
  {
    int i;
    for (i = N_HBLK_FLS; i >= n; --i) {
	bytes += GC_free_bytes[i];
	if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }
# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */
/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
					+ UNIQUE_THRESHOLD;
}
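/* Worked examples with the default thresholds (editorial sketch):	*/
/*	GC_hblk_fl_from_blocks(1)   == 1	(its own list)		*/
/*	GC_hblk_fl_from_blocks(32)  == 32	(last unique list)	*/
/*	GC_hblk_fl_from_blocks(40)  == 33	((40-32)/8 + 32)	*/
/*	GC_hblk_fl_from_blocks(300) == 60	(N_HBLK_FLS)		*/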
# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
	/* GC_free_bytes is not maintained with USE_MUNMAP, so only	*/
	/* the list number can be printed.				*/
	if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
#     else
	if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
			       (unsigned long)i,
			       (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
	hhdr = HDR(h);
	sz = hhdr -> hb_sz;
	GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
	total_free += sz;
	if (GC_is_black_listed(h, HBLKSIZE) != 0) {
	    GC_printf0("start black listed\n");
	} else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
	    GC_printf0("partially black listed\n");
	} else {
	    GC_printf0("not black listed\n");
	}
	h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
	GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
		   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.				  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
	hhdr = HDR(h);
	if (hhdr == wanted) return i;
	h = hhdr -> hb_next;
      }
    }
    return -1;
}
void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;

    for (i = 0; i < GC_n_heap_sects; ++i) {
	start = GC_heap_sects[i].hs_start;
	bytes = GC_heap_sects[i].hs_bytes;
	end = start + bytes;
	/* Merge in contiguous sections.	*/
	while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
	    ++i;
	    end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
	}
	GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
	for (p = start; p < end;) {
	    hhdr = HDR(p);
	    GC_printf1("\t0x%lx ", (unsigned long)p);
	    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
		GC_printf0("Missing header!!\n");
		p += HBLKSIZE;
		continue;
	    }
	    if (HBLK_IS_FREE(hhdr)) {
		int correct_index = GC_hblk_fl_from_blocks(
					divHBLKSZ(hhdr -> hb_sz));
		int actual_index;

		GC_printf1("\tfree block of size 0x%lx bytes",
			   (unsigned long)(hhdr -> hb_sz));
		if (IS_MAPPED(hhdr)) {
		    GC_printf0("\n");
		} else {
		    GC_printf0("(unmapped)\n");
		}
		actual_index = free_list_index_of(hhdr);
		if (-1 == actual_index) {
		    GC_printf1("\t\tBlock not on free list %ld!!\n",
			       correct_index);
		} else if (correct_index != actual_index) {
		    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
			       actual_index, correct_index);
		}
		p += hhdr -> hb_sz;
	    } else {
		GC_printf1("\tused for blocks of size 0x%lx bytes\n",
			   (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
		p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
	    }
	}
    }
}

# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and	*/
/* kind of objects.							*/
/* Return FALSE on failure.						*/
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;	/* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
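/* Note (editorial): hb_last_reclaimed is stamped with the current GC	*/
/* number here; with USE_MUNMAP, GC_unmap_old (below) compares it	*/
/* against GC_gc_no to decide whether a free block has sat untouched	*/
/* long enough to be unmapped.						*/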
#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts.	*/
      if (FL_UNKNOWN == n) {
	  index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
	  index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#	ifdef USE_MUNMAP
	  /* With USE_MUNMAP the index is only needed to unlink a	*/
	  /* list head, so it is computed lazily here.			*/
	  if (FL_UNKNOWN == n) {
	      index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
	  } else {
	      index = n;
	  }
#	endif
	GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
	GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
	hdr *phdr;
	GET_HDR(hhdr -> hb_prev, phdr);
	phdr -> hb_next = hhdr -> hb_next;
    }
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    FREE_ASSERT(GC_free_bytes[index] >= 0);
    if (0 != hhdr -> hb_next) {
	hdr * nhdr;
	GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
	GET_HDR(hhdr -> hb_next, nhdr);
	nhdr -> hb_prev = hhdr -> hb_prev;
    }
}
/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
	p = FORWARDED_ADDR(p,phdr);
	phdr = HDR(p);
    }
    if (0 != phdr) {
	if (HBLK_IS_FREE(phdr)) {
	    return p;
	} else {
	    return 0;
	}
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
	phdr = HDR(p);
	if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
	    return p;
	}
    }
    return 0;
}
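/* The search above proceeds in two stages (editorial note): first	*/
/* follow any header forwarding addresses back from h-1 to the start	*/
/* of the block containing it (interior blocks of a multi-block	*/
/* region carry forwarding headers); only if that yields no header at	*/
/* all do we fall back on the slower GC_prev_block scan, and then	*/
/* verify that the block found actually ends at h.			*/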
/*
 * Add hhdr to the appropriate free list.
 * Blocks are added at the head of the corresponding list.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      /* Adjacent free blocks should already have been coalesced,	*/
      /* unless one of them is unmapped.				*/
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
	GET_HDR(second, second_hdr);
	second_hdr -> hb_prev = h;
    }
    GC_invalidate_map(hhdr);
}
#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only	*/
/* way blocks are ever unmapped.					*/
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
	hhdr = HDR(h);
	if (!IS_MAPPED(hhdr)) continue;
	threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
	last_rec = hhdr -> hb_last_reclaimed;
	if (last_rec > GC_gc_no
	    || (last_rec < threshold && threshold < GC_gc_no)
				/* not recently wrapped */) {
	    sz = hhdr -> hb_sz;
	    GC_unmap((ptr_t)h, sz);
	    hhdr -> hb_flags |= WAS_UNMAPPED;
	}
      }
    }
}
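/* The odd-looking test above guards against wrap-around of the 16-bit	*/
/* hb_last_reclaimed stamp (editorial note): a block is unmapped	*/
/* either if its stamp appears to lie in the future (the counter	*/
/* wrapped), or if it is more than UNMAP_THRESHOLD collections old and	*/
/* the threshold computation itself has not just wrapped.		*/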
/* Merge all unmapped blocks that are adjacent to other free		*/
/* blocks.  This may involve remapping, since all blocks are either	*/
/* fully mapped or fully unmapped.					*/
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
	GET_HDR(h, hhdr);
	size = hhdr->hb_sz;
	next = (struct hblk *)((word)h + size);
	GET_HDR(next, nexthdr);
	/* Coalesce with successor, if possible */
	if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
	    nextsize = nexthdr -> hb_sz;
	    if (IS_MAPPED(hhdr)) {
		GC_ASSERT(!IS_MAPPED(nexthdr));
		/* make both consistent, so that we can merge */
		if (size > nextsize) {
		    GC_remap((ptr_t)next, nextsize);
		} else {
		    GC_unmap((ptr_t)h, size);
		    hhdr -> hb_flags |= WAS_UNMAPPED;
		}
	    } else if (IS_MAPPED(nexthdr)) {
		GC_ASSERT(!IS_MAPPED(hhdr));
		if (size > nextsize) {
		    GC_unmap((ptr_t)next, nextsize);
		} else {
		    GC_remap((ptr_t)h, size);
		    hhdr -> hb_flags &= ~WAS_UNMAPPED;
		}
	    } else {
		/* Unmap any gap in the middle */
		GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
	    }
	    /* If they are both unmapped, we merge, but leave unmapped. */
	    GC_remove_from_fl(hhdr, i);
	    GC_remove_from_fl(nexthdr, FL_UNKNOWN);
	    hhdr -> hb_sz += nexthdr -> hb_sz;
	    GC_remove_header(next);
	    GC_add_to_fl(h, hhdr);
	    /* Start over at beginning of list */
	    h = GC_hblkfreelist[i];
	} else /* not mergeable with successor */ {
	    h = hhdr -> hb_next;
	}
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */
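/* Summary of the mapped/unmapped reconciliation above (editorial	*/
/* sketch): when exactly one of the two neighbors is mapped, the	*/
/* smaller of the two is converted to the state of the larger, so the	*/
/* cheaper map or unmap call wins; when both are unmapped, only the	*/
/* page gap between them needs to be unmapped before merging.		*/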
/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) return(0);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
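/* For example (editorial sketch): carving 2*HBLKSIZE bytes out of an	*/
/* 8*HBLKSIZE free block returns the first two heap blocks to the	*/
/* caller and re-queues the remaining 6*HBLKSIZE on the free list for	*/
/* 6-block chunks.							*/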
/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;	/* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
    nhdr -> hb_prev = prev;
    nhdr -> hb_next = next;
    nhdr -> hb_sz = total_size - h_size;
    nhdr -> hb_flags = 0;
    if (0 != prev) {
	HDR(prev) -> hb_next = n;
    } else {
	GC_hblkfreelist[index] = n;
    }
    if (0 != next) {
	HDR(next) -> hb_prev = n;
    }
    INCR_FREE_BYTES(index, -(signed_word)h_size);
    FREE_ASSERT(GC_free_bytes[index] > 0);
#   ifdef GC_ASSERTIONS
      nhdr -> hb_map = 0;	/* Don't fail test for consecutive	*/
				/* free blocks in GC_add_to_fl.		*/
#   endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}
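/* Example of the split arithmetic (editorial sketch): if h is a	*/
/* 5-block free region and n = h + 2, then h keeps h_size =		*/
/* 2*HBLKSIZE bytes on its (possibly different) free list, while nhdr	*/
/* describes the remaining 3*HBLKSIZE bytes that the caller is about	*/
/* to allocate.								*/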
struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 *   for objects of size sz words, starting the search at the free
 *   list appropriate to the request size and moving to larger lists.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * Unlike older versions of the collectors, the client is responsible
 * for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
{
    word blocks = OBJ_SZ_TO_BLOCKS(sz);
    int start_list = GC_hblk_fl_from_blocks(blocks);
    int i;

    for (i = start_list; i <= N_HBLK_FLS; ++i) {
	struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
	if (0 != result) {
	    return result;
	}
    }
    return 0;
}
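/* Editorial sketch of the search order: assuming 4096-byte heap	*/
/* blocks and 4-byte words, a request for sz = 3000 words (12000	*/
/* bytes) needs OBJ_SZ_TO_BLOCKS(sz) = 3 blocks, so the loop tries	*/
/* list 3 first and then walks up through the larger lists until one	*/
/* of the GC_allochblk_nth calls succeeds.				*/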
/*
 * The same, but with the search restricted to the nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;		/* Header corr. to hbp		*/
    register struct hblk *thishbp;
    register hdr * thishdr;		/* Header corr. to thishbp	*/
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;	/* bytes available in this block	*/

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
	hbp = GC_hblkfreelist[n];
	for(; 0 != hbp; hbp = hhdr -> hb_next) {
	    GET_HDR(hbp, hhdr);
	    size_avail = hhdr->hb_sz;
	    if (size_avail < size_needed) continue;
	    if (!GC_use_entire_heap
		&& size_avail != size_needed
		&& USED_HEAP_SIZE >= GC_requested_heapsize
		&& !GC_incremental && GC_should_collect()) {
#		ifdef USE_MUNMAP
		    continue;
#		else
		    /* If we have enough large blocks left to cover any	*/
		    /* previous request for large blocks, we go ahead	*/
		    /* and split.  Assuming a steady state, that should	*/
		    /* be safe.  It means that we can use the full	*/
		    /* heap if we allocate only small objects.		*/
		    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
			continue;
		    }
#		endif /* !USE_MUNMAP */
	    }
	    /* If the next heap block is obviously better, go on.	*/
	    /* This prevents us from disassembling a single large block	*/
	    /* to get tiny blocks.					*/
	    {
	      signed_word next_size;

	      thishbp = hhdr -> hb_next;
	      if (thishbp != 0) {
		GET_HDR(thishbp, thishdr);
		next_size = (signed_word)(thishdr -> hb_sz);
		if (next_size < size_avail
		    && next_size >= size_needed
		    && !GC_is_black_listed(thishbp, (word)size_needed)) {
		    continue;
		}
	      }
	    }
	    if ( !IS_UNCOLLECTABLE(kind) &&
		 (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
	      struct hblk * lasthbp = hbp;
	      ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
	      signed_word orig_avail = size_avail;
	      signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
						HBLKSIZE
						: size_needed);

	      /* Skip over any leading blacklisted portion of the block. */
	      while ((ptr_t)lasthbp <= search_end
		     && (thishbp = GC_is_black_listed(lasthbp,
						      (word)eff_size_needed))
			!= 0) {
		lasthbp = thishbp;
	      }
	      size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
	      thishbp = lasthbp;
	      if (size_avail >= size_needed) {
		if (thishbp != hbp &&
		    0 != (thishdr = GC_install_header(thishbp))) {
		  /* Make sure it's mapped before we mangle it. */
#		    ifdef USE_MUNMAP
		      if (!IS_MAPPED(hhdr)) {
			GC_remap((ptr_t)hbp, hhdr -> hb_sz);
			hhdr -> hb_flags &= ~WAS_UNMAPPED;
		      }
#		    endif
		  /* Split the block at thishbp */
		  GC_split_block(hbp, hhdr, thishbp, thishdr, n);
		  /* Advance to thishbp */
		  hbp = thishbp;
		  hhdr = thishdr;
		  /* We must now allocate thishbp, since it may		*/
		  /* be on the wrong free list.				*/
		}
	      } else if (size_needed > (signed_word)BL_LIMIT
			 && orig_avail - size_needed
			    > (signed_word)BL_LIMIT) {
		/* Punt, since anything else risks unreasonable heap growth. */
		if (0 == GETENV("GC_NO_BLACKLIST_WARNING")) {
		  WARN("Needed to allocate blacklisted block at 0x%lx\n",
		       (word)hbp);
		}
		size_avail = orig_avail;
	      } else if (size_avail == 0 && size_needed == HBLKSIZE
			 && IS_MAPPED(hhdr)) {
		if (!GC_find_leak) {
		  static unsigned count = 0;

		  /* The block is completely blacklisted.  We need	*/
		  /* to drop some such blocks, since otherwise we spend	*/
		  /* all our time traversing them if pointerfree	*/
		  /* blocks are unpopular.				*/
		  /* A dropped block will be reconsidered at next GC.	*/
		  if ((++count & 3) == 0) {
		    /* Allocate and drop the block in small chunks, to	*/
		    /* maximize the chance that we will recover some	*/
		    /* later.						*/
		      word total_size = hhdr -> hb_sz;
		      struct hblk * limit = hbp + divHBLKSZ(total_size);
		      struct hblk * h;
		      struct hblk * prev = hhdr -> hb_prev;

		      /* total_size is in bytes; convert before adding	*/
		      /* it to the word-denominated waste counter.	*/
		      GC_words_wasted += BYTES_TO_WORDS(total_size);
		      GC_large_free_bytes -= total_size;
		      GC_remove_from_fl(hhdr, n);
		      for (h = hbp; h < limit; h++) {
			if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
			  (void) setup_header(
				  hhdr,
				  BYTES_TO_WORDS(HBLKSIZE),
				  PTRFREE, 0); /* Can't fail */
			  if (GC_debugging_started) {
			    BZERO(h, HBLKSIZE);
			  }
			}
		      }
		    /* Restore hbp to point at free block */
		      hbp = prev;
		      if (0 == hbp) {
			/* The dropped block was the list head;		*/
			/* retry the whole list.			*/
			return GC_allochblk_nth(sz, kind, flags, n);
		      }
		      hhdr = HDR(hbp);
		  }
		}
	      }
	    }
	    if( size_avail >= size_needed ) {
#		ifdef USE_MUNMAP
		  if (!IS_MAPPED(hhdr)) {
		    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
		    hhdr -> hb_flags &= ~WAS_UNMAPPED;
		  }
#		endif
		/* hbp may be on the wrong freelist; the parameter n	*/
		/* is important.					*/
		hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
		break;
	    }
	}

    if (0 == hbp) return 0;
    /* Notify virtual dirty bit implementation that we are about to write. */
    GC_write_hint(hbp);

    /* Add it to map of valid blocks */
    if (!GC_install_counts(hbp, (word)size_needed)) return(0);
    /* This leaks memory under very rare conditions. */

    /* Set up header */
    if (!setup_header(hhdr, sz, kind, flags)) {
	GC_remove_counts(hbp, (word)size_needed);
	return(0); /* ditto */
    }

    /* We just successfully allocated a block.  Restart count of	*/
    /* consecutive failures.						*/
    {
	extern unsigned GC_fail_count;

	GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}
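/* Typical call shape (editorial sketch): the small-object allocation	*/
/* path obtains a fresh block for a given object size and kind with	*/
/* something like						        */
/*	h = GC_allochblk(sz, NORMAL, 0);				*/
/* and then builds its free list of sz-word objects inside *h, as the	*/
/* comment above GC_allochblk requires.					*/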
struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    /* While a block is in use, hb_sz is its object size in words; for	*/
    /* a free block it holds the total size in bytes.  Convert here.	*/
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case */
    if (HBLK_IS_FREE(hhdr)) {
	GC_printf1("Duplicate large block deallocation of 0x%lx\n",
		   (unsigned long) hbp);
	ABORT("Duplicate large block deallocation");
    }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
    if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
	GC_remove_from_fl(nexthdr, FL_UNKNOWN);
	hhdr -> hb_sz += nexthdr -> hb_sz;
	GC_remove_header(next);
    }
    /* Coalesce with predecessor, if possible. */
    if (0 != prev) {
	prevhdr = HDR(prev);
	if (IS_MAPPED(prevhdr)) {
	    GC_remove_from_fl(prevhdr, FL_UNKNOWN);
	    prevhdr -> hb_sz += hhdr -> hb_sz;
	    GC_remove_header(hbp);
	    hbp = prev;
	    hhdr = prevhdr;
	}
    }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}