PR target/11183
[official-gcc.git] / boehm-gc / allchblk.c
blob 7d4cbd82f1314ef9761706e656c18f38f4856623
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/* #define DEBUG */
#include <stdio.h>
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */

# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* largest block we will allocate starting on a black */
        /* listed block.  Must be >= HBLKSIZE.                 */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list. */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a */
        /* single free list.                                        */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single */
        /* bin.                                                       */

# define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                     + UNIQUE_THRESHOLD)
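        /* With the default thresholds above this evaluates to      */
        /* (256 - 32)/8 + 32 == 60, so free list indices run from 0 */
        /* through 60.                                               */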

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

#ifndef USE_MUNMAP

  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list. */

  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS */
  /* > GC_max_large_allocd_bytes?                                  */
  GC_bool GC_enough_large_bytes_left(bytes,n)
  word bytes;
  int n;
  {
    int i;
    for (i = N_HBLK_FLS; i >= n; --i) {
        bytes += GC_free_bytes[i];
        if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }
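
  /* Used by GC_allochblk_nth below to decide whether splitting a   */
  /* large block for a smaller request would leave too little room  */
  /* for previously observed large requests.                        */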

# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
           + UNIQUE_THRESHOLD;
}
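
/* For example, a 40 block request maps to index (40 - 32)/8 + 32 == 33. */
/* Requests of 1..32 blocks get their own exact-fit lists (with sizes    */
/* 33..39 sharing index 32), and anything of 256 blocks or more shares   */
/* list N_HBLK_FLS (60).                                                  */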

# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        /* GC_free_bytes is not maintained in this case, so no */
        /* per-list total can be reported.                      */
        if (0 != h) GC_printf1("Free list %ld:\n",
                               (unsigned long)i);
#     else
        if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
                               (unsigned long)i,
                               (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
            GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
            GC_printf0("partially black listed\n");
        } else {
            GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        }
        GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf0("Missing header!!\n");
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and */
/* kind of objects.                                              */
/* Return FALSE on failure.                                      */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}

#define FL_UNKNOWN -1

/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts. */
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
          index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#       ifdef USE_MUNMAP
          if (FL_UNKNOWN == n) {
              index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
          } else {
              index = n;
          }
#       endif
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    FREE_ASSERT(GC_free_bytes[index] >= 0);
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}
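
/* Note the asymmetry above: without USE_MUNMAP the list index is     */
/* always needed, to keep GC_free_bytes consistent; with USE_MUNMAP,  */
/* INCR_FREE_BYTES expands to nothing, so the index is computed only  */
/* when the block being removed is the list head.                     */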

/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if (HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
        phdr = HDR(p);
        if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
            return p;
        }
    }
    return 0;
}
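
/* Two lookup strategies are tried above: first we follow forwarding  */
/* addresses back from the block immediately before h (cheap when the */
/* predecessor is part of a large block); only if that yields no      */
/* header do we fall back to the more expensive GC_prev_block search. */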

/*
 * Add hhdr to the appropriate free list.
 * Blocks are inserted at the head of the list; the lists are not
 * maintained in address order.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      /* Adjacent free blocks should already have been coalesced, */
      /* unless one of them is unmapped.                           */
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
        GET_HDR(second, second_hdr);
        second_hdr -> hb_prev = h;
    }
    GC_invalidate_map(hhdr);
}
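
/* Since blocks are pushed on the front, allocation from a list below */
/* is effectively first fit in most-recently-freed order, not address */
/* order.                                                              */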

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only */
/* way blocks are ever unmapped.                                       */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
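        /* hb_last_reclaimed is only 16 bits wide, so GC_gc_no can wrap */
        /* around it.  The test below treats the block as old either    */
        /* when last_rec appears to lie in the future (the 16 bit field */
        /* wrapped) or when it is below the threshold and the threshold */
        /* computation itself did not just wrap.                        */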
        if (last_rec > GC_gc_no
            || (last_rec < threshold && threshold < GC_gc_no)
               /* not recently wrapped */) {
            sz = hhdr -> hb_sz;
            GC_unmap((ptr_t)h, sz);
            hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}

/* Merge all unmapped blocks that are adjacent to other free blocks. */
/* This may involve remapping, since all blocks are either fully      */
/* mapped or fully unmapped.                                          */
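/* Before two neighbors can be merged they must agree on mapping      */
/* state: the code below converts the smaller of the pair (remapping  */
/* or unmapping it) so that the combined block is uniform, then       */
/* relinks the merged block on the free list for its new size.        */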
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
        if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
                GC_ASSERT(!IS_MAPPED(nexthdr));
                /* make both consistent, so that we can merge */
                if (size > nextsize) {
                    GC_remap((ptr_t)next, nextsize);
                } else {
                    GC_unmap((ptr_t)h, size);
                    hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr)) {
                GC_ASSERT(!IS_MAPPED(hhdr));
                if (size > nextsize) {
                    GC_unmap((ptr_t)next, nextsize);
                } else {
                    GC_remap((ptr_t)h, size);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                }
            } else {
                /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
        } else /* not mergable with successor */ {
            h = hhdr -> hb_next;
        }
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) return(0);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
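
/* The failure path here (GC_install_header returning 0) can lose the */
/* block, since it has already been removed from its free list;       */
/* compare the "leaks memory under very rare conditions" comment in   */
/* GC_allochblk_nth below.                                             */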

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
    nhdr -> hb_prev = prev;
    nhdr -> hb_next = next;
    nhdr -> hb_sz = total_size - h_size;
    nhdr -> hb_flags = 0;
    if (0 != prev) {
        HDR(prev) -> hb_next = n;
    } else {
        GC_hblkfreelist[index] = n;
    }
    if (0 != next) {
        HDR(next) -> hb_prev = n;
    }
    INCR_FREE_BYTES(index, -(signed_word)h_size);
    FREE_ASSERT(GC_free_bytes[index] > 0);
#   ifdef GC_ASSERTIONS
      nhdr -> hb_map = 0;       /* Don't fail test for consecutive */
                                /* free blocks in GC_add_to_fl.     */
#   endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}
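
/* After the split, h keeps the leading h_size bytes and goes back on */
/* the free list appropriate to its new (smaller) size, while n stays */
/* on the original list only long enough for the caller to allocate   */
/* it immediately (see the blacklist handling in GC_allochblk_nth).    */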

struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 *   for objects of size sz words, searching the free lists
 *   for the requested size class and larger.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * Unlike older versions of the collectors, the client is responsible
 * for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
{
    word blocks = OBJ_SZ_TO_BLOCKS(sz);
    int start_list = GC_hblk_fl_from_blocks(blocks);
    int i;

    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) {
            return result;
        }
    }
    return 0;
}
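
/* If the best-fit list (start_list) has nothing usable, the loop     */
/* above escalates to coarser lists, so a request can be satisfied by */
/* splitting a much larger block; GC_allochblk_nth decides whether    */
/* such a split is acceptable.                                         */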

/*
 * The same, but with search restricted to nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
        for (; 0 != hbp; hbp = hhdr -> hb_next) {
            GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (!GC_use_entire_heap
                && size_avail != size_needed
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !GC_incremental && GC_should_collect()) {
#               ifdef USE_MUNMAP
                    continue;
#               else
                    /* If we have enough large blocks left to cover any */
                    /* previous request for large blocks, we go ahead   */
                    /* and split.  Assuming a steady state, that should */
                    /* be safe.  It means that we can use the full      */
                    /* heap if we allocate only small objects.          */
                    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                        continue;
                    }
#               endif /* !USE_MUNMAP */
            }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                       */
            {
              signed_word next_size;

              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
              }
            }
            if (!IS_UNCOLLECTABLE(kind) &&
                (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

              /* Skip past any black listed prefix of this block. */
              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                                      (word)eff_size_needed))
                        != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                        GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                  /* Split the block at thishbp */
                  GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                  hbp = thishbp;
                  hhdr = thishdr;
                  /* We must now allocate thishbp, since it may */
                  /* be on the wrong free list.                  */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                if (++GC_large_alloc_warn_suppressed
                    >= GC_large_alloc_warn_interval) {
                  WARN("Repeated allocation of very large block "
                       "(appr. size %ld):\n"
                       "\tMay lead to memory leak and poor performance.\n",
                       size_needed);
                  GC_large_alloc_warn_suppressed = 0;
                }
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted.  We need      */
                  /* to drop some such blocks, since otherwise we spend */
                  /* all our time traversing them if pointerfree        */
                  /* blocks are unpopular.                              */
                  /* A dropped block will be reconsidered at next GC.   */
                  if ((++count & 3) == 0) {
                    /* Allocate and drop the block in small chunks, to */
                    /* maximize the chance that we will recover some    */
                    /* later.                                           */
                      word total_size = hhdr -> hb_sz;
                      struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;

                      GC_words_wasted += total_size;
                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
                        if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                          (void) setup_header(
                                  hhdr,
                                  BYTES_TO_WORDS(HBLKSIZE),
                                  PTRFREE, 0); /* Can't fail */
                          if (GC_debugging_started) {
                            BZERO(h, HBLKSIZE);
                          }
                        }
                      }
                    /* Restore hbp to point at free block */
                      hbp = prev;
                      if (0 == hbp) {
                        return GC_allochblk_nth(sz, kind, flags, n);
                      }
                      hhdr = HDR(hbp);
                  }
                }
              }
            }
            if (size_avail >= size_needed) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n */
                /* is important.                                      */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Notify virtual dirty bit implementation that we are about to write.  */
    /* Ensure that pointerfree objects are not protected if it's avoidable. */
        GC_remove_protection(hbp, divHBLKSZ(size_needed),
                             (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block.  Restart count of */
    /* consecutive failures.                                      */
    {
        extern unsigned GC_fail_count;

        GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
        ABORT("Duplicate large block deallocation");
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
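
/* Note that size here is the size of the block as originally freed; */
/* GC_large_free_bytes grows by exactly that amount even when the    */
/* block is coalesced with free neighbors, since their sizes were    */
/* already counted when they were freed.                              */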