1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
4 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
5 (dberlin@dberlin.org)
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"
#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error "Zone collector requires mmap"
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif
/* NOTES:
   If we track inter-zone pointers, we can mark single zones at a
   time.
   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.
   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */
/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than or equal to page size.

   Pages for small objects are broken up into chunks, each of which
   is described by a struct alloc_chunk.  One can walk over all
   chunks on the page by adding the chunk size to the chunk's data
   address.  The free space for a page exists in the free chunk bins.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif

/* This structure manages small chunks.  When the chunk is free, it's
   linked with other chunks via free_next.  When the chunk is allocated,
   the data starts at u.  Large chunks are allocated one at a time to
   their own page, and so don't come in here.

   The "type" field is a placeholder for a future change to do
   generational collection.  At present it is 0 when free and
   1 when allocated.  */

struct alloc_chunk {
#ifdef COOKIE_CHECKING
  unsigned int magic;
#endif
  unsigned int type:1;
  unsigned int typecode:14;
  unsigned int large:1;
  unsigned int size:15;
  unsigned int mark:1;
  union {
    struct alloc_chunk *next_free;
    char data[1];

    /* Make sure the data is sufficiently aligned.  */
    HOST_WIDEST_INT align_i;
#ifdef HAVE_LONG_DOUBLE
    long double align_d;
#else
    double align_d;
#endif
  } u;
} __attribute__ ((packed));

#define CHUNK_OVERHEAD	(offsetof (struct alloc_chunk, u))
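
/* The strategy comment above relies on being able to walk every chunk
   on a small-object page by hopping from one header to the next.  An
   illustrative sketch only -- the same pattern appears verbatim in
   sweep_pages and check_cookies below, and PAGE here stands for any
   small-object page_entry:

     struct alloc_chunk *chunk = (struct alloc_chunk *) page->page;
     struct alloc_chunk *end
       = (struct alloc_chunk *) (page->page + G.pagesize);
     do
       {
         ... examine *chunk ...
         chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
       }
     while (chunk < end);  */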
/* We maintain several bins of free lists for chunks for very small
   objects.  We never exhaustively search other bins -- if we don't
   find one of the proper size, we allocate from the "larger" bin.  */

/* Decreasing the number of free bins increases the time it takes to allocate.
   Similarly for increasing max_free_bin_size without increasing num_free_bins.

   After much histogramming of allocation sizes and time spent on gc,
   on a PowerPC G4 7450 - 667 MHz, and a Pentium 4 - 2.8 GHz,
   these were determined to be the optimal values.  */
#define NUM_FREE_BINS		64
#define MAX_FREE_BIN_SIZE	256
#define FREE_BIN_DELTA		(MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
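
/* Worked example of the bin arithmetic, purely illustrative: with
   NUM_FREE_BINS == 64 and MAX_FREE_BIN_SIZE == 256, FREE_BIN_DELTA
   is 4.  A request for 9 bytes maps to bin
   SIZE_BIN_UP (9) == (9 + 3) / 4 == 3, the smallest bin whose chunks
   are guaranteed big enough, while a chunk of size 9 being freed lands
   in bin SIZE_BIN_DOWN (9) == 9 / 4 == 2, the largest bin whose
   requests it can still fully satisfy.  */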
/* Marker used as chunk->size for a large object.  Should correspond
   to the size of the bitfield above.  */
#define LARGE_OBJECT_SIZE	0x7fff
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
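
/* How the offsetof trick works: the lone char forces the union to be
   placed at the smallest offset that still satisfies the union's
   alignment, so offsetof (struct max_alignment, u) is exactly that
   alignment.  On typical ABIs this matches the compiler's own notion
   of the union's alignment; a hypothetical sanity check, not code from
   this file, might read:

     assert (MAX_ALIGNMENT
             == __alignof__ (((struct max_alignment *) 0)->u));  */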
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
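
/* Worked example, assuming the usual CEIL (x, y) == ((x) + (y) - 1) / (y)
   from system.h: ROUND_UP (10, 8) == CEIL (10, 8) * 8 == 2 * 8 == 16,
   and ROUND_UP_VALUE (10, 8) == 8 - 1 - (8 - 1 + 10) % 8 == 7 - 1 == 6,
   so adding 6 to 10 indeed yields the next multiple of 8.  */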
/* A page_entry records the status of an allocation page.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* How many collections we've survived.  */
  size_t survived;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;
} page_entry;
/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* The zone allocation structure.  */
struct alloc_zone
{
  /* Name of the zone.  */
  const char *name;

  /* Linked list of pages in a zone.  */
  page_entry *pages;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;
} main_zone;

struct alloc_zone *rtl_zone;
struct alloc_zone *garbage_zone;
struct alloc_zone *tree_zone;
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16

static int ggc_allocated_p (const void *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct page_entry * alloc_small_page (struct alloc_zone *);
static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
static void free_page (struct page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void check_cookies (void);
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  struct alloc_chunk *chunk;
  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->type == 1)
    return true;
  return false;
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0);

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;
  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
  return page;
}
#endif
/* Allocate a new small-object page for ZONE, and return an entry for it.  */

static inline struct page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  page = NULL;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
      page = entry->page;
    }
#ifdef USING_MMAP
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = zone->free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      zone->free_pages = f;
    }
#endif
  if (entry == NULL)
    entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));

  entry->next = 0;
  entry->bytes = G.pagesize;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = false;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + G.pagesize - 1);

  return entry;
}
/* Allocate a large page of size SIZE in ZONE.  */

static inline struct page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;
  size = ROUND_UP (size, 1024);
  page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
  entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);

  entry->next = 0;
  entry->bytes = size;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = true;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
	     entry->page, entry->page + entry->bytes - 1);

  if (entry->large_p)
    {
      free (entry->page);
      VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
    }
  else
    {
      /* Mark the page as inaccessible.  Discard the handle to
	 avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

      entry->next = entry->zone->free_pages;
      entry->zone->free_pages = entry;
    }
}
/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}
/* Place CHUNK of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
{
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  if (bin == 0)
    abort ();
  if (bin > NUM_FREE_BINS)
    bin = 0;
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
    abort ();
  chunk->magic = DEADCHUNK_MAGIC;
#endif
  chunk->u.next_free = zone->free_chunks[bin];
  zone->free_chunks[bin] = chunk;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}
/* Allocate a chunk of memory of SIZE bytes.  */

static void *
ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
{
  size_t bin = 0;
  size_t lsize = 0;
  struct page_entry *entry;
  struct alloc_chunk *chunk, *lchunk, **pp;
  void *result;

  /* Align size, so that we're assured of aligned allocations.  */
  if (size < FREE_BIN_DELTA)
    size = FREE_BIN_DELTA;
  size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Large objects are handled specially.  */
  if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
    {
      size = ROUND_UP (size, 1024);
      entry = alloc_large_page (size, zone);
      entry->survived = 0;
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 1;
      chunk->size = CEIL (size, 1024);

      goto found;
    }

  /* First look for a tiny object already segregated into its own
     size bucket.  */
  bin = SIZE_BIN_UP (size);
  if (bin <= NUM_FREE_BINS)
    {
      chunk = zone->free_chunks[bin];
      if (chunk)
	{
	  zone->free_chunks[bin] = chunk->u.next_free;
	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->u.next_free;
      chunk = *pp;
    }

  /* Failing that, allocate new storage.  */
  if (!chunk)
    {
      entry = alloc_small_page (zone);
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = G.pagesize - CHUNK_OVERHEAD;
      chunk->large = 0;
    }
  else
    {
      *pp = chunk->u.next_free;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 0;
    }
  /* Release extra memory from a chunk that's too big.  */
  lsize = chunk->size - size;
  if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;

      lsize -= CHUNK_OVERHEAD;
      lchunk = (struct alloc_chunk *)(chunk->u.data + size);
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
#ifdef COOKIE_CHECKING
      lchunk->magic = CHUNK_MAGIC;
#endif
      lchunk->type = 0;
      lchunk->mark = 0;
      lchunk->size = lsize;
      lchunk->large = 0;
      free_chunk (lchunk, lsize, zone);
    }
  /* Calculate the object's address.  */
 found:
#ifdef COOKIE_CHECKING
  chunk->magic = CHUNK_MAGIC;
#endif
  chunk->type = 1;
  chunk->mark = 0;
  chunk->typecode = type;
  result = chunk->u.data;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* `Poison' the entire allocated object.  */
  memset (result, 0xaf, size);
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size + CHUNK_OVERHEAD;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
	     (void *)chunk, (unsigned long) size, result);

  return result;
}
/* Allocate a chunk of SIZE bytes whose GTY type is GTE, in an
   appropriate zone for that type.  */

void *
ggc_alloc_typed (enum gt_types_enum gte, size_t size)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_1 (size, tree_zone, gte);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    default:
      return ggc_alloc_zone_1 (size, &main_zone, gte);
    }
}
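
/* Illustrative call only -- the callers and the exact gt_types_enum
   values are generated by gengtype, so this sketch is not code from
   this file:

     rtx r = (rtx) ggc_alloc_typed (gt_ggc_e_7rtx_def,
                                    sizeof (struct rtx_def));

   The switch above routes such an allocation into rtl_zone rather
   than main_zone.  */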
/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc (size_t size)
{
  return ggc_alloc_zone_1 (size, &main_zone, -1);
}
/* Zone allocation allocates into the specified zone.  */

void *
ggc_alloc_zone (size_t size, struct alloc_zone *zone)
{
  return ggc_alloc_zone_1 (size, zone, -1);
}
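
/* Illustrative use, not code from this file: a caller that knows its
   object's lifetime class can place it directly, e.g.

     tree t = (tree) ggc_alloc_zone (sizeof (struct tree_common),
                                     tree_zone);

   Grouping related objects in one zone keeps them on the same pages,
   which improves locality when the zone is swept.  */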
/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
  memset ((CHUNK)->u.data, 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#endif
/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);

  /* Poison the chunk.  */
  poison_chunk (chunk, ggc_get_size (p));

  /* XXX: We only deal with explicitly freeing large objects ATM.  */
  if (chunk->large)
    free (p);
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->mark)
    return 1;
  chunk->mark = 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  return chunk->mark;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->large)
    return chunk->size * 1024;

  return chunk->size;
}
/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  rtl_zone = new_ggc_zone ("RTL zone");
  tree_zone = new_ggc_zone ("Tree zone");
  garbage_zone = new_ggc_zone ("Garbage zone");

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);
#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = main_zone.free_pages;
    main_zone.free_pages = e;
  }
#endif
}
/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char * name)
{
  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
  return new_zone;
}
/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone * dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */ ;

#ifdef ENABLE_CHECKING
  /* We should have found the zone in the list.  Anything else is fatal.  */
  if (!z)
    abort ();
#endif

  /* z is dead, baby.  z is dead.  */
  z->dead = true;
}
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ++(zone->context_depth);
  /* Die on wrap.  */
  if (main_zone.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}
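
/* Illustrative pairing, not code from this file.  Since only objects
   allocated in the current (highest-numbered) context may be
   collected, a caller can protect longer-lived data while doing
   temporary allocations:

     ggc_push_context ();
     ... allocate and discard temporary GC objects ...
     ggc_collect ();        <- cannot free outer-context objects
     ggc_pop_context ();    <- survivors migrate to the outer context
   */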
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

static void
ggc_pop_context_1 (struct alloc_zone *zone)
{
  unsigned long omask;
  unsigned depth;
  page_entry *p;

  depth = --(zone->context_depth);
  omask = (unsigned long)1 << (depth + 1);

  if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
    return;

  zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
  zone->context_depth_allocations &= omask - 1;
  zone->context_depth_collections &= omask - 1;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (p = zone->pages; p != NULL; p = p->next)
    if (p->context_depth > depth)
      p->context_depth = depth;
}

/* Pop all the zone contexts.  */

void
ggc_pop_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ggc_pop_context_1 (zone);
}
/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  page_entry **pp, *p, *next;
  struct alloc_chunk *chunk, *last_free, *end;
  size_t last_free_size, allocated = 0;
  bool nomarksinpage;
  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  pp = &zone->pages;
  for (p = zone->pages; p ; p = next)
    {
      next = p->next;
      /* Large pages are all or none affairs.  Either they are
	 completely empty, or they are completely full.

	 XXX: Should we bother to increment allocated?  */
      if (p->large_p)
	{
	  if (((struct alloc_chunk *)p->page)->mark == 1)
	    {
	      ((struct alloc_chunk *)p->page)->mark = 0;
	    }
	  else
	    {
	      *pp = next;
#ifdef ENABLE_GC_CHECKING
	      /* Poison the page.  */
	      memset (p->page, 0xb5, p->bytes);
#endif
	      free_page (p);
	    }
	  continue;
	}

      /* This page has now survived another collection.  */
      p->survived++;

      /* Which leaves full and partial pages.  Step through all chunks,
	 consolidate those that are free and insert them into the free
	 lists.  Note that consolidation slows down collection
	 slightly.  */

      chunk = (struct alloc_chunk *)p->page;
      end = (struct alloc_chunk *)(p->page + G.pagesize);
      last_free = NULL;
      last_free_size = 0;
      nomarksinpage = true;
      do
	{
	  prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
	  if (chunk->mark || p->context_depth < zone->context_depth)
	    {
	      nomarksinpage = false;
	      if (last_free)
		{
		  last_free->type = 0;
		  last_free->size = last_free_size;
		  last_free->mark = 0;
		  poison_chunk (last_free, last_free_size);
		  free_chunk (last_free, last_free_size, zone);
		  last_free = NULL;
		}
	      if (chunk->mark)
		{
		  allocated += chunk->size + CHUNK_OVERHEAD;
		}
	      chunk->mark = 0;
	    }
	  else
	    {
	      if (last_free)
		{
		  last_free_size += CHUNK_OVERHEAD + chunk->size;
		}
	      else
		{
		  last_free = chunk;
		  last_free_size = chunk->size;
		}
	    }

	  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
	}
      while (chunk < end);

      if (nomarksinpage)
	{
	  *pp = next;
#ifdef ENABLE_GC_CHECKING
	  /* Poison the page.  */
	  memset (p->page, 0xb5, p->bytes);
#endif
	  free_page (p);
	  continue;
	}
      else if (last_free)
	{
	  last_free->type = 0;
	  last_free->size = last_free_size;
	  last_free->mark = 0;
	  poison_chunk (last_free, last_free_size);
	  free_chunk (last_free, last_free_size, zone);
	}
      pp = &p->next;
    }

  zone->allocated = allocated;
}
/* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
  if (!zone->dead)
    {
      /* Avoid frequent unnecessary work by skipping collection if the
	 total allocations haven't expanded much since the last
	 collection.  */
      float allocated_last_gc =
	MAX (zone->allocated_last_gc,
	     (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

      float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (zone->allocated < allocated_last_gc + min_expand)
	return false;
    }

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
	     zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  /* Indicate that we've seen collections at this context depth.  */
  zone->context_depth_collections
    = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
  if (need_marking)
    ggc_mark_roots ();
  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  page_entry *p;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->survived;
    }
  return survival / count;
}
/* Check the magic cookies all of the chunks contain, to make sure we
   aren't doing anything stupid, like stomping on alloc_chunk
   structures.  */

static inline void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
  page_entry *p;
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      for (p = zone->pages; p; p = p->next)
	{
	  if (!p->large_p)
	    {
	      struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
	      struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
	      do
		{
		  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
		    abort ();
		  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
		}
	      while (chunk < end);
	    }
	}
    }
#endif
}
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;
  float f;

  timevar_push (TV_GC);
  check_cookies ();
  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
	{
	  check_cookies ();
	  zone->was_collected = false;
	  marked |= ggc_collect_1 (zone, !marked);
	}
    }

  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    {
	      f = calculate_average_page_survival (zone);
	      printf ("Average page survival in zone `%s' is %f\n",
		      zone->name, f);
	    }
	}
    }

  /* Since we don't mark zone at a time right now, marking in any
     zone means marking in every zone.  So we have to clear all the
     marks in all the zones that weren't collected already.  */
  if (marked)
    {
      page_entry *p;
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    continue;
	  for (p = zone->pages; p; p = p->next)
	    {
	      if (!p->large_p)
		{
		  struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
		  struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
		  do
		    {
		      prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
		      if (chunk->mark || p->context_depth < zone->context_depth)
			{
			  chunk->mark = 0;
			}
		      chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
		    }
		  while (chunk < end);
		}
	      else
		{
		  ((struct alloc_chunk *)p->page)->mark = 0;
		}
	    }
	}
    }

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
	{
	  struct alloc_zone *dead_zone = zone->next_zone;

	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

	  /* The zone must be empty.  */
	  if (dead_zone->allocated != 0)
	    abort ();

	  /* Unchain the dead zone, release all its pages and free it.  */
	  zone->next_zone = zone->next_zone->next_zone;
	  release_pages (dead_zone);
	  free (dead_zone);
	}
    }

  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */

void
ggc_print_statistics (void)
{
}

struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned total;
  } d;
  size_t base;
  size_t written;
};
/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}
/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    d->d.total += size + CHUNK_OVERHEAD;
  else
    d->d.total += size;
}
/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  return d->d.total;
}

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  d->base = (size_t) base;
}
/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string)
{
  char *result;
  result = (char *)d->base;
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
      if (chunk->large)
	d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
      else
	d->base += chunk->size + CHUNK_OVERHEAD;
      return result + CHUNK_OVERHEAD;
    }
  else
    {
      d->base += size;
      return result;
    }
}
/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
      size = ggc_get_size (x);
      if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
      d->written += size + CHUNK_OVERHEAD;
    }
  else
    {
      if (fwrite (x, size, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
      d->written += size;
    }
}
/* Write out the on-disk trailer and free the PCH data structure.  */

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}
/* Read the PCH data back in as a single page in its own zone.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  struct page_entry *entry;
  struct alloc_zone *pch_zone;
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");
  entry = xcalloc (1, sizeof (struct page_entry));
  entry->bytes = d.total;
  entry->page = addr;
  entry->context_depth = 0;
  pch_zone = new_ggc_zone ("PCH zone");
  entry->zone = pch_zone;
  entry->next = entry->zone->pages;
  entry->zone->pages = entry;
}