/* gcc/ggc-zone.c -- recovered from a gitweb dump of official-gcc.git
   (blob da58116fabba410ef0d57519777addedb4d798bf).  */
1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
4 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
5 (dberlin@dberlin.org)
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "toplev.h"
33 #include "varray.h"
34 #include "flags.h"
35 #include "ggc.h"
36 #include "timevar.h"
37 #include "params.h"
38 #include "bitmap.h"
40 #ifdef ENABLE_VALGRIND_CHECKING
41 # ifdef HAVE_VALGRIND_MEMCHECK_H
42 # include <valgrind/memcheck.h>
43 # elif defined HAVE_MEMCHECK_H
44 # include <memcheck.h>
45 # else
46 # include <valgrind.h>
47 # endif
48 #else
49 /* Avoid #ifdef:s when we can help it. */
50 #define VALGRIND_DISCARD(x)
51 #define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
52 #define VALGRIND_FREELIKE_BLOCK(x,y)
53 #endif
54 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
55 file open. Prefer either to valloc. */
56 #ifdef HAVE_MMAP_ANON
57 # undef HAVE_MMAP_DEV_ZERO
59 # include <sys/mman.h>
60 # ifndef MAP_FAILED
61 # define MAP_FAILED -1
62 # endif
63 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
64 # define MAP_ANONYMOUS MAP_ANON
65 # endif
66 # define USING_MMAP
68 #endif
70 #ifdef HAVE_MMAP_DEV_ZERO
72 # include <sys/mman.h>
73 # ifndef MAP_FAILED
74 # define MAP_FAILED -1
75 # endif
76 # define USING_MMAP
78 #endif
80 #ifndef USING_MMAP
81 #error "Zone collector requires mmap"
82 #endif
84 #if (GCC_VERSION < 3001)
85 #define prefetch(X) ((void) X)
86 #else
87 #define prefetch(X) __builtin_prefetch (X)
88 #endif
/* NOTES:
     If we track inter-zone pointers, we can mark single zones at a
     time.
     If we have a zone where we guarantee no inter-zone pointers, we
     could mark that zone separately.
     The garbage zone should not be marked, and we should return 1 in
     ggc_set_mark for any object in the garbage zone, which cuts off
     marking quickly.  */

/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater or equal to page size.

   Pages for small objects are broken up into chunks, each of which
   are described by a struct alloc_chunk.  One can walk over all
   chunks on the page by adding the chunk size to the chunk's data
   address.  The free space for a page exists in the free chunk bins.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif
135 /* This structure manages small chunks. When the chunk is free, it's
136 linked with other chunks via free_next. When the chunk is allocated,
137 the data starts at u. Large chunks are allocated one at a time to
138 their own page, and so don't come in here.
140 The "type" field is a placeholder for a future change to do
141 generational collection. At present it is 0 when free and
142 and 1 when allocated. */
144 struct alloc_chunk {
145 #ifdef COOKIE_CHECKING
146 unsigned int magic;
147 #endif
148 unsigned int type:1;
149 unsigned int typecode:14;
150 unsigned int large:1;
151 unsigned int size:15;
152 unsigned int mark:1;
153 union {
154 struct alloc_chunk *next_free;
155 char data[1];
157 /* Make sure the data is sufficiently aligned. */
158 HOST_WIDEST_INT align_i;
159 #ifdef HAVE_LONG_DOUBLE
160 long double align_d;
161 #else
162 double align_d;
163 #endif
164 } u;
165 } __attribute__ ((packed));
167 #define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u))
/* We maintain several bins of free lists for chunks for very small
   objects.  We never exhaustively search other bins -- if we don't
   find one of the proper size, we allocate from the "larger" bin.  */

/* Decreasing the number of free bins increases the time it takes to allocate.
   Similar with increasing max_free_bin_size without increasing num_free_bins.

   After much histogramming of allocation sizes and time spent on gc,
   on a PowerPC G4 7450 - 667 mhz, and a Pentium 4 - 2.8ghz,
   these were determined to be the optimal values.  */
#define NUM_FREE_BINS		64
#define MAX_FREE_BIN_SIZE	256
#define FREE_BIN_DELTA		(MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)

/* Marker used as chunk->size for a large object.  Should correspond
   to the size of the bitfield above.  */
#define LARGE_OBJECT_SIZE	0x7fff
189 /* We use this structure to determine the alignment required for
190 allocations. For power-of-two sized allocations, that's not a
191 problem, but it does matter for odd-sized allocations. */
193 struct max_alignment {
194 char c;
195 union {
196 HOST_WIDEST_INT i;
197 #ifdef HAVE_LONG_DOUBLE
198 long double d;
199 #else
200 double d;
201 #endif
202 } u;
205 /* The biggest alignment required. */
207 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
209 /* Compute the smallest nonnegative number which when added to X gives
210 a multiple of F. */
212 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
214 /* Compute the smallest multiple of F that is >= X. */
216 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
219 /* A page_entry records the status of an allocation page. */
220 typedef struct page_entry
222 /* The next page-entry with objects of the same size, or NULL if
223 this is the last page-entry. */
224 struct page_entry *next;
226 /* The number of bytes allocated. (This will always be a multiple
227 of the host system page size.) */
228 size_t bytes;
230 /* How many collections we've survived. */
231 size_t survived;
233 /* The address at which the memory is allocated. */
234 char *page;
236 /* Context depth of this page. */
237 unsigned short context_depth;
239 /* Does this page contain small objects, or one large object? */
240 bool large_p;
242 /* The zone that this page entry belongs to. */
243 struct alloc_zone *zone;
244 } page_entry;
247 /* The global variables. */
248 static struct globals
250 /* The linked list of zones. */
251 struct alloc_zone *zones;
253 /* The system's page size. */
254 size_t pagesize;
255 size_t lg_pagesize;
257 /* A file descriptor open to /dev/zero for reading. */
258 #if defined (HAVE_MMAP_DEV_ZERO)
259 int dev_zero_fd;
260 #endif
262 /* The file descriptor for debugging output. */
263 FILE *debug_file;
264 } G;
266 /* The zone allocation structure. */
267 struct alloc_zone
269 /* Name of the zone. */
270 const char *name;
272 /* Linked list of pages in a zone. */
273 page_entry *pages;
275 /* Linked lists of free storage. Slots 1 ... NUM_FREE_BINS have chunks of size
276 FREE_BIN_DELTA. All other chunks are in slot 0. */
277 struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
279 /* Bytes currently allocated. */
280 size_t allocated;
282 /* Bytes currently allocated at the end of the last collection. */
283 size_t allocated_last_gc;
285 /* Total amount of memory mapped. */
286 size_t bytes_mapped;
288 /* Bit N set if any allocations have been done at context depth N. */
289 unsigned long context_depth_allocations;
291 /* Bit N set if any collections have been done at context depth N. */
292 unsigned long context_depth_collections;
294 /* The current depth in the context stack. */
295 unsigned short context_depth;
297 /* A cache of free system pages. */
298 page_entry *free_pages;
300 /* Next zone in the linked list of zones. */
301 struct alloc_zone *next_zone;
303 /* True if this zone was collected during this collection. */
304 bool was_collected;
306 /* True if this zone should be destroyed after the next collection. */
307 bool dead;
308 } main_zone;
310 struct alloc_zone *rtl_zone;
311 struct alloc_zone *garbage_zone;
312 struct alloc_zone *tree_zone;
314 /* Allocate pages in chunks of this size, to throttle calls to memory
315 allocation routines. The first page is used, the rest go onto the
316 free list. This cannot be larger than HOST_BITS_PER_INT for the
317 in_use bitmask for page_group. */
318 #define GGC_QUIRE_SIZE 16
320 static int ggc_allocated_p (const void *);
321 #ifdef USING_MMAP
322 static char *alloc_anon (char *, size_t, struct alloc_zone *);
323 #endif
324 static struct page_entry * alloc_small_page ( struct alloc_zone *);
325 static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
326 static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
327 static void free_page (struct page_entry *);
328 static void release_pages (struct alloc_zone *);
329 static void sweep_pages (struct alloc_zone *);
330 static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short MEM_STAT_DECL);
331 static bool ggc_collect_1 (struct alloc_zone *, bool);
332 static void check_cookies (void);
335 /* Returns nonzero if P was allocated in GC'able memory. */
337 static inline int
338 ggc_allocated_p (const void *p)
340 struct alloc_chunk *chunk;
341 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
342 #ifdef COOKIE_CHECKING
343 if (chunk->magic != CHUNK_MAGIC)
344 abort ();
345 #endif
346 if (chunk->type == 1)
347 return true;
348 return false;
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0);

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;
  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
  return page;
}
#endif
386 /* Allocate a new page for allocating objects of size 2^ORDER,
387 and return an entry for it. */
389 static inline struct page_entry *
390 alloc_small_page (struct alloc_zone *zone)
392 struct page_entry *entry;
393 char *page;
395 page = NULL;
397 /* Check the list of free pages for one we can use. */
398 entry = zone->free_pages;
399 if (entry != NULL)
401 /* Recycle the allocated memory from this page ... */
402 zone->free_pages = entry->next;
403 page = entry->page;
407 #ifdef USING_MMAP
408 else
410 /* We want just one page. Allocate a bunch of them and put the
411 extras on the freelist. (Can only do this optimization with
412 mmap for backing store.) */
413 struct page_entry *e, *f = zone->free_pages;
414 int i;
416 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);
418 /* This loop counts down so that the chain will be in ascending
419 memory order. */
420 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
422 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
423 e->bytes = G.pagesize;
424 e->page = page + (i << G.lg_pagesize);
425 e->next = f;
426 f = e;
429 zone->free_pages = f;
431 #endif
432 if (entry == NULL)
433 entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
435 entry->next = 0;
436 entry->bytes = G.pagesize;
437 entry->page = page;
438 entry->context_depth = zone->context_depth;
439 entry->large_p = false;
440 entry->zone = zone;
441 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
443 if (GGC_DEBUG_LEVEL >= 2)
444 fprintf (G.debug_file,
445 "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
446 (PTR) entry, page, page + G.pagesize - 1);
448 return entry;
450 /* Compute the smallest multiple of F that is >= X. */
452 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
454 /* Allocate a large page of size SIZE in ZONE. */
456 static inline struct page_entry *
457 alloc_large_page (size_t size, struct alloc_zone *zone)
459 struct page_entry *entry;
460 char *page;
461 size = ROUND_UP (size, 1024);
462 page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
463 entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);
465 entry->next = 0;
466 entry->bytes = size;
467 entry->page = page;
468 entry->context_depth = zone->context_depth;
469 entry->large_p = true;
470 entry->zone = zone;
471 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
473 if (GGC_DEBUG_LEVEL >= 2)
474 fprintf (G.debug_file,
475 "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
476 (PTR) entry, page, page + size - 1);
478 return entry;
482 /* For a page that is no longer needed, put it on the free page list. */
484 static inline void
485 free_page (page_entry *entry)
487 if (GGC_DEBUG_LEVEL >= 2)
488 fprintf (G.debug_file,
489 "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
490 entry->page, entry->page + entry->bytes - 1);
492 if (entry->large_p)
494 free (entry->page);
495 VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
497 else
499 /* Mark the page as inaccessible. Discard the handle to
500 avoid handle leak. */
501 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
503 entry->next = entry->zone->free_pages;
504 entry->zone->free_pages = entry;
/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}
545 /* Place CHUNK of size SIZE on the free list for ZONE. */
547 static inline void
548 free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
550 size_t bin = 0;
552 bin = SIZE_BIN_DOWN (size);
553 if (bin == 0)
554 abort ();
555 if (bin > NUM_FREE_BINS)
556 bin = 0;
557 #ifdef COOKIE_CHECKING
558 if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
559 abort ();
560 chunk->magic = DEADCHUNK_MAGIC;
561 #endif
562 chunk->u.next_free = zone->free_chunks[bin];
563 zone->free_chunks[bin] = chunk;
564 if (GGC_DEBUG_LEVEL >= 3)
565 fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
566 VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
569 /* Allocate a chunk of memory of SIZE bytes. */
571 static void *
572 ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type
573 MEM_STAT_DECL)
575 size_t bin = 0;
576 size_t lsize = 0;
577 struct page_entry *entry;
578 struct alloc_chunk *chunk, *lchunk, **pp;
579 void *result;
581 /* Align size, so that we're assured of aligned allocations. */
582 if (size < FREE_BIN_DELTA)
583 size = FREE_BIN_DELTA;
584 size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
586 /* Large objects are handled specially. */
587 if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
589 size = ROUND_UP (size, 1024);
590 entry = alloc_large_page (size, zone);
591 entry->survived = 0;
592 entry->next = entry->zone->pages;
593 entry->zone->pages = entry;
595 chunk = (struct alloc_chunk *) entry->page;
596 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
597 chunk->large = 1;
598 chunk->size = CEIL (size, 1024);
600 goto found;
603 /* First look for a tiny object already segregated into its own
604 size bucket. */
605 bin = SIZE_BIN_UP (size);
606 if (bin <= NUM_FREE_BINS)
608 chunk = zone->free_chunks[bin];
609 if (chunk)
611 zone->free_chunks[bin] = chunk->u.next_free;
612 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
613 goto found;
617 /* Failing that, look through the "other" bucket for a chunk
618 that is large enough. */
619 pp = &(zone->free_chunks[0]);
620 chunk = *pp;
621 while (chunk && chunk->size < size)
623 pp = &chunk->u.next_free;
624 chunk = *pp;
627 /* Failing that, allocate new storage. */
628 if (!chunk)
630 entry = alloc_small_page (zone);
631 entry->next = entry->zone->pages;
632 entry->zone->pages = entry;
634 chunk = (struct alloc_chunk *) entry->page;
635 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
636 chunk->size = G.pagesize - CHUNK_OVERHEAD;
637 chunk->large = 0;
639 else
641 *pp = chunk->u.next_free;
642 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
643 chunk->large = 0;
645 /* Release extra memory from a chunk that's too big. */
646 lsize = chunk->size - size;
647 if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
649 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
650 chunk->size = size;
652 lsize -= CHUNK_OVERHEAD;
653 lchunk = (struct alloc_chunk *)(chunk->u.data + size);
654 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
655 #ifdef COOKIE_CHECKING
656 lchunk->magic = CHUNK_MAGIC;
657 #endif
658 lchunk->type = 0;
659 lchunk->mark = 0;
660 lchunk->size = lsize;
661 lchunk->large = 0;
662 free_chunk (lchunk, lsize, zone);
663 lsize = 0;
665 #ifdef GATHER_STATISTICS
666 ggc_record_overhead (size, lsize PASS_MEM_STAT);
667 #endif
669 /* Calculate the object's address. */
670 found:
671 #ifdef COOKIE_CHECKING
672 chunk->magic = CHUNK_MAGIC;
673 #endif
674 chunk->type = 1;
675 chunk->mark = 0;
676 chunk->typecode = type;
677 result = chunk->u.data;
679 #ifdef ENABLE_GC_CHECKING
680 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
681 exact same semantics in presence of memory bugs, regardless of
682 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
683 handle to avoid handle leak. */
684 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
686 /* `Poison' the entire allocated object. */
687 memset (result, 0xaf, size);
688 #endif
690 /* Tell Valgrind that the memory is there, but its content isn't
691 defined. The bytes at the end of the object are still marked
692 unaccessible. */
693 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
695 /* Keep track of how many bytes are being allocated. This
696 information is used in deciding when to collect. */
697 zone->allocated += size + CHUNK_OVERHEAD;
699 if (GGC_DEBUG_LEVEL >= 3)
700 fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
701 (void *)chunk, (unsigned long) size, result);
703 return result;
706 /* Allocate a SIZE of chunk memory of GTE type, into an appropriate zone
707 for that type. */
709 void *
710 ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
711 MEM_STAT_DECL)
713 switch (gte)
715 case gt_ggc_e_14lang_tree_node:
716 return ggc_alloc_zone_1 (size, tree_zone, gte PASS_MEM_STAT);
718 case gt_ggc_e_7rtx_def:
719 return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);
721 case gt_ggc_e_9rtvec_def:
722 return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);
724 default:
725 return ggc_alloc_zone_1 (size, &main_zone, gte PASS_MEM_STAT);
729 /* Normal ggc_alloc simply allocates into the main zone. */
731 void *
732 ggc_alloc_stat (size_t size MEM_STAT_DECL)
734 return ggc_alloc_zone_1 (size, &main_zone, -1 PASS_MEM_STAT);
737 /* Zone allocation allocates into the specified zone. */
739 void *
740 ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone MEM_STAT_DECL)
742 return ggc_alloc_zone_1 (size, zone, -1 PASS_MEM_STAT);
/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
  memset ((CHUNK)->u.data, 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#endif
753 /* Free the object at P. */
755 void
756 ggc_free (void *p)
758 struct alloc_chunk *chunk;
760 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
762 /* Poison the chunk. */
763 poison_chunk (chunk, ggc_get_size (p));
765 /* XXX: We only deal with explicitly freeing large objects ATM. */
766 if (chunk->large)
767 free (p);
770 /* If P is not marked, mark it and return false. Otherwise return true.
771 P must have been allocated by the GC allocator; it mustn't point to
772 static objects, stack variables, or memory allocated with malloc. */
775 ggc_set_mark (const void *p)
777 struct alloc_chunk *chunk;
779 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
780 #ifdef COOKIE_CHECKING
781 if (chunk->magic != CHUNK_MAGIC)
782 abort ();
783 #endif
784 if (chunk->mark)
785 return 1;
786 chunk->mark = 1;
788 if (GGC_DEBUG_LEVEL >= 4)
789 fprintf (G.debug_file, "Marking %p\n", p);
791 return 0;
794 /* Return 1 if P has been marked, zero otherwise.
795 P must have been allocated by the GC allocator; it mustn't point to
796 static objects, stack variables, or memory allocated with malloc. */
799 ggc_marked_p (const void *p)
801 struct alloc_chunk *chunk;
803 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
804 #ifdef COOKIE_CHECKING
805 if (chunk->magic != CHUNK_MAGIC)
806 abort ();
807 #endif
808 return chunk->mark;
811 /* Return the size of the gc-able object P. */
813 size_t
814 ggc_get_size (const void *p)
816 struct alloc_chunk *chunk;
818 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
819 #ifdef COOKIE_CHECKING
820 if (chunk->magic != CHUNK_MAGIC)
821 abort ();
822 #endif
823 if (chunk->large)
824 return chunk->size * 1024;
826 return chunk->size;
829 /* Initialize the ggc-zone-mmap allocator. */
830 void
831 init_ggc (void)
833 /* Set up the main zone by hand. */
834 main_zone.name = "Main zone";
835 G.zones = &main_zone;
837 /* Allocate the default zones. */
838 rtl_zone = new_ggc_zone ("RTL zone");
839 tree_zone = new_ggc_zone ("Tree zone");
840 garbage_zone = new_ggc_zone ("Garbage zone");
842 G.pagesize = getpagesize();
843 G.lg_pagesize = exact_log2 (G.pagesize);
844 #ifdef HAVE_MMAP_DEV_ZERO
845 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
846 if (G.dev_zero_fd == -1)
847 abort ();
848 #endif
850 #if 0
851 G.debug_file = fopen ("ggc-mmap.debug", "w");
852 setlinebuf (G.debug_file);
853 #else
854 G.debug_file = stdout;
855 #endif
857 #ifdef USING_MMAP
858 /* StunOS has an amazing off-by-one error for the first mmap allocation
859 after fiddling with RLIMIT_STACK. The result, as hard as it is to
860 believe, is an unaligned page allocation, which would cause us to
861 hork badly if we tried to use it. */
863 char *p = alloc_anon (NULL, G.pagesize, &main_zone);
864 struct page_entry *e;
865 if ((size_t)p & (G.pagesize - 1))
867 /* How losing. Discard this one and try another. If we still
868 can't get something useful, give up. */
870 p = alloc_anon (NULL, G.pagesize, &main_zone);
871 if ((size_t)p & (G.pagesize - 1))
872 abort ();
875 /* We have a good page, might as well hold onto it... */
876 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
877 e->bytes = G.pagesize;
878 e->page = p;
879 e->next = main_zone.free_pages;
880 main_zone.free_pages = e;
882 #endif
885 /* Start a new GGC zone. */
887 struct alloc_zone *
888 new_ggc_zone (const char * name)
890 struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
891 new_zone->name = name;
892 new_zone->next_zone = G.zones->next_zone;
893 G.zones->next_zone = new_zone;
894 return new_zone;
897 /* Destroy a GGC zone. */
898 void
899 destroy_ggc_zone (struct alloc_zone * dead_zone)
901 struct alloc_zone *z;
903 for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
904 /* Just find that zone. */ ;
906 #ifdef ENABLE_CHECKING
907 /* We should have found the zone in the list. Anything else is fatal. */
908 if (!z)
909 abort ();
910 #endif
912 /* z is dead, baby. z is dead. */
913 z->dead= true;
916 /* Increment the `GC context'. Objects allocated in an outer context
917 are never freed, eliminating the need to register their roots. */
919 void
920 ggc_push_context (void)
922 struct alloc_zone *zone;
923 for (zone = G.zones; zone; zone = zone->next_zone)
924 ++(zone->context_depth);
925 /* Die on wrap. */
926 if (main_zone.context_depth >= HOST_BITS_PER_LONG)
927 abort ();
930 /* Decrement the `GC context'. All objects allocated since the
931 previous ggc_push_context are migrated to the outer context. */
933 static void
934 ggc_pop_context_1 (struct alloc_zone *zone)
936 unsigned long omask;
937 unsigned depth;
938 page_entry *p;
940 depth = --(zone->context_depth);
941 omask = (unsigned long)1 << (depth + 1);
943 if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
944 return;
946 zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
947 zone->context_depth_allocations &= omask - 1;
948 zone->context_depth_collections &= omask - 1;
950 /* Any remaining pages in the popped context are lowered to the new
951 current context; i.e. objects allocated in the popped context and
952 left over are imported into the previous context. */
953 for (p = zone->pages; p != NULL; p = p->next)
954 if (p->context_depth > depth)
955 p->context_depth = depth;
958 /* Pop all the zone contexts. */
960 void
961 ggc_pop_context (void)
963 struct alloc_zone *zone;
964 for (zone = G.zones; zone; zone = zone->next_zone)
965 ggc_pop_context_1 (zone);
968 /* Free all empty pages and objects within a page for a given zone */
970 static void
971 sweep_pages (struct alloc_zone *zone)
973 page_entry **pp, *p, *next;
974 struct alloc_chunk *chunk, *last_free, *end;
975 size_t last_free_size, allocated = 0;
976 bool nomarksinpage;
977 /* First, reset the free_chunks lists, since we are going to
978 re-free free chunks in hopes of coalescing them into large chunks. */
979 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
980 pp = &zone->pages;
981 for (p = zone->pages; p ; p = next)
983 next = p->next;
984 /* Large pages are all or none affairs. Either they are
985 completely empty, or they are completely full.
987 XXX: Should we bother to increment allocated. */
988 if (p->large_p)
990 if (((struct alloc_chunk *)p->page)->mark == 1)
992 ((struct alloc_chunk *)p->page)->mark = 0;
994 else
996 *pp = next;
997 #ifdef ENABLE_GC_CHECKING
998 /* Poison the page. */
999 memset (p->page, 0xb5, p->bytes);
1000 #endif
1001 free_page (p);
1003 continue;
1006 /* This page has now survived another collection. */
1007 p->survived++;
1009 /* Which leaves full and partial pages. Step through all chunks,
1010 consolidate those that are free and insert them into the free
1011 lists. Note that consolidation slows down collection
1012 slightly. */
1014 chunk = (struct alloc_chunk *)p->page;
1015 end = (struct alloc_chunk *)(p->page + G.pagesize);
1016 last_free = NULL;
1017 last_free_size = 0;
1018 nomarksinpage = true;
1021 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
1022 if (chunk->mark || p->context_depth < zone->context_depth)
1024 nomarksinpage = false;
1025 if (last_free)
1027 last_free->type = 0;
1028 last_free->size = last_free_size;
1029 last_free->mark = 0;
1030 poison_chunk (last_free, last_free_size);
1031 free_chunk (last_free, last_free_size, zone);
1032 last_free = NULL;
1034 if (chunk->mark)
1036 allocated += chunk->size + CHUNK_OVERHEAD;
1038 chunk->mark = 0;
1040 else
1042 if (last_free)
1044 last_free_size += CHUNK_OVERHEAD + chunk->size;
1046 else
1048 last_free = chunk;
1049 last_free_size = chunk->size;
1053 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1055 while (chunk < end);
1057 if (nomarksinpage)
1059 *pp = next;
1060 #ifdef ENABLE_GC_CHECKING
1061 /* Poison the page. */
1062 memset (p->page, 0xb5, p->bytes);
1063 #endif
1064 free_page (p);
1065 continue;
1067 else if (last_free)
1069 last_free->type = 0;
1070 last_free->size = last_free_size;
1071 last_free->mark = 0;
1072 poison_chunk (last_free, last_free_size);
1073 free_chunk (last_free, last_free_size, zone);
1075 pp = &p->next;
1078 zone->allocated = allocated;
1081 /* mark-and-sweep routine for collecting a single zone. NEED_MARKING
1082 is true if we need to mark before sweeping, false if some other
1083 zone collection has already performed marking for us. Returns true
1084 if we collected, false otherwise. */
1086 static bool
1087 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1089 if (!zone->dead)
1091 /* Avoid frequent unnecessary work by skipping collection if the
1092 total allocations haven't expanded much since the last
1093 collection. */
1094 float allocated_last_gc =
1095 MAX (zone->allocated_last_gc,
1096 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1098 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1100 if (zone->allocated < allocated_last_gc + min_expand)
1101 return false;
1104 if (!quiet_flag)
1105 fprintf (stderr, " {%s GC %luk -> ",
1106 zone->name, (unsigned long) zone->allocated / 1024);
1108 /* Zero the total allocated bytes. This will be recalculated in the
1109 sweep phase. */
1110 zone->allocated = 0;
1112 /* Release the pages we freed the last time we collected, but didn't
1113 reuse in the interim. */
1114 release_pages (zone);
1116 /* Indicate that we've seen collections at this context depth. */
1117 zone->context_depth_collections
1118 = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
1119 if (need_marking)
1120 ggc_mark_roots ();
1121 sweep_pages (zone);
1122 zone->was_collected = true;
1123 zone->allocated_last_gc = zone->allocated;
1125 if (!quiet_flag)
1126 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1127 return true;
1130 /* Calculate the average page survival rate in terms of number of
1131 collections. */
1133 static float
1134 calculate_average_page_survival (struct alloc_zone *zone)
1136 float count = 0.0;
1137 float survival = 0.0;
1138 page_entry *p;
1139 for (p = zone->pages; p; p = p->next)
1141 count += 1.0;
1142 survival += p->survived;
1144 return survival/count;
/* Check the magic cookies all of the chunks contain, to make sure we
   aren't doing anything stupid, like stomping on alloc_chunk
   structures.  */

static inline void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
  page_entry *p;
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      for (p = zone->pages; p; p = p->next)
	{
	  if (!p->large_p)
	    {
	      struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
	      struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
	      do
		{
		  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
		    abort ();
		  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
		}
	      while (chunk < end);
	    }
	}
    }
#endif
}
1178 /* Top level collection routine. */
1180 void
1181 ggc_collect (void)
1183 struct alloc_zone *zone;
1184 bool marked = false;
1185 float f;
1187 timevar_push (TV_GC);
1188 check_cookies ();
1189 /* Start by possibly collecting the main zone. */
1190 main_zone.was_collected = false;
1191 marked |= ggc_collect_1 (&main_zone, true);
1193 /* In order to keep the number of collections down, we don't
1194 collect other zones unless we are collecting the main zone. This
1195 gives us roughly the same number of collections as we used to
1196 have with the old gc. The number of collection is important
1197 because our main slowdown (according to profiling) is now in
1198 marking. So if we mark twice as often as we used to, we'll be
1199 twice as slow. Hopefully we'll avoid this cost when we mark
1200 zone-at-a-time. */
1202 if (main_zone.was_collected)
1204 struct alloc_zone *zone;
1206 for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
1208 check_cookies ();
1209 zone->was_collected = false;
1210 marked |= ggc_collect_1 (zone, !marked);
1214 /* Print page survival stats, if someone wants them. */
1215 if (GGC_DEBUG_LEVEL >= 2)
1217 for (zone = G.zones; zone; zone = zone->next_zone)
1219 if (zone->was_collected)
1221 f = calculate_average_page_survival (zone);
1222 printf ("Average page survival in zone `%s' is %f\n",
1223 zone->name, f);
1228 /* Since we don't mark zone at a time right now, marking in any
1229 zone means marking in every zone. So we have to clear all the
1230 marks in all the zones that weren't collected already. */
1231 if (marked)
1233 page_entry *p;
1234 for (zone = G.zones; zone; zone = zone->next_zone)
1236 if (zone->was_collected)
1237 continue;
1238 for (p = zone->pages; p; p = p->next)
1240 if (!p->large_p)
1242 struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1243 struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1246 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
1247 if (chunk->mark || p->context_depth < zone->context_depth)
1249 chunk->mark = 0;
1251 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1253 while (chunk < end);
1255 else
1257 ((struct alloc_chunk *)p->page)->mark = 0;
1263 /* Free dead zones. */
1264 for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
1266 if (zone->next_zone->dead)
1268 struct alloc_zone *dead_zone = zone->next_zone;
1270 printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
1272 /* The zone must be empty. */
1273 if (dead_zone->allocated != 0)
1274 abort ();
1276 /* Unchain the dead zone, release all its pages and free it. */
1277 zone->next_zone = zone->next_zone->next_zone;
1278 release_pages (dead_zone);
1279 free (dead_zone);
1283 timevar_pop (TV_GC);
/* Print allocation statistics.  Not implemented for the zone
   collector in this revision; the hook must still exist because the
   ggc interface requires it.  */

void
ggc_print_statistics (void)
{
}
/* State carried through a precompiled-header write.  The nested
   ggc_pch_ondisk part is the portion that is physically written to
   (and later read back from) the PCH file, so its layout must not
   change; BASE and WRITTEN are in-memory bookkeeping only.  */
struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned total;	/* Total size in bytes of all PCH objects.  */
  } d;
  size_t base;		/* Next allocation address within the PCH.  */
  size_t written;	/* Bytes written to the file so far.  */
};
1303 /* Initialize the PCH data structure. */
1305 struct ggc_pch_data *
1306 init_ggc_pch (void)
1308 return xcalloc (sizeof (struct ggc_pch_data), 1);
1311 /* Add the size of object X to the size of the PCH data. */
1313 void
1314 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1315 size_t size, bool is_string)
1317 if (!is_string)
1319 d->d.total += size + CHUNK_OVERHEAD;
1321 else
1322 d->d.total += size;
1325 /* Return the total size of the PCH data. */
1327 size_t
1328 ggc_pch_total_size (struct ggc_pch_data *d)
1330 return d->d.total;
1333 /* Set the base address for the objects in the PCH file. */
1335 void
1336 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
1338 d->base = (size_t) base;
1341 /* Allocate a place for object X of size SIZE in the PCH file. */
1343 char *
1344 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
1345 size_t size, bool is_string)
1347 char *result;
1348 result = (char *)d->base;
1349 if (!is_string)
1351 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1352 if (chunk->large)
1353 d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
1354 else
1355 d->base += chunk->size + CHUNK_OVERHEAD;
1356 return result + CHUNK_OVERHEAD;
1358 else
1360 d->base += size;
1361 return result;
1366 /* Prepare to write out the PCH data to file F. */
1368 void
1369 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1370 FILE *f ATTRIBUTE_UNUSED)
1372 /* Nothing to do. */
1375 /* Write out object X of SIZE to file F. */
1377 void
1378 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1379 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
1380 size_t size, bool is_string)
1382 if (!is_string)
1384 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1385 size = ggc_get_size (x);
1386 if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
1387 fatal_error ("can't write PCH file: %m");
1388 d->written += size + CHUNK_OVERHEAD;
1390 else
1392 if (fwrite (x, size, 1, f) != 1)
1393 fatal_error ("can't write PCH file: %m");
1394 d->written += size;
1398 void
1399 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
1401 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
1402 fatal_error ("can't write PCH file: %m");
1403 free (d);
1405 void
1406 ggc_pch_read (FILE *f, void *addr)
1408 struct ggc_pch_ondisk d;
1409 struct page_entry *entry;
1410 struct alloc_zone *pch_zone;
1411 if (fread (&d, sizeof (d), 1, f) != 1)
1412 fatal_error ("can't read PCH file: %m");
1413 entry = xcalloc (1, sizeof (struct page_entry));
1414 entry->bytes = d.total;
1415 entry->page = addr;
1416 entry->context_depth = 0;
1417 pch_zone = new_ggc_zone ("PCH zone");
1418 entry->zone = pch_zone;
1419 entry->next = entry->zone->pages;
1420 entry->zone->pages = entry;