/* "Bag-of-pages" zone garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
   Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
   (dberlin@dberlin.org)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error "Zone collector requires mmap"
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

/* NOTES:
   If we track inter-zone pointers, we can mark single zones at a
   time.
   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.
   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */
/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than or equal to the page size.

   Pages for small objects are broken up into chunks, each of which
   is described by a struct alloc_chunk.  One can walk over all
   chunks on the page by adding the chunk size to the chunk's data
   address.  The free space for a page exists in the free chunk bins.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif
/* This structure manages small chunks.  When the chunk is free, it's
   linked with other chunks via free_next.  When the chunk is allocated,
   the data starts at u.  Large chunks are allocated one at a time to
   their own page, and so don't come in here.

   The "type" field is a placeholder for a future change to do
   generational collection.  At present it is 0 when free and
   1 when allocated.  */
struct alloc_chunk {
#ifdef COOKIE_CHECKING
  unsigned int magic;
#endif
  unsigned int type:1;
  unsigned int typecode:14;
  unsigned int large:1;
  unsigned int size:15;
  unsigned int mark:1;
  union {
    struct alloc_chunk *next_free;
    char data[1];

    /* Make sure the data is sufficiently aligned.  */
    HOST_WIDEST_INT align_i;
#ifdef HAVE_LONG_DOUBLE
    long double align_d;
#else
    double align_d;
#endif
  } u;
} __attribute__ ((packed));

#define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u))
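
/* Every object handed out by this allocator is immediately preceded
   by its alloc_chunk header, so the header for an object P can be
   recovered with

       (struct alloc_chunk *) ((char *) P - CHUNK_OVERHEAD)

   which is the idiom used by ggc_allocated_p, ggc_set_mark,
   ggc_marked_p and ggc_get_size below.  */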
/* We maintain several bins of free lists for chunks for very small
   objects.  We never exhaustively search other bins -- if we don't
   find one of the proper size, we allocate from the "larger" bin.  */

/* Decreasing the number of free bins increases the time it takes to
   allocate.  The same is true of increasing MAX_FREE_BIN_SIZE without
   increasing NUM_FREE_BINS.

   After much histogramming of allocation sizes and time spent on gc,
   on a PowerPC G4 7450 - 667 MHz, and a Pentium 4 - 2.8 GHz,
   these were determined to be the optimal values.  */
#define NUM_FREE_BINS		64
#define MAX_FREE_BIN_SIZE	256
#define FREE_BIN_DELTA		(MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
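
/* With the values above FREE_BIN_DELTA is 4.  For example, on a host
   where MAX_ALIGNMENT is 8, a 20-byte request is rounded up to 24
   bytes and satisfied from bin SIZE_BIN_UP (24) == 6, and a 24-byte
   chunk being freed goes back onto bin SIZE_BIN_DOWN (24) == 6.
   Free chunks too big for the sized bins go on the "other" list in
   slot 0.  */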
/* Marker used as chunk->size for a large object.  Should correspond
   to the size of the bitfield above.  */
#define LARGE_OBJECT_SIZE	0x7fff

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
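
/* For example, ROUND_UP_VALUE (10, 8) is 6 (10 + 6 == 16, the next
   multiple of 8), and ROUND_UP (10, 8) is 16.  */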
/* A page_entry records the status of an allocation page.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* How many collections we've survived.  */
  size_t survived;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;
} page_entry;


/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* The zone allocation structure.  */
struct alloc_zone
{
  /* Name of the zone.  */
  const char *name;

  /* Linked list of pages in a zone.  */
  page_entry *pages;

  /* Linked lists of free storage.  Slot I, for 1 <= I <= NUM_FREE_BINS,
     holds free chunks whose size maps to bin I (roughly I * FREE_BIN_DELTA
     bytes); all larger free chunks are kept in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;
} main_zone;
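
/* main_zone is allocated statically and is always the head of the
   zone list: init_ggc points G.zones at it, and new_ggc_zone links
   each new zone in immediately after it.  The default zones below
   are created by init_ggc.  */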
struct alloc_zone *rtl_zone;
struct alloc_zone *garbage_zone;
struct alloc_zone *tree_zone;

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16
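
/* For example, with 4K system pages (the real value comes from
   getpagesize at runtime), one quire is a single 64K mmap: the first
   page satisfies the current request and the remaining 15 pages are
   pushed onto the zone's free page cache (see alloc_small_page).  */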
static int ggc_allocated_p (const void *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct page_entry * alloc_small_page (struct alloc_zone *);
static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
static void free_page (struct page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void check_cookies (void);


/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  struct alloc_chunk *chunk;
  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->type == 1)
    return true;
  return false;
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0);

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;
  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
  return page;
}
#endif
/* Allocate a new small-object page for ZONE, and return an entry
   for it.  */

static inline struct page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  page = NULL;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
      page = entry->page;
    }
#ifdef USING_MMAP
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = zone->free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      zone->free_pages = f;
    }
#endif
  if (entry == NULL)
    entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));

  entry->next = 0;
  entry->bytes = G.pagesize;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = false;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + G.pagesize - 1);

  return entry;
}
/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* Allocate a large page of size SIZE in ZONE.  */
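
/* A large page is a single xmalloc'd block laid out as

       [ alloc_chunk header | SIZE bytes of object data | page_entry ]

   so the page_entry describing the page lives at
   page + SIZE + CHUNK_OVERHEAD, and freeing entry->page releases the
   descriptor as well (see free_page).  */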
static inline struct page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;
  size = ROUND_UP (size, 1024);
  page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
  entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);

  entry->next = 0;
  entry->bytes = size;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = true;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + size - 1);

  return entry;
}


/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, entry->page, entry->page + entry->bytes - 1);

  if (entry->large_p)
    {
      free (entry->page);
      VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
    }
  else
    {
      /* Mark the page as inaccessible.  Discard the handle to
	 avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

      entry->next = entry->zone->free_pages;
      entry->zone->free_pages = entry;
    }
}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}

/* Place CHUNK of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
{
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  if (bin == 0)
    abort ();
  if (bin > NUM_FREE_BINS)
    bin = 0;
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
    abort ();
  chunk->magic = DEADCHUNK_MAGIC;
#endif
  chunk->u.next_free = zone->free_chunks[bin];
  zone->free_chunks[bin] = chunk;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}
/* Allocate a chunk of memory of SIZE bytes.  */

static void *
ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
{
  size_t bin = 0;
  size_t lsize = 0;
  struct page_entry *entry;
  struct alloc_chunk *chunk, *lchunk, **pp;
  void *result;

  /* Align size, so that we're assured of aligned allocations.  */
  if (size < FREE_BIN_DELTA)
    size = FREE_BIN_DELTA;
  size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Large objects are handled specially.  */
  if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
    {
      size = ROUND_UP (size, 1024);
      entry = alloc_large_page (size, zone);
      entry->survived = 0;
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 1;
      chunk->size = CEIL (size, 1024);

      goto found;
    }

  /* First look for a tiny object already segregated into its own
     size bucket.  */
  bin = SIZE_BIN_UP (size);
  if (bin <= NUM_FREE_BINS)
    {
      chunk = zone->free_chunks[bin];
      if (chunk)
	{
	  zone->free_chunks[bin] = chunk->u.next_free;
	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->u.next_free;
      chunk = *pp;
    }

  /* Failing that, allocate new storage.  */
  if (!chunk)
    {
      entry = alloc_small_page (zone);
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = G.pagesize - CHUNK_OVERHEAD;
      chunk->large = 0;
    }
  else
    {
      *pp = chunk->u.next_free;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 0;
    }
  /* Release extra memory from a chunk that's too big.  */
  lsize = chunk->size - size;
  if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;

      lsize -= CHUNK_OVERHEAD;
      lchunk = (struct alloc_chunk *)(chunk->u.data + size);
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
#ifdef COOKIE_CHECKING
      lchunk->magic = CHUNK_MAGIC;
#endif
      lchunk->type = 0;
      lchunk->mark = 0;
      lchunk->size = lsize;
      lchunk->large = 0;
      free_chunk (lchunk, lsize, zone);
    }
  /* Calculate the object's address.  */
 found:
#ifdef COOKIE_CHECKING
  chunk->magic = CHUNK_MAGIC;
#endif
  chunk->type = 1;
  chunk->mark = 0;
  chunk->typecode = type;
  result = chunk->u.data;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning the object by writing 0xaf, in an attempt to keep
     the exact same semantics in the presence of memory bugs, regardless
     of ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop
     the handle to avoid a handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* `Poison' the entire allocated object.  */
  memset (result, 0xaf, size);
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size + CHUNK_OVERHEAD;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
	     (void *)chunk, (unsigned long) size, result);

  return result;
}
/* Allocate SIZE bytes of GC'd memory for an object of type GTE,
   putting it in an appropriate zone for that type.  */

void *
ggc_alloc_typed (enum gt_types_enum gte, size_t size)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_1 (size, tree_zone, gte);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    default:
      return ggc_alloc_zone_1 (size, &main_zone, gte);
    }
}
/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc (size_t size)
{
  return ggc_alloc_zone_1 (size, &main_zone, -1);
}

/* Zone allocation allocates into the specified zone.  */

void *
ggc_alloc_zone (size_t size, struct alloc_zone *zone)
{
  return ggc_alloc_zone_1 (size, zone, -1);
}
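
/* For illustration only (hypothetical caller, not part of this file):
   a client that wants related objects grouped together could do

       struct alloc_zone *my_zone = new_ggc_zone ("my zone");
       void *obj = ggc_alloc_zone (sizeof (struct my_struct), my_zone);
       ...
       destroy_ggc_zone (my_zone);

   new_ggc_zone, ggc_alloc_zone and destroy_ggc_zone are defined in
   this file; a zone marked dead by destroy_ggc_zone is actually freed
   at the end of the next ggc_collect.  */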
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->mark)
    return 1;
  chunk->mark = 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  return chunk->mark;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->large)
    return chunk->size * 1024;

  return chunk->size;
}
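
/* Note that large objects store their size in 1K units (see
   ggc_alloc_zone_1), so for them the value returned above is the
   requested size rounded up to a whole kilobyte.  */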
/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  rtl_zone = new_ggc_zone ("RTL zone");
  tree_zone = new_ggc_zone ("Tree zone");
  garbage_zone = new_ggc_zone ("Garbage zone");

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);
#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = main_zone.free_pages;
    main_zone.free_pages = e;
  }
#endif
}

/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char * name)
{
  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
  return new_zone;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone * dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */ ;

#ifdef ENABLE_CHECKING
  /* We should have found the zone in the list.  Anything else is fatal.  */
  if (!z)
    abort ();
#endif

  /* z is dead, baby.  z is dead.  */
  z->dead = true;
}

/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ++(zone->context_depth);
  /* Die on wrap.  */
  if (main_zone.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

static void
ggc_pop_context_1 (struct alloc_zone *zone)
{
  unsigned long omask;
  unsigned depth;
  page_entry *p;

  depth = --(zone->context_depth);
  omask = (unsigned long)1 << (depth + 1);

  if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
    return;

  zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
  zone->context_depth_allocations &= omask - 1;
  zone->context_depth_collections &= omask - 1;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (p = zone->pages; p != NULL; p = p->next)
    if (p->context_depth > depth)
      p->context_depth = depth;
}

/* Pop all the zone contexts.  */

void
ggc_pop_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ggc_pop_context_1 (zone);
}
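
/* For illustration only (hypothetical caller, not part of this file):

       ggc_push_context ();
       ... allocate temporary GC'd objects ...
       ggc_collect ();      (only objects from the innermost context
                             are candidates for collection)
       ggc_pop_context ();  (leftover objects migrate to the outer context)

   matching the description at the top of this file.  */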
/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
  memset ((CHUNK)->u.data, 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#endif

/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  page_entry **pp, *p, *next;
  struct alloc_chunk *chunk, *last_free, *end;
  size_t last_free_size, allocated = 0;
  bool nomarksinpage;
  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  pp = &zone->pages;
  for (p = zone->pages; p ; p = next)
    {
      next = p->next;
      /* Large pages are all or none affairs.  Either they are
	 completely empty, or they are completely full.

	 XXX: Should we bother to increment allocated.  */
      if (p->large_p)
	{
	  if (((struct alloc_chunk *)p->page)->mark == 1)
	    {
	      ((struct alloc_chunk *)p->page)->mark = 0;
	    }
	  else
	    {
	      *pp = next;
#ifdef ENABLE_GC_CHECKING
	      /* Poison the page.  */
	      memset (p->page, 0xb5, p->bytes);
#endif
	      free_page (p);
	    }
	  continue;
	}

      /* This page has now survived another collection.  */
      p->survived++;

      /* Which leaves full and partial pages.  Step through all chunks,
	 consolidate those that are free and insert them into the free
	 lists.  Note that consolidation slows down collection
	 slightly.  */

      chunk = (struct alloc_chunk *)p->page;
      end = (struct alloc_chunk *)(p->page + G.pagesize);
      last_free = NULL;
      last_free_size = 0;
      nomarksinpage = true;
      do
	{
	  prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
	  if (chunk->mark || p->context_depth < zone->context_depth)
	    {
	      nomarksinpage = false;
	      if (last_free)
		{
		  last_free->type = 0;
		  last_free->size = last_free_size;
		  last_free->mark = 0;
		  poison_chunk (last_free, last_free_size);
		  free_chunk (last_free, last_free_size, zone);
		  last_free = NULL;
		}
	      if (chunk->mark)
		{
		  allocated += chunk->size + CHUNK_OVERHEAD;
		}
	      chunk->mark = 0;
	    }
	  else
	    {
	      if (last_free)
		{
		  last_free_size += CHUNK_OVERHEAD + chunk->size;
		}
	      else
		{
		  last_free = chunk;
		  last_free_size = chunk->size;
		}
	    }

	  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
	}
      while (chunk < end);

      if (nomarksinpage)
	{
	  *pp = next;
#ifdef ENABLE_GC_CHECKING
	  /* Poison the page.  */
	  memset (p->page, 0xb5, p->bytes);
#endif
	  free_page (p);
	  continue;
	}
      else if (last_free)
	{
	  last_free->type = 0;
	  last_free->size = last_free_size;
	  last_free->mark = 0;
	  poison_chunk (last_free, last_free_size);
	  free_chunk (last_free, last_free_size, zone);
	}
      pp = &p->next;
    }

  zone->allocated = allocated;
}
/* mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */
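
/* For example, if a zone ended the previous collection with 10 MB
   live and GGC_MIN_EXPAND is 30, collection of that zone is skipped
   until its allocated total passes 13 MB; GGC_MIN_HEAPSIZE (in
   kilobytes) puts a floor under that 10 MB base for small zones.  */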
static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
  if (!zone->dead)
    {
      /* Avoid frequent unnecessary work by skipping collection if the
	 total allocations haven't expanded much since the last
	 collection.  */
      float allocated_last_gc =
	MAX (zone->allocated_last_gc,
	     (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

      float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (zone->allocated < allocated_last_gc + min_expand)
	return false;
    }

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
	     zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  /* Indicate that we've seen collections at this context depth.  */
  zone->context_depth_collections
    = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
  if (need_marking)
    ggc_mark_roots ();
  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  page_entry *p;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->survived;
    }
  return survival/count;
}

/* Check the magic cookies all of the chunks contain, to make sure we
   aren't doing anything stupid, like stomping on alloc_chunk
   structures.  */

static inline void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
  page_entry *p;
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      for (p = zone->pages; p; p = p->next)
	{
	  if (!p->large_p)
	    {
	      struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
	      struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
	      do
		{
		  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
		    abort ();
		  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
		}
	      while (chunk < end);
	    }
	}
    }
#endif
}
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;
  float f;

  timevar_push (TV_GC);
  check_cookies ();
  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
	{
	  check_cookies ();
	  zone->was_collected = false;
	  marked |= ggc_collect_1 (zone, !marked);
	}
    }

  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    {
	      f = calculate_average_page_survival (zone);
	      printf ("Average page survival in zone `%s' is %f\n",
		      zone->name, f);
	    }
	}
    }

  /* Since we don't mark zone at a time right now, marking in any
     zone means marking in every zone.  So we have to clear all the
     marks in all the zones that weren't collected already.  */
  if (marked)
    {
      page_entry *p;
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    continue;
	  for (p = zone->pages; p; p = p->next)
	    {
	      if (!p->large_p)
		{
		  struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
		  struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
		  do
		    {
		      prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
		      if (chunk->mark || p->context_depth < zone->context_depth)
			{
			  chunk->mark = 0;
			}
		      chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
		    }
		  while (chunk < end);
		}
	      else
		{
		  ((struct alloc_chunk *)p->page)->mark = 0;
		}
	    }
	}
    }

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
	{
	  struct alloc_zone *dead_zone = zone->next_zone;

	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

	  /* The zone must be empty.  */
	  if (dead_zone->allocated != 0)
	    abort ();

	  /* Unchain the dead zone, release all its pages and free it.  */
	  zone->next_zone = zone->next_zone->next_zone;
	  release_pages (dead_zone);
	  free (dead_zone);
	}
    }

  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */

void
ggc_print_statistics (void)
{
}

struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned total;
  } d;
  size_t base;
  size_t written;
};

/* Initialize the PCH datastructure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}

/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    {
      d->d.total += size + CHUNK_OVERHEAD;
    }
  else
    d->d.total += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  return d->d.total;
}

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  d->base = (size_t) base;
}

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string)
{
  char *result;
  result = (char *)d->base;
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
      if (chunk->large)
	d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
      else
	d->base += chunk->size + CHUNK_OVERHEAD;
      return result + CHUNK_OVERHEAD;
    }
  else
    {
      d->base += size;
      return result;
    }
}

/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
      size = ggc_get_size (x);
      if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
      d->written += size + CHUNK_OVERHEAD;
    }
  else
    {
      if (fwrite (x, size, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
      d->written += size;
    }
  if (d->written == d->d.total
      && fseek (f, ROUND_UP_VALUE (d->d.total, G.pagesize), SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  struct page_entry *entry;
  struct alloc_zone *pch_zone;
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");
  entry = xcalloc (1, sizeof (struct page_entry));
  entry->bytes = d.total;
  entry->page = addr;
  entry->context_depth = 0;
  pch_zone = new_ggc_zone ("PCH zone");
  entry->zone = pch_zone;
  entry->next = entry->zone->pages;
  entry->zone->pages = entry;