1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin (dberlin@dberlin.org)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "toplev.h"
31 #include "varray.h"
32 #include "flags.h"
33 #include "ggc.h"
34 #include "timevar.h"
35 #include "params.h"
36 #include "bitmap.h"
#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif
/* NOTES:
   If we track inter-zone pointers, we can mark single zones at a
   time.
   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.
   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */

/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than or equal to page size.

   Pages for small objects are broken up into chunks, each of which
   are described by a struct alloc_chunk.  One can walk over all
   chunks on the page by adding the chunk size to the chunk's data
   address.  The free space for a page exists in the free chunk bins.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
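
/* To make the chunk walk concrete: every loop below that visits the
   chunks on a small-object page (see sweep_pages and check_cookies)
   has this shape, because each chunk's data is followed immediately
   by the header of the next chunk:

     struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
     struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
     do
       {
         ... inspect chunk->mark, chunk->size, ...
         chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
       }
     while (chunk < end);  */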
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif
/* This structure manages small chunks.  When the chunk is free, it's
   linked with other chunks via free_next.  When the chunk is allocated,
   the data starts at u.  Large chunks are allocated one at a time to
   their own page, and so don't come in here.

   The "type" field is a placeholder for a future change to do
   generational collection.  At present it is 0 when free and
   1 when allocated.  */

struct alloc_chunk {
#ifdef COOKIE_CHECKING
  unsigned int magic;
#endif
  unsigned int type:1;
  unsigned int typecode:15;
  unsigned int size:15;
  unsigned int mark:1;
  union {
    struct alloc_chunk *next_free;
    char data[1];

    /* Make sure the data is sufficiently aligned.  */
    HOST_WIDEST_INT align_i;
#ifdef HAVE_LONG_DOUBLE
    long double align_d;
#else
    double align_d;
#endif
  } u;
} __attribute__ ((packed));
#define CHUNK_OVERHEAD	(offsetof (struct alloc_chunk, u))

/* We maintain several bins of free lists for chunks for very small
   objects.  We never exhaustively search other bins -- if we don't
   find one of the proper size, we allocate from the "larger" bin.  */

/* Decreasing the number of free bins increases the time it takes to
   allocate.  The same applies to increasing max_free_bin_size without
   increasing num_free_bins.

   After much histogramming of allocation sizes and time spent on gc,
   on a PowerPC G4 7450 at 667 MHz and a Pentium 4 at 2.8 GHz, these
   were determined to be the optimal values.  */
#define NUM_FREE_BINS		64
#define MAX_FREE_BIN_SIZE	256
#define FREE_BIN_DELTA		(MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
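
/* With the values above, FREE_BIN_DELTA is 256 / 64 == 4.  For
   example, an allocation request of 9 bytes is served from bin
   SIZE_BIN_UP (9) == (9 + 3) / 4 == 3, whose chunks are large
   enough, while a freed chunk of size 9 goes onto bin
   SIZE_BIN_DOWN (9) == 2.  Sizes that map above NUM_FREE_BINS fall
   back to the "other" bin 0 (see free_chunk below).  */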
/* Marker used as chunk->size for a large object.  This is the
   largest value representable in the 15-bit size bitfield above,
   hence 0x7fff.  */
#define LARGE_OBJECT_SIZE 0x7fff
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
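
/* For instance, on a target where the strictest member of the union
   (say long double) must be 16-byte aligned, the compiler places U
   at offset 16 from the leading char, so MAX_ALIGNMENT evaluates to
   16; with only 8-byte alignment requirements it evaluates to 8.
   The single char before the union is what forces the padding that
   offsetof then measures.  */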
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
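
/* For example, ROUND_UP_VALUE (10, 8) == 7 - ((7 + 10) % 8) == 6,
   and ROUND_UP (10, 8) == CEIL (10, 8) * 8 == 16.  */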
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
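
/* As a worked example, assume 4K system pages, i.e. G.lg_pagesize
   == 12, so that PAGE_L2_BITS == 32 - 8 - 12 == 12.  Then for
   p == 0x12345678:

     LOOKUP_L1 (p) == (0x12345678 >> 24) & 0xff  == 0x12
     LOOKUP_L2 (p) == (0x12345678 >> 12) & 0xfff == 0x345

   and the remaining low 12 bits (0x678) are the offset of P within
   its page.  */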
struct alloc_zone;

/* A page_entry records the status of an allocation page.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* How many collections we've survived.  */
  size_t survived;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* Number of bytes on the page unallocated.  Only used during
     collection, and even then large pages merely set this non-zero.  */
  size_t bytes_free;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* The zone this page entry belongs to.  */
  struct alloc_zone *zone;
} page_entry;
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The global variables.  */
static struct globals
{
  /* The page lookup table.  A single page can only belong to one
     zone.  This means free pages are zone-specific ATM.  */
  page_table lookup;
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* The zone allocation structure.  */
struct alloc_zone
{
  /* Name of the zone.  */
  const char *name;

  /* Linked list of pages in a zone.  */
  page_entry *pages;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;
} main_zone;

struct alloc_zone *rtl_zone;
struct alloc_zone *garbage_zone;
struct alloc_zone *tree_zone;
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16
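
/* E.g. with 4K system pages, each batch maps 16 * 4K == 64K, and 16
   bits of in_use fit comfortably in page_group's unsigned int
   bitmask.  */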
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_small_page (struct alloc_zone *);
static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
static void free_page (struct page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void check_cookies (void);
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof (*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  VALGRIND_MALLOCLIKE_BLOCK (page, size, 0, 0);

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;
  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  The entry is not added to the appropriate
   page_table list.  */

static inline struct page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  page = NULL;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
      page = entry->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = entry->group;
#endif
    }
#ifdef USING_MMAP
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = zone->free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
        {
          e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      zone->free_pages = f;
    }
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      /* A small-object page is always exactly one system page.  */
      size_t entry_size = G.pagesize;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);
      VALGRIND_MALLOCLIKE_BLOCK (allocation, alloc_size, 0, 0);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *) page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          if (tail_slop < sizeof (page_group))
            abort ();
          group = (page_group *) enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = zone->page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      zone->page_groups = group;
      zone->bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = zone->free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          zone->free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));

  entry->next = 0;
  entry->bytes = G.pagesize;
  entry->bytes_free = G.pagesize;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = false;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long) 1 << zone->context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
             (PTR) entry, page, page + G.pagesize - 1);

  return entry;
}
/* Allocate a large page of size SIZE in ZONE.  */

static inline struct page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
  entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);

  entry->next = 0;
  entry->bytes = size;
  entry->bytes_free = LARGE_OBJECT_SIZE + CHUNK_OVERHEAD;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = true;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long) 1 << zone->context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = NULL;
#endif
  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
             (PTR) entry, page, page + size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n", entry->zone->name,
             (PTR) entry, entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  if (entry->large_p)
    {
      free (entry->page);
      VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
    }
  else
    {
      /* Mark the page as inaccessible.  Discard the handle to
         avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

#ifdef USING_MALLOC_PAGE_GROUPS
      clear_page_group_in_use (entry->group, entry->page);
#endif
    }

  entry->next = entry->zone->free_pages;
  entry->zone->free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &(zone->free_pages);
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &(zone->page_groups);
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        zone->bytes_mapped -= g->alloc_size;
        free (g->allocation);
        VALGRIND_FREELIKE_BLOCK (g->allocation, 0);
      }
    else
      gp = &g->next;
#endif
}
/* Place CHUNK of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
{
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  if (bin == 0)
    abort ();
  if (bin > NUM_FREE_BINS)
    bin = 0;
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
    abort ();
  chunk->magic = DEADCHUNK_MAGIC;
#endif
  chunk->u.next_free = zone->free_chunks[bin];
  zone->free_chunks[bin] = chunk;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *) chunk);
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}
/* Allocate a chunk of memory of SIZE bytes.  */

static void *
ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
{
  size_t bin = 0;
  size_t lsize = 0;
  struct page_entry *entry;
  struct alloc_chunk *chunk, *lchunk, **pp;
  void *result;

  /* Align size, so that we're assured of aligned allocations.  */
  if (size < FREE_BIN_DELTA)
    size = FREE_BIN_DELTA;
  size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Large objects are handled specially.  */
  if (size >= G.pagesize - 2 * CHUNK_OVERHEAD - FREE_BIN_DELTA)
    {
      entry = alloc_large_page (size, zone);
      entry->survived = 0;
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = LARGE_OBJECT_SIZE;

      goto found;
    }

  /* First look for a tiny object already segregated into its own
     size bucket.  */
  bin = SIZE_BIN_UP (size);
  if (bin <= NUM_FREE_BINS)
    {
      chunk = zone->free_chunks[bin];
      if (chunk)
        {
          zone->free_chunks[bin] = chunk->u.next_free;
          VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
          goto found;
        }
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->u.next_free;
      chunk = *pp;
    }

  /* Failing that, allocate new storage.  */
  if (!chunk)
    {
      entry = alloc_small_page (zone);
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = G.pagesize - CHUNK_OVERHEAD;
    }
  else
    {
      *pp = chunk->u.next_free;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
    }
  /* Release extra memory from a chunk that's too big.  */
  lsize = chunk->size - size;
  if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;

      lsize -= CHUNK_OVERHEAD;
      lchunk = (struct alloc_chunk *) (chunk->u.data + size);
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
#ifdef COOKIE_CHECKING
      lchunk->magic = CHUNK_MAGIC;
#endif
      lchunk->type = 0;
      lchunk->mark = 0;
      lchunk->size = lsize;
      free_chunk (lchunk, lsize, zone);
    }
  /* Calculate the object's address.  */
 found:
#ifdef COOKIE_CHECKING
  chunk->magic = CHUNK_MAGIC;
#endif
  chunk->type = 1;
  chunk->mark = 0;
  chunk->typecode = type;
  result = chunk->u.data;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* `Poison' the entire allocated object.  */
  memset (result, 0xaf, size);
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size + CHUNK_OVERHEAD;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
             (void *) chunk, (unsigned long) size, result);

  return result;
}
/* Allocate SIZE bytes of memory whose GGC type is GTE, in an
   appropriate zone for that type.  */

void *
ggc_alloc_typed (enum gt_types_enum gte, size_t size)
{
  if (gte == gt_ggc_e_14lang_tree_node)
    return ggc_alloc_zone_1 (size, tree_zone, gte);
  else if (gte == gt_ggc_e_7rtx_def)
    return ggc_alloc_zone_1 (size, rtl_zone, gte);
  else if (gte == gt_ggc_e_9rtvec_def)
    return ggc_alloc_zone_1 (size, rtl_zone, gte);
  else
    return ggc_alloc_zone_1 (size, &main_zone, gte);
}

/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc (size_t size)
{
  return ggc_alloc_zone_1 (size, &main_zone, -1);
}

/* Zone allocation allocates into the specified zone.  */

void *
ggc_alloc_zone (size_t size, struct alloc_zone *zone)
{
  return ggc_alloc_zone_1 (size, zone, -1);
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  struct alloc_chunk *chunk;

#ifdef ENABLE_CHECKING
  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  if (entry == NULL)
    abort ();
#endif
  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->mark)
    return 1;
  chunk->mark = 1;

#ifndef ENABLE_CHECKING
  entry = lookup_page_table_entry (p);
#endif

  /* Large pages are either completely full or completely empty.  So if
     they are marked, they are completely full.  */
  if (entry->large_p)
    entry->bytes_free = 0;
  else
    entry->bytes_free -= chunk->size + CHUNK_OVERHEAD;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct alloc_chunk *chunk;

#ifdef ENABLE_CHECKING
  {
    page_entry *entry = lookup_page_table_entry (p);
    if (entry == NULL)
      abort ();
  }
#endif

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  return chunk->mark;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct alloc_chunk *chunk;
  struct page_entry *entry;

#ifdef ENABLE_CHECKING
  entry = lookup_page_table_entry (p);
  if (entry == NULL)
    abort ();
#endif

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->size == LARGE_OBJECT_SIZE)
    {
#ifndef ENABLE_CHECKING
      entry = lookup_page_table_entry (p);
#endif
      return entry->bytes;
    }

  return chunk->size;
}
/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* Create the zones.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  rtl_zone = xcalloc (1, sizeof (struct alloc_zone));
  rtl_zone->name = "RTL zone";
  /* The main zone's connected to the ... rtl_zone.  */
  G.zones->next_zone = rtl_zone;

  garbage_zone = xcalloc (1, sizeof (struct alloc_zone));
  garbage_zone->name = "Garbage zone";
  /* The rtl zone's connected to the ... garbage zone.  */
  rtl_zone->next_zone = garbage_zone;

  tree_zone = xcalloc (1, sizeof (struct alloc_zone));
  tree_zone->name = "Tree zone";
  /* The garbage zone's connected to ... the tree zone.  */
  garbage_zone->next_zone = tree_zone;

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct page_entry *e;
    if ((size_t) p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, &main_zone);
        if ((size_t) p & (G.pagesize - 1))
          abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = main_zone.free_pages;
    main_zone.free_pages = e;
  }
#endif
}
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ++(zone->context_depth);
  /* Die on wrap.  */
  if (main_zone.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

static void
ggc_pop_context_1 (struct alloc_zone *zone)
{
  unsigned long omask;
  unsigned depth;
  page_entry *p;

  depth = --(zone->context_depth);
  omask = (unsigned long) 1 << (depth + 1);

  if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
    return;
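
  /* OMASK is the bit corresponding to the context depth we just
     popped.  Fold that depth's allocation bit down onto the new
     current depth, then clear every bit at or above the popped
     depth.  */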
  zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
  zone->context_depth_allocations &= omask - 1;
  zone->context_depth_collections &= omask - 1;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (p = zone->pages; p != NULL; p = p->next)
    if (p->context_depth > depth)
      p->context_depth = depth;
}
/* Pop all the zone contexts.  */

void
ggc_pop_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ggc_pop_context_1 (zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
  memset ((CHUNK)->u.data, 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#endif
/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  page_entry **pp, *p, *next;
  struct alloc_chunk *chunk, *last_free, *end;
  size_t last_free_size, allocated = 0;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  pp = &zone->pages;
  for (p = zone->pages; p ; p = next)
    {
      next = p->next;

      /* For empty pages, just free the page.  */
      if (p->bytes_free == G.pagesize && p->context_depth == zone->context_depth)
        {
          *pp = next;
#ifdef ENABLE_GC_CHECKING
          /* Poison the page.  */
          memset (p->page, 0xb5, p->bytes);
#endif
          free_page (p);
          continue;
        }

      /* Large pages are all or none affairs.  Either they are
         completely empty, or they are completely full.
         Thus, if the above didn't catch it, we need not do anything
         except remove the mark and reset the bytes_free.

         XXX: Should we bother to increment allocated?  */
      else if (p->large_p)
        {
          p->bytes_free = p->bytes;
          ((struct alloc_chunk *) p->page)->mark = 0;
          continue;
        }
      pp = &p->next;

      /* This page has now survived another collection.  */
      p->survived++;

      /* Which leaves full and partial pages.  Step through all chunks,
         consolidate those that are free and insert them into the free
         lists.  Note that consolidation slows down collection
         slightly.  */

      chunk = (struct alloc_chunk *) p->page;
      end = (struct alloc_chunk *) (p->page + G.pagesize);
      last_free = NULL;
      last_free_size = 0;

      do
        {
          prefetch ((struct alloc_chunk *) (chunk->u.data + chunk->size));
          if (chunk->mark || p->context_depth < zone->context_depth)
            {
              if (last_free)
                {
                  last_free->type = 0;
                  last_free->size = last_free_size;
                  last_free->mark = 0;
                  poison_chunk (last_free, last_free_size);
                  free_chunk (last_free, last_free_size, zone);
                  last_free = NULL;
                }
              if (chunk->mark)
                {
                  allocated += chunk->size + CHUNK_OVERHEAD;
                  p->bytes_free += chunk->size + CHUNK_OVERHEAD;
                }
              chunk->mark = 0;
#ifdef ENABLE_CHECKING
              if (p->bytes_free > p->bytes)
                abort ();
#endif
            }
          else
            {
              if (last_free)
                {
                  last_free_size += CHUNK_OVERHEAD + chunk->size;
                }
              else
                {
                  last_free = chunk;
                  last_free_size = chunk->size;
                }
            }

          chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
        }
      while (chunk < end);

      if (last_free)
        {
          last_free->type = 0;
          last_free->size = last_free_size;
          last_free->mark = 0;
          poison_chunk (last_free, last_free_size);
          free_chunk (last_free, last_free_size, zone);
        }
    }

  zone->allocated = allocated;
}
/* mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (zone->allocated_last_gc, (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (zone->allocated < allocated_last_gc + min_expand)
    return false;

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ", zone->name,
             (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  /* Indicate that we've seen collections at this context depth.  */
  zone->context_depth_collections
    = ((unsigned long) 1 << (zone->context_depth + 1)) - 1;
  if (need_marking)
    ggc_mark_roots ();
  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  page_entry *p;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->survived;
    }
  return survival / count;
}
/* Check the magic cookies all of the chunks contain, to make sure we
   aren't doing anything stupid, like stomping on alloc_chunk
   structures.  */

static inline void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
  page_entry *p;
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      for (p = zone->pages; p; p = p->next)
        {
          if (!p->large_p)
            {
              struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
              struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
              do
                {
                  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
                    abort ();
                  chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
                }
              while (chunk < end);
            }
        }
    }
#endif
}
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;
  float f;

  timevar_push (TV_GC);
  check_cookies ();
  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);
  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */

  if (main_zone.was_collected)
    {
      check_cookies ();
      rtl_zone->was_collected = false;
      marked |= ggc_collect_1 (rtl_zone, !marked);
      check_cookies ();
      tree_zone->was_collected = false;
      marked |= ggc_collect_1 (tree_zone, !marked);
      check_cookies ();
      garbage_zone->was_collected = false;
      marked |= ggc_collect_1 (garbage_zone, !marked);
    }

  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      if (rtl_zone->was_collected)
        {
          f = calculate_average_page_survival (rtl_zone);
          printf ("Average RTL page survival is %f\n", f);
        }
      if (main_zone.was_collected)
        {
          f = calculate_average_page_survival (&main_zone);
          printf ("Average main page survival is %f\n", f);
        }
      if (tree_zone->was_collected)
        {
          f = calculate_average_page_survival (tree_zone);
          printf ("Average tree page survival is %f\n", f);
        }
    }

  /* Since we don't mark zone at a time right now, marking in any
     zone means marking in every zone.  So we have to clear all the
     marks in all the zones that weren't collected already.  */
  if (marked)
    {
      page_entry *p;
      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          if (zone->was_collected)
            continue;
          for (p = zone->pages; p; p = p->next)
            {
              if (!p->large_p)
                {
                  struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
                  struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
                  do
                    {
                      prefetch ((struct alloc_chunk *) (chunk->u.data + chunk->size));
                      if (chunk->mark || p->context_depth < zone->context_depth)
                        {
                          if (chunk->mark)
                            p->bytes_free += chunk->size + CHUNK_OVERHEAD;
#ifdef ENABLE_CHECKING
                          if (p->bytes_free > p->bytes)
                            abort ();
#endif
                          chunk->mark = 0;
                        }
                      chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
                    }
                  while (chunk < end);
                }
              else
                {
                  p->bytes_free = p->bytes;
                  ((struct alloc_chunk *) p->page)->mark = 0;
                }
            }
        }
    }
  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */

void
ggc_print_statistics (void)
{
}

struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned total;
  } d;
  size_t base;
  size_t written;
};
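
/* The PCH machinery drives the hooks below roughly in this order
   (a sketch; the exact driver lives in ggc-common.c):

     d = init_ggc_pch ();
     for each object X:  ggc_pch_count_object (d, X, size, is_string);
     total = ggc_pch_total_size (d);
     ggc_pch_this_base (d, base);
     for each object X:  newx = ggc_pch_alloc_object (d, X, size, is_string);
     ggc_pch_prepare_write (d, f);
     for each object X:  ggc_pch_write_object (d, f, X, newx, size, is_string);
     ggc_pch_finish (d, f);  */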
/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}

/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string)
{
  if (!is_string)
    {
      d->d.total += size + CHUNK_OVERHEAD;
    }
  else
    d->d.total += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  return d->d.total;
}

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  d->base = (size_t) base;
}

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
                      size_t size, bool is_string)
{
  char *result;
  result = (char *) d->base;
  if (!is_string)
    {
      struct alloc_chunk *chunk
        = (struct alloc_chunk *) ((char *) x - CHUNK_OVERHEAD);
      if (chunk->size == LARGE_OBJECT_SIZE)
        d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
      else
        d->base += chunk->size + CHUNK_OVERHEAD;
      return result + CHUNK_OVERHEAD;
    }
  else
    {
      d->base += size;
      return result;
    }
}
/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
                      size_t size, bool is_string)
{
  if (!is_string)
    {
      struct alloc_chunk *chunk
        = (struct alloc_chunk *) ((char *) x - CHUNK_OVERHEAD);
      size = chunk->size;
      if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
        fatal_error ("can't write PCH file: %m");
      d->written += size + CHUNK_OVERHEAD;
    }
  else
    {
      if (fwrite (x, size, 1, f) != 1)
        fatal_error ("can't write PCH file: %m");
      d->written += size;
    }
  if (d->written == d->d.total
      && fseek (f, ROUND_UP_VALUE (d->d.total, G.pagesize), SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  struct page_entry *entry;
  char *pte;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");
  entry = xcalloc (1, sizeof (struct page_entry));
  entry->bytes = d.total;
  entry->page = addr;
  entry->context_depth = 0;
  entry->zone = &main_zone;
  for (pte = entry->page;
       pte < entry->page + entry->bytes;
       pte += G.pagesize)
    set_page_table_entry (pte, entry);
}