1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin (dberlin@dberlin.org)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "toplev.h"
31 #include "varray.h"
32 #include "flags.h"
33 #include "ggc.h"
34 #include "timevar.h"
35 #include "params.h"
36 #include "bitmap.h"
38 #ifdef ENABLE_VALGRIND_CHECKING
39 # ifdef HAVE_VALGRIND_MEMCHECK_H
40 # include <valgrind/memcheck.h>
41 # elif defined HAVE_MEMCHECK_H
42 # include <memcheck.h>
43 # else
44 # include <valgrind.h>
45 # endif
46 #else
47 /* Avoid #ifdefs when we can help it. */
48 #define VALGRIND_DISCARD(x)
49 #define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
50 #define VALGRIND_FREELIKE_BLOCK(x,y)
51 #endif
52 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
53 file open. Prefer either to valloc. */
54 #ifdef HAVE_MMAP_ANON
55 # undef HAVE_MMAP_DEV_ZERO
57 # include <sys/mman.h>
58 # ifndef MAP_FAILED
59 # define MAP_FAILED -1
60 # endif
61 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
62 # define MAP_ANONYMOUS MAP_ANON
63 # endif
64 # define USING_MMAP
66 #endif
68 #ifdef HAVE_MMAP_DEV_ZERO
70 # include <sys/mman.h>
71 # ifndef MAP_FAILED
72 # define MAP_FAILED -1
73 # endif
74 # define USING_MMAP
76 #endif
78 #ifndef USING_MMAP
79 #define USING_MALLOC_PAGE_GROUPS
80 #endif
82 #if (GCC_VERSION < 3001)
83 #define prefetch(X) ((void) X)
84 #else
85 #define prefetch(X) __builtin_prefetch (X)
86 #endif
88 /* NOTES:
89 If we track inter-zone pointers, we can mark single zones at a
90 time.
91 If we have a zone where we guarantee no inter-zone pointers, we
92 could mark that zone separately.
93 The garbage zone should not be marked, and we should return 1 in
94 ggc_set_mark for any object in the garbage zone, which cuts off
95 marking quickly. */
96 /* Strategy:
98 This garbage-collecting allocator segregates objects into zones.
99 It also segregates objects into "large" and "small" bins. Large
100 objects are greater than or equal to the page size.
102 Pages for small objects are broken up into chunks, each of which
103 is described by a struct alloc_chunk. One can walk over all
104 chunks on the page by adding the chunk size to the chunk's data
105 address. The free space for a page exists in the free chunk bins.
107 Each page-entry also has a context depth, which is used to track
108 pushing and popping of allocation contexts. Only objects allocated
109 in the current (highest-numbered) context may be collected.
111 Empty pages (of all sizes) are kept on a single page cache list,
112 and are considered first when new pages are required; they are
113 deallocated at the start of the next collection if they haven't
114 been recycled by then. */
116 /* Define GGC_DEBUG_LEVEL to print debugging information.
117 0: No debugging output.
118 1: GC statistics only.
119 2: Page-entry allocations/deallocations as well.
120 3: Object allocations as well.
121 4: Object marks as well. */
122 #define GGC_DEBUG_LEVEL (0)
124 #ifndef HOST_BITS_PER_PTR
125 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
126 #endif
127 #ifdef COOKIE_CHECKING
128 #define CHUNK_MAGIC 0x95321123
129 #define DEADCHUNK_MAGIC 0x12817317
130 #endif
132 /* This structure manages small chunks. When the chunk is free, it's
133 linked with other chunks via next_free. When the chunk is allocated,
134 the data starts at u. Large chunks are allocated one at a time to
135 their own page, and so don't come in here.
137 The "type" field is a placeholder for a future change to do
138 generational collection. At present it is 0 when free and
139 1 when allocated. */
141 struct alloc_chunk {
142 #ifdef COOKIE_CHECKING
143 unsigned int magic;
144 #endif
145 unsigned int type:1;
146 unsigned int typecode:15;
147 unsigned int size:15;
148 unsigned int mark:1;
149 union {
150 struct alloc_chunk *next_free;
151 char data[1];
153 /* Make sure the data is sufficiently aligned. */
154 HOST_WIDEST_INT align_i;
155 #ifdef HAVE_LONG_DOUBLE
156 long double align_d;
157 #else
158 double align_d;
159 #endif
160 } u;
161 } __attribute__ ((packed));
163 #define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u))
165 /* We maintain several bins of free lists for chunks for very small
166 objects. We never exhaustively search other bins -- if we don't
167 find one of the proper size, we allocate from the "larger" bin. */
169 /* Decreasing the number of free bins increases the time it takes to allocate.
170 The same holds for increasing MAX_FREE_BIN_SIZE without increasing NUM_FREE_BINS.
172 After much histogramming of allocation sizes and time spent on GC,
173 on a PowerPC G4 7450 at 667 MHz and a Pentium 4 at 2.8 GHz,
174 these were determined to be the optimal values. */
175 #define NUM_FREE_BINS 64
176 #define MAX_FREE_BIN_SIZE 256
177 #define FREE_BIN_DELTA (MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
178 #define SIZE_BIN_UP(SIZE) (((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
179 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
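/* Worked example, following directly from the definitions above: with
   NUM_FREE_BINS == 64 and MAX_FREE_BIN_SIZE == 256, FREE_BIN_DELTA is 4,
   so SIZE_BIN_UP (10) evaluates to 3 and SIZE_BIN_DOWN (10) to 2.  */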
181 /* Marker used as chunk->size for a large object. Should correspond
182 to the size of the bitfield above. */
183 #define LARGE_OBJECT_SIZE 0x7fff
185 /* We use this structure to determine the alignment required for
186 allocations. For power-of-two sized allocations, that's not a
187 problem, but it does matter for odd-sized allocations. */
189 struct max_alignment {
190 char c;
191 union {
192 HOST_WIDEST_INT i;
193 #ifdef HAVE_LONG_DOUBLE
194 long double d;
195 #else
196 double d;
197 #endif
198 } u;
201 /* The biggest alignment required. */
203 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
205 /* Compute the smallest nonnegative number which when added to X gives
206 a multiple of F. */
208 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
210 /* Compute the smallest multiple of F that is >= X. */
212 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
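/* For example, ROUND_UP_VALUE (10, 8) is 6, since 10 + 6 == 16 is the next
   multiple of 8, and ROUND_UP (10, 8) is 16 itself.  */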
214 /* A two-level tree is used to look up the page-entry for a given
215 pointer. Two chunks of the pointer's bits are extracted to index
216 the first and second levels of the tree, as follows:
218                                    HOST_PAGE_SIZE_BITS
219                         32            |    |
220     msb +----------------+----+------+------+ lsb
221                            |     |
222                 PAGE_L1_BITS     |
224                             PAGE_L2_BITS
226 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
227 pages are aligned on system page boundaries. The next most
228 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
229 index values in the lookup table, respectively.
231 For 32-bit architectures and the settings below, there are no
232 leftover bits. For architectures with wider pointers, the lookup
233 tree points to a list of pages, which must be scanned to find the
234 correct one. */
236 #define PAGE_L1_BITS (8)
237 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
238 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
239 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
241 #define LOOKUP_L1(p) \
242 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
244 #define LOOKUP_L2(p) \
245 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
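/* For example, on a host with 4K pages (lg_pagesize == 12), PAGE_L2_BITS
   is 12: LOOKUP_L1 extracts the top 8 bits of the address, LOOKUP_L2 the
   next 12, and the low 12 bits are the offset within the page.  */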
247 struct alloc_zone;
248 /* A page_entry records the status of an allocation page. */
249 typedef struct page_entry
251 /* The next page-entry with objects of the same size, or NULL if
252 this is the last page-entry. */
253 struct page_entry *next;
255 /* The number of bytes allocated. (This will always be a multiple
256 of the host system page size.) */
257 size_t bytes;
259 /* How many collections we've survived. */
260 size_t survived;
262 /* The address at which the memory is allocated. */
263 char *page;
265 #ifdef USING_MALLOC_PAGE_GROUPS
266 /* Back pointer to the page group this page came from. */
267 struct page_group *group;
268 #endif
270 /* Number of bytes on the page unallocated. Only used during
271 collection, and even then large pages merely set this nonzero. */
272 size_t bytes_free;
274 /* Context depth of this page. */
275 unsigned short context_depth;
277 /* Does this page contain small objects, or one large object? */
278 bool large_p;
280 /* The zone that this page entry belongs to. */
281 struct alloc_zone *zone;
282 } page_entry;
284 #ifdef USING_MALLOC_PAGE_GROUPS
285 /* A page_group describes a large allocation from malloc, from which
286 we parcel out aligned pages. */
287 typedef struct page_group
289 /* A linked list of all extant page groups. */
290 struct page_group *next;
292 /* The address we received from malloc. */
293 char *allocation;
295 /* The size of the block. */
296 size_t alloc_size;
298 /* A bitmask of pages in use. */
299 unsigned int in_use;
300 } page_group;
301 #endif
303 #if HOST_BITS_PER_PTR <= 32
305 /* On 32-bit hosts, we use a two level page table, as pictured above. */
306 typedef page_entry **page_table[PAGE_L1_SIZE];
308 #else
310 /* On 64-bit hosts, we use the same two level page tables plus a linked
311 list that disambiguates the top 32-bits. There will almost always be
312 exactly one entry in the list. */
313 typedef struct page_table_chain
315 struct page_table_chain *next;
316 size_t high_bits;
317 page_entry **table[PAGE_L1_SIZE];
318 } *page_table;
320 #endif
322 /* The global variables. */
323 static struct globals
325 /* The page lookup table. A single page can only belong to one
326 zone. This means free pages are zone-specific ATM. */
327 page_table lookup;
328 /* The linked list of zones. */
329 struct alloc_zone *zones;
331 /* The system's page size. */
332 size_t pagesize;
333 size_t lg_pagesize;
335 /* A file descriptor open to /dev/zero for reading. */
336 #if defined (HAVE_MMAP_DEV_ZERO)
337 int dev_zero_fd;
338 #endif
340 /* The file descriptor for debugging output. */
341 FILE *debug_file;
342 } G;
344 /* The zone allocation structure. */
345 struct alloc_zone
347 /* Name of the zone. */
348 const char *name;
350 /* Linked list of pages in a zone. */
351 page_entry *pages;
353 /* Linked lists of free storage. Slot I (for 1 <= I <= NUM_FREE_BINS) holds
354 chunks whose size divided by FREE_BIN_DELTA is I; all other chunks are in slot 0. */
355 struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
357 /* Bytes currently allocated. */
358 size_t allocated;
361 /* Bytes allocated at the end of the last collection. */
361 size_t allocated_last_gc;
363 /* Total amount of memory mapped. */
364 size_t bytes_mapped;
366 /* Bit N set if any allocations have been done at context depth N. */
367 unsigned long context_depth_allocations;
369 /* Bit N set if any collections have been done at context depth N. */
370 unsigned long context_depth_collections;
372 /* The current depth in the context stack. */
373 unsigned short context_depth;
375 /* A cache of free system pages. */
376 page_entry *free_pages;
378 #ifdef USING_MALLOC_PAGE_GROUPS
379 page_group *page_groups;
380 #endif
382 /* Next zone in the linked list of zones. */
383 struct alloc_zone *next_zone;
385 /* True if this zone was collected during the current collection. */
386 bool was_collected;
387 } main_zone;
389 struct alloc_zone *rtl_zone;
390 struct alloc_zone *garbage_zone;
391 struct alloc_zone *tree_zone;
393 /* Allocate pages in chunks of this size, to throttle calls to memory
394 allocation routines. The first page is used, the rest go onto the
395 free list. This cannot be larger than HOST_BITS_PER_INT for the
396 in_use bitmask for page_group. */
397 #define GGC_QUIRE_SIZE 16
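/* For example, with 4K system pages each batch allocation is 64K.  */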
399 static int ggc_allocated_p (const void *);
400 static page_entry *lookup_page_table_entry (const void *);
401 static void set_page_table_entry (void *, page_entry *);
402 #ifdef USING_MMAP
403 static char *alloc_anon (char *, size_t, struct alloc_zone *);
404 #endif
405 #ifdef USING_MALLOC_PAGE_GROUPS
406 static size_t page_group_index (char *, char *);
407 static void set_page_group_in_use (page_group *, char *);
408 static void clear_page_group_in_use (page_group *, char *);
409 #endif
410 static struct page_entry * alloc_small_page ( struct alloc_zone *);
411 static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
412 static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
413 static void free_page (struct page_entry *);
414 static void release_pages (struct alloc_zone *);
415 static void sweep_pages (struct alloc_zone *);
416 static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
417 static bool ggc_collect_1 (struct alloc_zone *, bool);
418 static void check_cookies (void);
421 /* Returns nonzero if P was allocated in GC'able memory. */
423 static inline int
424 ggc_allocated_p (const void *p)
426 page_entry ***base;
427 size_t L1, L2;
429 #if HOST_BITS_PER_PTR <= 32
430 base = &G.lookup[0];
431 #else
432 page_table table = G.lookup;
433 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
434 while (1)
436 if (table == NULL)
437 return 0;
438 if (table->high_bits == high_bits)
439 break;
440 table = table->next;
442 base = &table->table[0];
443 #endif
445 /* Extract the level 1 and 2 indices. */
446 L1 = LOOKUP_L1 (p);
447 L2 = LOOKUP_L2 (p);
449 return base[L1] && base[L1][L2];
452 /* Traverse the page table and find the entry for a page.
453 Die (probably) if the object wasn't allocated via GC. */
455 static inline page_entry *
456 lookup_page_table_entry(const void *p)
458 page_entry ***base;
459 size_t L1, L2;
461 #if HOST_BITS_PER_PTR <= 32
462 base = &G.lookup[0];
463 #else
464 page_table table = G.lookup;
465 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
466 while (table->high_bits != high_bits)
467 table = table->next;
468 base = &table->table[0];
469 #endif
471 /* Extract the level 1 and 2 indices. */
472 L1 = LOOKUP_L1 (p);
473 L2 = LOOKUP_L2 (p);
475 return base[L1][L2];
479 /* Set the page table entry for a page. */
481 static void
482 set_page_table_entry(void *p, page_entry *entry)
484 page_entry ***base;
485 size_t L1, L2;
487 #if HOST_BITS_PER_PTR <= 32
488 base = &G.lookup[0];
489 #else
490 page_table table;
491 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
492 for (table = G.lookup; table; table = table->next)
493 if (table->high_bits == high_bits)
494 goto found;
496 /* Not found -- allocate a new table. */
497 table = (page_table) xcalloc (1, sizeof(*table));
498 table->next = G.lookup;
499 table->high_bits = high_bits;
500 G.lookup = table;
501 found:
502 base = &table->table[0];
503 #endif
505 /* Extract the level 1 and 2 indices. */
506 L1 = LOOKUP_L1 (p);
507 L2 = LOOKUP_L2 (p);
509 if (base[L1] == NULL)
510 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
512 base[L1][L2] = entry;
515 #ifdef USING_MMAP
516 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
517 (if non-null). The ifdef structure here is intended to cause a
518 compile error unless exactly one of the HAVE_* macros is defined. */
520 static inline char *
521 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
523 #ifdef HAVE_MMAP_ANON
524 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
525 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
526 #endif
527 #ifdef HAVE_MMAP_DEV_ZERO
528 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
529 MAP_PRIVATE, G.dev_zero_fd, 0);
530 #endif
531 VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0);
533 if (page == (char *) MAP_FAILED)
535 perror ("virtual memory exhausted");
536 exit (FATAL_EXIT_CODE);
539 /* Remember that we allocated this memory. */
540 zone->bytes_mapped += size;
541 /* Pretend we don't have access to the allocated pages. We'll enable
542 access to smaller pieces of the area in ggc_alloc. Discard the
543 handle to avoid handle leak. */
544 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
545 return page;
547 #endif
548 #ifdef USING_MALLOC_PAGE_GROUPS
549 /* Compute the index for this page into the page group. */
551 static inline size_t
552 page_group_index (char *allocation, char *page)
554 return (size_t) (page - allocation) >> G.lg_pagesize;
557 /* Set and clear the in_use bit for this page in the page group. */
559 static inline void
560 set_page_group_in_use (page_group *group, char *page)
562 group->in_use |= 1 << page_group_index (group->allocation, page);
565 static inline void
566 clear_page_group_in_use (page_group *group, char *page)
568 group->in_use &= ~(1 << page_group_index (group->allocation, page));
570 #endif
572 /* Allocate a new small-object page for ZONE and return an entry for it.
573 The new page is entered into the page table, but it is not added to
574 the zone's page list; the caller does that. */
576 static inline struct page_entry *
577 alloc_small_page (struct alloc_zone *zone)
579 struct page_entry *entry;
580 char *page;
581 #ifdef USING_MALLOC_PAGE_GROUPS
582 page_group *group;
583 #endif
585 page = NULL;
587 /* Check the list of free pages for one we can use. */
588 entry = zone->free_pages;
589 if (entry != NULL)
591 /* Recycle the allocated memory from this page ... */
592 zone->free_pages = entry->next;
593 page = entry->page;
595 #ifdef USING_MALLOC_PAGE_GROUPS
596 group = entry->group;
597 #endif
599 #ifdef USING_MMAP
600 else
602 /* We want just one page. Allocate a bunch of them and put the
603 extras on the freelist. (Can only do this optimization with
604 mmap for backing store.) */
605 struct page_entry *e, *f = zone->free_pages;
606 int i;
608 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);
610 /* This loop counts down so that the chain will be in ascending
611 memory order. */
612 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
614 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
615 e->bytes = G.pagesize;
616 e->page = page + (i << G.lg_pagesize);
617 e->next = f;
618 f = e;
621 zone->free_pages = f;
623 #endif
624 #ifdef USING_MALLOC_PAGE_GROUPS
625 else
627 /* Allocate a large block of memory and serve out the aligned
628 pages therein. This results in much less memory wastage
629 than the traditional implementation of valloc. */
631 char *allocation, *a, *enda;
632 size_t alloc_size, head_slop, tail_slop, entry_size = G.pagesize;
633 int multiple_pages = (entry_size == G.pagesize);
635 if (multiple_pages)
636 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
637 else
638 alloc_size = entry_size + G.pagesize - 1;
639 allocation = xmalloc (alloc_size);
640 VALGRIND_MALLOCLIKE_BLOCK (allocation, alloc_size, 0, 0);
642 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
643 head_slop = page - allocation;
644 if (multiple_pages)
645 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
646 else
647 tail_slop = alloc_size - entry_size - head_slop;
648 enda = allocation + alloc_size - tail_slop;
650 /* We allocated N pages, which are likely not aligned, leaving
651 us with N-1 usable pages. We plan to place the page_group
652 structure somewhere in the slop. */
653 if (head_slop >= sizeof (page_group))
654 group = (page_group *)page - 1;
655 else
657 /* We magically got an aligned allocation. Too bad, we have
658 to waste a page anyway. */
659 if (tail_slop == 0)
661 enda -= G.pagesize;
662 tail_slop += G.pagesize;
664 if (tail_slop < sizeof (page_group))
665 abort ();
666 group = (page_group *)enda;
667 tail_slop -= sizeof (page_group);
670 /* Remember that we allocated this memory. */
671 group->next = zone->page_groups;
672 group->allocation = allocation;
673 group->alloc_size = alloc_size;
674 group->in_use = 0;
675 zone->page_groups = group;
676 zone->bytes_mapped += alloc_size;
678 /* If we allocated multiple pages, put the rest on the free list. */
679 if (multiple_pages)
681 struct page_entry *e, *f = zone->free_pages;
682 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
684 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
685 e->bytes = G.pagesize;
686 e->page = a;
687 e->group = group;
688 e->next = f;
689 f = e;
691 zone->free_pages = f;
694 #endif
696 if (entry == NULL)
697 entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
699 entry->next = 0;
700 entry->bytes = G.pagesize;
701 entry->bytes_free = G.pagesize;
702 entry->page = page;
703 entry->context_depth = zone->context_depth;
704 entry->large_p = false;
705 entry->zone = zone;
706 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
708 #ifdef USING_MALLOC_PAGE_GROUPS
709 entry->group = group;
710 set_page_group_in_use (group, page);
711 #endif
713 set_page_table_entry (page, entry);
715 if (GGC_DEBUG_LEVEL >= 2)
716 fprintf (G.debug_file,
717 "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
718 (PTR) entry, page, page + G.pagesize - 1);
720 return entry;
723 /* Allocate a large page of size SIZE in ZONE. */
725 static inline struct page_entry *
726 alloc_large_page (size_t size, struct alloc_zone *zone)
728 struct page_entry *entry;
729 char *page;
731 page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
732 entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);
734 entry->next = 0;
735 entry->bytes = size;
736 entry->bytes_free = LARGE_OBJECT_SIZE + CHUNK_OVERHEAD;
737 entry->page = page;
738 entry->context_depth = zone->context_depth;
739 entry->large_p = true;
740 entry->zone = zone;
741 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
743 #ifdef USING_MALLOC_PAGE_GROUPS
744 entry->group = NULL;
745 #endif
746 set_page_table_entry (page, entry);
748 if (GGC_DEBUG_LEVEL >= 2)
749 fprintf (G.debug_file,
750 "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
751 (PTR) entry, page, page + size - 1);
753 return entry;
757 /* For a page that is no longer needed, put it on the free page list. */
759 static inline void
760 free_page (page_entry *entry)
762 if (GGC_DEBUG_LEVEL >= 2)
763 fprintf (G.debug_file,
764 "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
765 entry->page, entry->page + entry->bytes - 1);
767 set_page_table_entry (entry->page, NULL);
769 if (entry->large_p)
771 free (entry->page);
772 VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
774 else
776 /* Mark the page as inaccessible. Discard the handle to
777 avoid handle leak. */
778 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
780 #ifdef USING_MALLOC_PAGE_GROUPS
781 clear_page_group_in_use (entry->group, entry->page);
782 #endif
784 entry->next = entry->zone->free_pages;
785 entry->zone->free_pages = entry;
789 /* Release the free page cache to the system. */
791 static void
792 release_pages (struct alloc_zone *zone)
794 #ifdef USING_MMAP
795 page_entry *p, *next;
796 char *start;
797 size_t len;
799 /* Gather up adjacent pages so they are unmapped together. */
800 p = zone->free_pages;
802 while (p)
804 start = p->page;
805 next = p->next;
806 len = p->bytes;
807 free (p);
808 p = next;
810 while (p && p->page == start + len)
812 next = p->next;
813 len += p->bytes;
814 free (p);
815 p = next;
818 munmap (start, len);
819 zone->bytes_mapped -= len;
822 zone->free_pages = NULL;
823 #endif
824 #ifdef USING_MALLOC_PAGE_GROUPS
825 page_entry **pp, *p;
826 page_group **gp, *g;
828 /* Remove all pages from free page groups from the list. */
829 pp = &(zone->free_pages);
830 while ((p = *pp) != NULL)
831 if (p->group->in_use == 0)
833 *pp = p->next;
834 free (p);
836 else
837 pp = &p->next;
839 /* Remove all free page groups, and release the storage. */
840 gp = &(zone->page_groups);
841 while ((g = *gp) != NULL)
842 if (g->in_use == 0)
844 *gp = g->next;
845 zone->bytes_mapped -= g->alloc_size;
846 free (g->allocation);
847 VALGRIND_FREELIKE_BLOCK(g->allocation, 0);
849 else
850 gp = &g->next;
851 #endif
854 /* Place CHUNK of size SIZE on the free list for ZONE. */
856 static inline void
857 free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
859 size_t bin = 0;
861 bin = SIZE_BIN_DOWN (size);
862 if (bin == 0)
863 abort ();
864 if (bin > NUM_FREE_BINS)
865 bin = 0;
866 #ifdef COOKIE_CHECKING
867 if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
868 abort ();
869 chunk->magic = DEADCHUNK_MAGIC;
870 #endif
871 chunk->u.next_free = zone->free_chunks[bin];
872 zone->free_chunks[bin] = chunk;
873 if (GGC_DEBUG_LEVEL >= 3)
874 fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
875 VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
878 /* Allocate a chunk of memory of SIZE bytes. */
880 static void *
881 ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
883 size_t bin = 0;
884 size_t lsize = 0;
885 struct page_entry *entry;
886 struct alloc_chunk *chunk, *lchunk, **pp;
887 void *result;
889 /* Align size, so that we're assured of aligned allocations. */
890 if (size < FREE_BIN_DELTA)
891 size = FREE_BIN_DELTA;
892 size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
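/* For example, where MAX_ALIGNMENT is 8, a 10-byte request is padded
   to 16 bytes before binning.  */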
894 /* Large objects are handled specially. */
895 if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
897 entry = alloc_large_page (size, zone);
898 entry->survived = 0;
899 entry->next = entry->zone->pages;
900 entry->zone->pages = entry;
903 chunk = (struct alloc_chunk *) entry->page;
904 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
905 chunk->size = LARGE_OBJECT_SIZE;
907 goto found;
910 /* First look for a tiny object already segregated into its own
911 size bucket. */
912 bin = SIZE_BIN_UP (size);
913 if (bin <= NUM_FREE_BINS)
915 chunk = zone->free_chunks[bin];
916 if (chunk)
918 zone->free_chunks[bin] = chunk->u.next_free;
919 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
920 goto found;
924 /* Failing that, look through the "other" bucket for a chunk
925 that is large enough. */
926 pp = &(zone->free_chunks[0]);
927 chunk = *pp;
928 while (chunk && chunk->size < size)
930 pp = &chunk->u.next_free;
931 chunk = *pp;
934 /* Failing that, allocate new storage. */
935 if (!chunk)
937 entry = alloc_small_page (zone);
938 entry->next = entry->zone->pages;
939 entry->zone->pages = entry;
941 chunk = (struct alloc_chunk *) entry->page;
942 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
943 chunk->size = G.pagesize - CHUNK_OVERHEAD;
945 else
947 *pp = chunk->u.next_free;
948 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
950 /* Release extra memory from a chunk that's too big. */
951 lsize = chunk->size - size;
952 if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
954 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
955 chunk->size = size;
957 lsize -= CHUNK_OVERHEAD;
958 lchunk = (struct alloc_chunk *)(chunk->u.data + size);
959 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
960 #ifdef COOKIE_CHECKING
961 lchunk->magic = CHUNK_MAGIC;
962 #endif
963 lchunk->type = 0;
964 lchunk->mark = 0;
965 lchunk->size = lsize;
966 free_chunk (lchunk, lsize, zone);
968 /* Calculate the object's address. */
969 found:
970 #ifdef COOKIE_CHECKING
971 chunk->magic = CHUNK_MAGIC;
972 #endif
973 chunk->type = 1;
974 chunk->mark = 0;
975 chunk->typecode = type;
976 result = chunk->u.data;
978 #ifdef ENABLE_GC_CHECKING
979 /* Poison the object by writing 0xaf over it even under Valgrind, so
980 that we keep the exact same semantics in the presence of memory bugs,
981 regardless of ENABLE_VALGRIND_CHECKING. We override this request
982 below. Drop the handle to avoid a handle leak. */
983 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
985 /* `Poison' the entire allocated object. */
986 memset (result, 0xaf, size);
987 #endif
989 /* Tell Valgrind that the memory is there, but its content isn't
990 defined. The bytes at the end of the object are still marked
991 inaccessible. */
992 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
994 /* Keep track of how many bytes are being allocated. This
995 information is used in deciding when to collect. */
996 zone->allocated += size + CHUNK_OVERHEAD;
998 if (GGC_DEBUG_LEVEL >= 3)
999 fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
1000 (void *)chunk, (unsigned long) size, result);
1002 return result;
1005 /* Allocate SIZE bytes of memory of type GTE, in the zone appropriate
1006 for that type. */
1008 void *
1009 ggc_alloc_typed (enum gt_types_enum gte, size_t size)
1011 switch (gte)
1013 case gt_ggc_e_14lang_tree_node:
1014 return ggc_alloc_zone_1 (size, tree_zone, gte);
1016 case gt_ggc_e_7rtx_def:
1017 return ggc_alloc_zone_1 (size, rtl_zone, gte);
1019 case gt_ggc_e_9rtvec_def:
1020 return ggc_alloc_zone_1 (size, rtl_zone, gte);
1022 default:
1023 return ggc_alloc_zone_1 (size, &main_zone, gte);
1027 /* Normal ggc_alloc simply allocates into the main zone. */
1029 void *
1030 ggc_alloc (size_t size)
1032 return ggc_alloc_zone_1 (size, &main_zone, -1);
1035 /* Zone allocation allocates into the specified zone. */
1037 void *
1038 ggc_alloc_zone (size_t size, struct alloc_zone *zone)
1040 return ggc_alloc_zone_1 (size, zone, -1);
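/* Illustrative only -- a hypothetical caller that wants its data grouped
   with trees might write

     struct my_node *p = ggc_alloc_zone (sizeof (struct my_node), tree_zone);

   whereas plain ggc_alloc (size) always draws from main_zone, and
   ggc_alloc_typed picks a zone from the object's GTY type code.  */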
1043 /* If P is not marked, mark it and return false. Otherwise return true.
1044 P must have been allocated by the GC allocator; it mustn't point to
1045 static objects, stack variables, or memory allocated with malloc. */
1048 ggc_set_mark (const void *p)
1050 page_entry *entry;
1051 struct alloc_chunk *chunk;
1053 #ifdef ENABLE_CHECKING
1054 /* Look up the page on which the object is allocated. If the object
1055 wasn't allocated by the collector, we'll probably die. */
1056 entry = lookup_page_table_entry (p);
1057 if (entry == NULL)
1058 abort ();
1059 #endif
1060 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
1061 #ifdef COOKIE_CHECKING
1062 if (chunk->magic != CHUNK_MAGIC)
1063 abort ();
1064 #endif
1065 if (chunk->mark)
1066 return 1;
1067 chunk->mark = 1;
1069 #ifndef ENABLE_CHECKING
1070 entry = lookup_page_table_entry (p);
1071 #endif
1073 /* Large pages are either completely full or completely empty. So if
1074 they are marked, they are completely full. */
1075 if (entry->large_p)
1076 entry->bytes_free = 0;
1077 else
1078 entry->bytes_free -= chunk->size + CHUNK_OVERHEAD;
1080 if (GGC_DEBUG_LEVEL >= 4)
1081 fprintf (G.debug_file, "Marking %p\n", p);
1083 return 0;
1086 /* Return 1 if P has been marked, zero otherwise.
1087 P must have been allocated by the GC allocator; it mustn't point to
1088 static objects, stack variables, or memory allocated with malloc. */
1091 ggc_marked_p (const void *p)
1093 struct alloc_chunk *chunk;
1095 #ifdef ENABLE_CHECKING
1097 page_entry *entry = lookup_page_table_entry (p);
1098 if (entry == NULL)
1099 abort ();
1101 #endif
1103 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
1104 #ifdef COOKIE_CHECKING
1105 if (chunk->magic != CHUNK_MAGIC)
1106 abort ();
1107 #endif
1108 return chunk->mark;
1111 /* Return the size of the gc-able object P. */
1113 size_t
1114 ggc_get_size (const void *p)
1116 struct alloc_chunk *chunk;
1117 struct page_entry *entry;
1119 #ifdef ENABLE_CHECKING
1120 entry = lookup_page_table_entry (p);
1121 if (entry == NULL)
1122 abort ();
1123 #endif
1125 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
1126 #ifdef COOKIE_CHECKING
1127 if (chunk->magic != CHUNK_MAGIC)
1128 abort ();
1129 #endif
1130 if (chunk->size == LARGE_OBJECT_SIZE)
1132 #ifndef ENABLE_CHECKING
1133 entry = lookup_page_table_entry (p);
1134 #endif
1135 return entry->bytes;
1138 return chunk->size;
1141 /* Initialize the ggc-zone-mmap allocator. */
1142 void
1143 init_ggc (void)
1145 /* Set up the main zone by hand. */
1146 main_zone.name = "Main zone";
1147 G.zones = &main_zone;
1149 /* Allocate the default zones. */
1150 rtl_zone = new_ggc_zone ("RTL zone");
1151 tree_zone = new_ggc_zone ("Tree zone");
1152 garbage_zone = new_ggc_zone ("Garbage zone");
1154 G.pagesize = getpagesize();
1155 G.lg_pagesize = exact_log2 (G.pagesize);
1156 #ifdef HAVE_MMAP_DEV_ZERO
1157 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1158 if (G.dev_zero_fd == -1)
1159 abort ();
1160 #endif
1162 #if 0
1163 G.debug_file = fopen ("ggc-mmap.debug", "w");
1164 setlinebuf (G.debug_file);
1165 #else
1166 G.debug_file = stdout;
1167 #endif
1169 #ifdef USING_MMAP
1170 /* StunOS has an amazing off-by-one error for the first mmap allocation
1171 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1172 believe, is an unaligned page allocation, which would cause us to
1173 hork badly if we tried to use it. */
1175 char *p = alloc_anon (NULL, G.pagesize, &main_zone);
1176 struct page_entry *e;
1177 if ((size_t)p & (G.pagesize - 1))
1179 /* How losing. Discard this one and try another. If we still
1180 can't get something useful, give up. */
1182 p = alloc_anon (NULL, G.pagesize, &main_zone);
1183 if ((size_t)p & (G.pagesize - 1))
1184 abort ();
1187 /* We have a good page, might as well hold onto it... */
1188 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
1189 e->bytes = G.pagesize;
1190 e->page = p;
1191 e->next = main_zone.free_pages;
1192 main_zone.free_pages = e;
1194 #endif
1197 /* Start a new GGC zone. */
1199 struct alloc_zone *
1200 new_ggc_zone (const char * name)
1202 struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
1203 new_zone->name = name;
1204 new_zone->next_zone = G.zones->next_zone;
1205 G.zones->next_zone = new_zone;
1206 return new_zone;
1209 /* Destroy a GGC zone. */
1210 void
1211 destroy_ggc_zone (struct alloc_zone * dead_zone)
1213 struct alloc_zone *z;
1215 for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
1216 /* Just find that zone. */ ;
1218 /* We should have found the zone in the list. Anything else is
1219 fatal.
1220 If we did find the zone, we expect this zone to be empty.
1221 A ggc_collect should have emptied it before we can destroy it. */
1222 if (!z || dead_zone->allocated != 0)
1223 abort ();
1225 /* Unchain the dead zone, release all its pages and free it. */
1226 z->next_zone = z->next_zone->next_zone;
1227 release_pages (dead_zone);
1228 free (dead_zone);
1231 /* Increment the `GC context'. Objects allocated in an outer context
1232 are never freed, eliminating the need to register their roots. */
1234 void
1235 ggc_push_context (void)
1237 struct alloc_zone *zone;
1238 for (zone = G.zones; zone; zone = zone->next_zone)
1239 ++(zone->context_depth);
1240 /* Die on wrap. */
1241 if (main_zone.context_depth >= HOST_BITS_PER_LONG)
1242 abort ();
1245 /* Decrement the `GC context'. All objects allocated since the
1246 previous ggc_push_context are migrated to the outer context. */
1248 static void
1249 ggc_pop_context_1 (struct alloc_zone *zone)
1251 unsigned long omask;
1252 unsigned depth;
1253 page_entry *p;
1255 depth = --(zone->context_depth);
1256 omask = (unsigned long)1 << (depth + 1);
1258 if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
1259 return;
1261 zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
1262 zone->context_depth_allocations &= omask - 1;
1263 zone->context_depth_collections &= omask - 1;
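/* OMASK has only the bit for the depth being popped set; the code above
   folds any allocation recorded at that depth down into the new current
   depth, then clears the popped bit and everything above it in both the
   allocation and collection masks.  */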
1265 /* Any remaining pages in the popped context are lowered to the new
1266 current context; i.e. objects allocated in the popped context and
1267 left over are imported into the previous context. */
1268 for (p = zone->pages; p != NULL; p = p->next)
1269 if (p->context_depth > depth)
1270 p->context_depth = depth;
1273 /* Pop all the zone contexts. */
1275 void
1276 ggc_pop_context (void)
1278 struct alloc_zone *zone;
1279 for (zone = G.zones; zone; zone = zone->next_zone)
1280 ggc_pop_context_1 (zone);
1284 /* Poison the chunk. */
1285 #ifdef ENABLE_GC_CHECKING
1286 #define poison_chunk(CHUNK, SIZE) \
1287 memset ((CHUNK)->u.data, 0xa5, (SIZE))
1288 #else
1289 #define poison_chunk(CHUNK, SIZE)
1290 #endif
1292 /* Sweep a zone: free its empty pages, clear marks, and coalesce unmarked
chunks back onto the zone's free lists. */
1294 static void
1295 sweep_pages (struct alloc_zone *zone)
1297 page_entry **pp, *p, *next;
1298 struct alloc_chunk *chunk, *last_free, *end;
1299 size_t last_free_size, allocated = 0;
1301 /* First, reset the free_chunks lists, since we are going to
1302 re-free free chunks in hopes of coalescing them into large chunks. */
1303 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1304 pp = &zone->pages;
1305 for (p = zone->pages; p ; p = next)
1307 next = p->next;
1309 /* For empty pages, just free the page. */
1310 if (p->bytes_free == G.pagesize && p->context_depth == zone->context_depth)
1312 *pp = next;
1313 #ifdef ENABLE_GC_CHECKING
1314 /* Poison the page. */
1315 memset (p->page, 0xb5, p->bytes);
1316 #endif
1317 free_page (p);
1318 continue;
1321 /* Large pages are all-or-none affairs: either they are
1322 completely empty, or they are completely full.
1323 Thus, if the above didn't catch it, we need not do anything
1324 except remove the mark and reset bytes_free.
1326 XXX: Should we bother to increment allocated? */
1327 else if (p->large_p)
1329 p->bytes_free = p->bytes;
1330 ((struct alloc_chunk *)p->page)->mark = 0;
1331 continue;
1333 pp = &p->next;
1335 /* This page has now survived another collection. */
1336 p->survived++;
1338 /* Which leaves full and partial pages. Step through all chunks,
1339 consolidate those that are free and insert them into the free
1340 lists. Note that consolidation slows down collection
1341 slightly. */
1343 chunk = (struct alloc_chunk *)p->page;
1344 end = (struct alloc_chunk *)(p->page + G.pagesize);
1345 last_free = NULL;
1346 last_free_size = 0;
1350 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
1351 if (chunk->mark || p->context_depth < zone->context_depth)
1353 if (last_free)
1355 last_free->type = 0;
1356 last_free->size = last_free_size;
1357 last_free->mark = 0;
1358 poison_chunk (last_free, last_free_size);
1359 free_chunk (last_free, last_free_size, zone);
1360 last_free = NULL;
1362 if (chunk->mark)
1364 allocated += chunk->size + CHUNK_OVERHEAD;
1365 p->bytes_free += chunk->size + CHUNK_OVERHEAD;
1367 chunk->mark = 0;
1368 #ifdef ENABLE_CHECKING
1369 if (p->bytes_free > p->bytes)
1370 abort ();
1371 #endif
1373 else
1375 if (last_free)
1377 last_free_size += CHUNK_OVERHEAD + chunk->size;
1379 else
1381 last_free = chunk;
1382 last_free_size = chunk->size;
1386 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1388 while (chunk < end);
1390 if (last_free)
1392 last_free->type = 0;
1393 last_free->size = last_free_size;
1394 last_free->mark = 0;
1395 poison_chunk (last_free, last_free_size);
1396 free_chunk (last_free, last_free_size, zone);
1400 zone->allocated = allocated;
1403 /* mark-and-sweep routine for collecting a single zone. NEED_MARKING
1404 is true if we need to mark before sweeping, false if some other
1405 zone collection has already performed marking for us. Returns true
1406 if we collected, false otherwise. */
1408 static bool
1409 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1411 /* Avoid frequent unnecessary work by skipping collection if the
1412 total allocations haven't expanded much since the last
1413 collection. */
1414 float allocated_last_gc =
1415 MAX (zone->allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1417 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
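/* Illustrative numbers only: if 8 MB were live after the previous
   collection and GGC_MIN_EXPAND is 30, this zone is skipped until its
   allocation grows past roughly 10.4 MB; GGC_MIN_HEAPSIZE (in kB)
   provides a floor for small heaps.  */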
1419 if (zone->allocated < allocated_last_gc + min_expand)
1420 return false;
1422 if (!quiet_flag)
1423 fprintf (stderr, " {%s GC %luk -> ", zone->name, (unsigned long) zone->allocated / 1024);
1425 /* Zero the total allocated bytes. This will be recalculated in the
1426 sweep phase. */
1427 zone->allocated = 0;
1429 /* Release the pages we freed the last time we collected, but didn't
1430 reuse in the interim. */
1431 release_pages (zone);
1433 /* Indicate that we've seen collections at this context depth. */
1434 zone->context_depth_collections
1435 = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
1436 if (need_marking)
1437 ggc_mark_roots ();
1438 sweep_pages (zone);
1439 zone->was_collected = true;
1440 zone->allocated_last_gc = zone->allocated;
1443 if (!quiet_flag)
1444 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1445 return true;
1448 /* Calculate the average page survival rate in terms of number of
1449 collections. */
1451 static float
1452 calculate_average_page_survival (struct alloc_zone *zone)
1454 float count = 0.0;
1455 float survival = 0.0;
1456 page_entry *p;
1457 for (p = zone->pages; p; p = p->next)
1459 count += 1.0;
1460 survival += p->survived;
1462 return survival/count;
1465 /* Check the magic cookies all of the chunks contain, to make sure we
1466 aren't doing anything stupid, like stomping on alloc_chunk
1467 structures. */
1469 static inline void
1470 check_cookies (void)
1472 #ifdef COOKIE_CHECKING
1473 page_entry *p;
1474 struct alloc_zone *zone;
1476 for (zone = G.zones; zone; zone = zone->next_zone)
1478 for (p = zone->pages; p; p = p->next)
1480 if (!p->large_p)
1482 struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1483 struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1486 if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
1487 abort ();
1488 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1490 while (chunk < end);
1494 #endif
1498 /* Top level collection routine. */
1500 void
1501 ggc_collect (void)
1503 struct alloc_zone *zone;
1504 bool marked = false;
1505 float f;
1507 timevar_push (TV_GC);
1508 check_cookies ();
1509 /* Start by possibly collecting the main zone. */
1510 main_zone.was_collected = false;
1511 marked |= ggc_collect_1 (&main_zone, true);
1513 /* In order to keep the number of collections down, we don't
1514 collect other zones unless we are collecting the main zone. This
1515 gives us roughly the same number of collections as we used to
1516 have with the old gc. The number of collections is important
1517 because our main slowdown (according to profiling) is now in
1518 marking. So if we mark twice as often as we used to, we'll be
1519 twice as slow. Hopefully we'll avoid this cost when we mark
1520 zone-at-a-time. */
1522 if (main_zone.was_collected)
1524 struct alloc_zone *zone;
1526 for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
1528 check_cookies ();
1529 zone->was_collected = false;
1530 marked |= ggc_collect_1 (zone, !marked);
1534 /* Print page survival stats, if someone wants them. */
1535 if (GGC_DEBUG_LEVEL >= 2)
1537 for (zone = G.zones; zone; zone = zone->next_zone)
1539 if (zone->was_collected)
1541 f = calculate_average_page_survival (zone);
1542 printf ("Average page survival in zone `%s' is %f\n",
1543 zone->name, f);
1548 /* Since we don't mark zone-at-a-time right now, marking in any
1549 zone means marking in every zone. So we have to clear all the
1550 marks in all the zones that weren't collected already. */
1551 if (marked)
1553 page_entry *p;
1554 for (zone = G.zones; zone; zone = zone->next_zone)
1556 if (zone->was_collected)
1557 continue;
1558 for (p = zone->pages; p; p = p->next)
1560 if (!p->large_p)
1562 struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1563 struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1566 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
1567 if (chunk->mark || p->context_depth < zone->context_depth)
1569 if (chunk->mark)
1570 p->bytes_free += chunk->size + CHUNK_OVERHEAD;
1571 #ifdef ENABLE_CHECKING
1572 if (p->bytes_free > p->bytes)
1573 abort ();
1574 #endif
1575 chunk->mark = 0;
1577 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1579 while (chunk < end);
1581 else
1583 p->bytes_free = p->bytes;
1584 ((struct alloc_chunk *)p->page)->mark = 0;
1589 timevar_pop (TV_GC);
1592 /* Print allocation statistics. */
1594 void
1595 ggc_print_statistics (void)
1599 struct ggc_pch_data
1601 struct ggc_pch_ondisk
1603 unsigned total;
1604 } d;
1605 size_t base;
1606 size_t written;
1609 /* Initialize the PCH data structure. */
1611 struct ggc_pch_data *
1612 init_ggc_pch (void)
1614 return xcalloc (sizeof (struct ggc_pch_data), 1);
1617 /* Add the size of object X to the size of the PCH data. */
1619 void
1620 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1621 size_t size, bool is_string)
1623 if (!is_string)
1625 d->d.total += size + CHUNK_OVERHEAD;
1627 else
1628 d->d.total += size;
1631 /* Return the total size of the PCH data. */
1633 size_t
1634 ggc_pch_total_size (struct ggc_pch_data *d)
1636 return d->d.total;
1639 /* Set the base address for the objects in the PCH file. */
1641 void
1642 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
1644 d->base = (size_t) base;
1647 /* Allocate a place for object X of size SIZE in the PCH file. */
1649 char *
1650 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
1651 size_t size, bool is_string)
1653 char *result;
1654 result = (char *)d->base;
1655 if (!is_string)
1657 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1658 if (chunk->size == LARGE_OBJECT_SIZE)
1659 d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
1660 else
1661 d->base += chunk->size + CHUNK_OVERHEAD;
1662 return result + CHUNK_OVERHEAD;
1664 else
1666 d->base += size;
1667 return result;
1672 /* Prepare to write out the PCH data to file F. */
1674 void
1675 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1676 FILE *f ATTRIBUTE_UNUSED)
1678 /* Nothing to do. */
1681 /* Write out object X of SIZE to file F. */
1683 void
1684 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1685 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
1686 size_t size, bool is_string)
1688 if (!is_string)
1690 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1691 size = ggc_get_size (x);
1692 if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
1693 fatal_error ("can't write PCH file: %m");
1694 d->written += size + CHUNK_OVERHEAD;
1696 else
1698 if (fwrite (x, size, 1, f) != 1)
1699 fatal_error ("can't write PCH file: %m");
1700 d->written += size;
1702 if (d->written == d->d.total
1703 && fseek (f, ROUND_UP_VALUE (d->d.total, G.pagesize), SEEK_CUR) != 0)
1704 fatal_error ("can't write PCH file: %m");
1707 void
1708 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
1710 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
1711 fatal_error ("can't write PCH file: %m");
1712 free (d);
1716 void
1717 ggc_pch_read (FILE *f, void *addr)
1719 struct ggc_pch_ondisk d;
1720 struct page_entry *entry;
1721 char *pte;
1722 if (fread (&d, sizeof (d), 1, f) != 1)
1723 fatal_error ("can't read PCH file: %m");
1724 entry = xcalloc (1, sizeof (struct page_entry));
1725 entry->bytes = d.total;
1726 entry->page = addr;
1727 entry->context_depth = 0;
1728 entry->zone = &main_zone;
1729 for (pte = entry->page;
1730 pte < entry->page + entry->bytes;
1731 pte += G.pagesize)
1732 set_page_table_entry (pte, entry);