1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
4 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
5 (dberlin@dberlin.org)
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "toplev.h"
33 #include "varray.h"
34 #include "flags.h"
35 #include "ggc.h"
36 #include "timevar.h"
37 #include "params.h"
38 #include "bitmap.h"
40 #ifdef ENABLE_VALGRIND_CHECKING
41 # ifdef HAVE_VALGRIND_MEMCHECK_H
42 # include <valgrind/memcheck.h>
43 # elif defined HAVE_MEMCHECK_H
44 # include <memcheck.h>
45 # else
46 # include <valgrind.h>
47 # endif
48 #else
49 /* Avoid #ifdefs when we can help it. */
50 #define VALGRIND_DISCARD(x)
51 #define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
52 #define VALGRIND_FREELIKE_BLOCK(x,y)
53 #endif
54 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
55 file open. Prefer either to valloc. */
56 #ifdef HAVE_MMAP_ANON
57 # undef HAVE_MMAP_DEV_ZERO
59 # include <sys/mman.h>
60 # ifndef MAP_FAILED
61 # define MAP_FAILED -1
62 # endif
63 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
64 # define MAP_ANONYMOUS MAP_ANON
65 # endif
66 # define USING_MMAP
68 #endif
70 #ifdef HAVE_MMAP_DEV_ZERO
72 # include <sys/mman.h>
73 # ifndef MAP_FAILED
74 # define MAP_FAILED -1
75 # endif
76 # define USING_MMAP
78 #endif
80 #ifndef USING_MMAP
81 #error "Zone collector requires mmap"
82 #endif
84 #if (GCC_VERSION < 3001)
85 #define prefetch(X) ((void) X)
86 #else
87 #define prefetch(X) __builtin_prefetch (X)
88 #endif
90 /* NOTES:
91 If we track inter-zone pointers, we can mark single zones at a
92 time.
93 If we have a zone where we guarantee no inter-zone pointers, we
94 could mark that zone separately.
95 The garbage zone should not be marked, and we should return 1 in
96 ggc_set_mark for any object in the garbage zone, which cuts off
97 marking quickly. */
98 /* Strategy:
100    This garbage-collecting allocator segregates objects into zones.
101    It also segregates objects into "large" and "small" bins.  Large
102    objects are greater than or equal to the page size.
104 Pages for small objects are broken up into chunks, each of which
105 are described by a struct alloc_chunk. One can walk over all
106 chunks on the page by adding the chunk size to the chunk's data
107 address. The free space for a page exists in the free chunk bins.
109 Each page-entry also has a context depth, which is used to track
110 pushing and popping of allocation contexts. Only objects allocated
111 in the current (highest-numbered) context may be collected.
113 Empty pages (of all sizes) are kept on a single page cache list,
114 and are considered first when new pages are required; they are
115 deallocated at the start of the next collection if they haven't
116 been recycled by then. */
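/* For illustration, the chunk walk described above looks like the
   following sketch (the same pattern appears below in sweep_pages and
   check_cookies):

       chunk = (struct alloc_chunk *) p->page;
       end = (struct alloc_chunk *) (p->page + G.pagesize);
       do
         {
           ... examine CHUNK ...
           chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
         }
       while (chunk < end);  */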
118 /* Define GGC_DEBUG_LEVEL to print debugging information.
119 0: No debugging output.
120 1: GC statistics only.
121 2: Page-entry allocations/deallocations as well.
122 3: Object allocations as well.
123 4: Object marks as well. */
124 #define GGC_DEBUG_LEVEL (0)
126 #ifndef HOST_BITS_PER_PTR
127 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
128 #endif
130 #ifdef COOKIE_CHECKING
131 #define CHUNK_MAGIC 0x95321123
132 #define DEADCHUNK_MAGIC 0x12817317
133 #endif
135 /* This structure manages small chunks. When the chunk is free, it's
136 linked with other chunks via free_next. When the chunk is allocated,
137 the data starts at u. Large chunks are allocated one at a time to
138 their own page, and so don't come in here.
140 The "type" field is a placeholder for a future change to do
141    generational collection.  At present it is 0 when free
142    and 1 when allocated.  */
144 struct alloc_chunk {
145 #ifdef COOKIE_CHECKING
146 unsigned int magic;
147 #endif
148 unsigned int type:1;
149 unsigned int typecode:14;
150 unsigned int large:1;
151 unsigned int size:15;
152 unsigned int mark:1;
153 union {
154 struct alloc_chunk *next_free;
155 char data[1];
157 /* Make sure the data is sufficiently aligned. */
158 HOST_WIDEST_INT align_i;
159 #ifdef HAVE_LONG_DOUBLE
160 long double align_d;
161 #else
162 double align_d;
163 #endif
164 } u;
167 #define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u))
169 /* We maintain several bins of free lists for chunks for very small
170 objects. We never exhaustively search other bins -- if we don't
171 find one of the proper size, we allocate from the "larger" bin. */
173 /* Decreasing the number of free bins increases the time it takes to allocate.
174    The same is true of increasing MAX_FREE_BIN_SIZE without increasing NUM_FREE_BINS.
176    After much histogramming of allocation sizes and time spent on GC,
177    on a PowerPC G4 7450 at 667 MHz and a Pentium 4 at 2.8 GHz,
178 these were determined to be the optimal values. */
179 #define NUM_FREE_BINS 64
180 #define MAX_FREE_BIN_SIZE (64 * sizeof (void *))
181 #define FREE_BIN_DELTA (MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
182 #define SIZE_BIN_UP(SIZE) (((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
183 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
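/* For illustration, assuming sizeof (void *) == 8: MAX_FREE_BIN_SIZE is
   512 and FREE_BIN_DELTA is 8, so SIZE_BIN_UP (20) == 3 (a 20-byte
   request is served from the bin whose chunks are at least 24 bytes)
   while SIZE_BIN_DOWN (20) == 2 (a freed 20-byte chunk goes into the
   bin holding sizes 16 through 23).  Chunks too big for the sized bins
   end up in slot 0, the "other" bucket searched in ggc_alloc_zone_1.  */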
185 /* Marker used as chunk->size for a large object.  Should correspond
186    to the maximum value of the size bitfield above.  */
187 #define LARGE_OBJECT_SIZE 0x7fff
189 /* We use this structure to determine the alignment required for
190 allocations. For power-of-two sized allocations, that's not a
191 problem, but it does matter for odd-sized allocations. */
193 struct max_alignment {
194 char c;
195 union {
196 HOST_WIDEST_INT i;
197 #ifdef HAVE_LONG_DOUBLE
198 long double d;
199 #else
200 double d;
201 #endif
202 } u;
205 /* The biggest alignment required. */
207 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
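/* The offsetof trick works because the union U must be placed at the
   smallest offset after the single char that satisfies the union's
   alignment; that offset is therefore the strictest alignment any
   allocation can require (typically 8 on a 64-bit host, for example).  */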
209 /* Compute the smallest nonnegative number which when added to X gives
210 a multiple of F. */
212 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
214 /* Compute the smallest multiple of F that is >= X. */
216 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
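/* For illustration, with X == 10 and F == 8: ROUND_UP_VALUE (10, 8)
   is 7 - ((7 + 10) % 8) == 6, and 10 + 6 == 16 is the next multiple
   of 8.  Likewise ROUND_UP (10, 8) == CEIL (10, 8) * 8 == 2 * 8 == 16,
   since CEIL rounds the quotient up.  */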
219 /* A page_entry records the status of an allocation page. */
220 typedef struct page_entry
222 /* The next page-entry with objects of the same size, or NULL if
223 this is the last page-entry. */
224 struct page_entry *next;
226 /* The number of bytes allocated. (This will always be a multiple
227 of the host system page size.) */
228 size_t bytes;
230 /* How many collections we've survived. */
231 size_t survived;
233 /* The address at which the memory is allocated. */
234 char *page;
236 /* Context depth of this page. */
237 unsigned short context_depth;
239 /* Does this page contain small objects, or one large object? */
240 bool large_p;
242 /* The zone that this page entry belongs to. */
243 struct alloc_zone *zone;
244 } page_entry;
247 /* The global variables. */
248 static struct globals
250 /* The linked list of zones. */
251 struct alloc_zone *zones;
253 /* The system's page size. */
254 size_t pagesize;
255 size_t lg_pagesize;
257 /* A file descriptor open to /dev/zero for reading. */
258 #if defined (HAVE_MMAP_DEV_ZERO)
259 int dev_zero_fd;
260 #endif
262 /* The file descriptor for debugging output. */
263 FILE *debug_file;
264 } G;
266 /* The zone allocation structure. */
267 struct alloc_zone
269 /* Name of the zone. */
270 const char *name;
272 /* Linked list of pages in a zone. */
273 page_entry *pages;
275   /* Linked lists of free storage.  Slot N (1 <= N <= NUM_FREE_BINS) holds
276      chunks from N * FREE_BIN_DELTA bytes up to the next bin; slot 0 holds the rest.  */
277 struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
279 /* Bytes currently allocated. */
280 size_t allocated;
282 /* Bytes currently allocated at the end of the last collection. */
283 size_t allocated_last_gc;
285 /* Total amount of memory mapped. */
286 size_t bytes_mapped;
288 /* Bit N set if any allocations have been done at context depth N. */
289 unsigned long context_depth_allocations;
291 /* Bit N set if any collections have been done at context depth N. */
292 unsigned long context_depth_collections;
294 /* The current depth in the context stack. */
295 unsigned short context_depth;
297 /* A cache of free system pages. */
298 page_entry *free_pages;
300 /* Next zone in the linked list of zones. */
301 struct alloc_zone *next_zone;
303 /* True if this zone was collected during this collection. */
304 bool was_collected;
306 /* True if this zone should be destroyed after the next collection. */
307 bool dead;
309 #ifdef GATHER_STATISTICS
310 struct
312 /* Total memory allocated with ggc_alloc. */
313 unsigned long long total_allocated;
314 /* Total overhead for memory to be allocated with ggc_alloc. */
315 unsigned long long total_overhead;
317 /* Total allocations and overhead for sizes less than 32, 64 and 128.
318 These sizes are interesting because they are typical cache line
319 sizes. */
321 unsigned long long total_allocated_under32;
322 unsigned long long total_overhead_under32;
324 unsigned long long total_allocated_under64;
325 unsigned long long total_overhead_under64;
327 unsigned long long total_allocated_under128;
328 unsigned long long total_overhead_under128;
329 } stats;
330 #endif
331 } main_zone;
333 struct alloc_zone *rtl_zone;
334 struct alloc_zone *garbage_zone;
335 struct alloc_zone *tree_zone;
337 static int always_collect;
339 /* Allocate pages in chunks of this size, to throttle calls to memory
340 allocation routines. The first page is used, the rest go onto the
341 free list. This cannot be larger than HOST_BITS_PER_INT for the
342 in_use bitmask for page_group. */
343 #define GGC_QUIRE_SIZE 16
345 static int ggc_allocated_p (const void *);
346 #ifdef USING_MMAP
347 static char *alloc_anon (char *, size_t, struct alloc_zone *);
348 #endif
349 static struct page_entry * alloc_small_page ( struct alloc_zone *);
350 static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
351 static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
352 static void free_page (struct page_entry *);
353 static void release_pages (struct alloc_zone *);
354 static void sweep_pages (struct alloc_zone *);
355 static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short MEM_STAT_DECL);
356 static bool ggc_collect_1 (struct alloc_zone *, bool);
357 static void check_cookies (void);
360 /* Returns nonzero if P was allocated in GC'able memory. */
362 static inline int
363 ggc_allocated_p (const void *p)
365 struct alloc_chunk *chunk;
366 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
367 #ifdef COOKIE_CHECKING
368 if (chunk->magic != CHUNK_MAGIC)
369 abort ();
370 #endif
371 if (chunk->type == 1)
372 return true;
373 return false;
377 #ifdef USING_MMAP
378 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
379    (if non-null).  The ifdef structure here is intended to cause a
380 compile error unless exactly one of the HAVE_* is defined. */
382 static inline char *
383 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
385 #ifdef HAVE_MMAP_ANON
386 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
387 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
388 #endif
389 #ifdef HAVE_MMAP_DEV_ZERO
390 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
391 MAP_PRIVATE, G.dev_zero_fd, 0);
392 #endif
393 VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0);
395 if (page == (char *) MAP_FAILED)
397 perror ("virtual memory exhausted");
398 exit (FATAL_EXIT_CODE);
401 /* Remember that we allocated this memory. */
402 zone->bytes_mapped += size;
403 /* Pretend we don't have access to the allocated pages. We'll enable
404 access to smaller pieces of the area in ggc_alloc. Discard the
405 handle to avoid handle leak. */
406 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
407 return page;
409 #endif
411 /* Allocate a new page for small objects in ZONE, and return an
412    entry for it.  */
414 static inline struct page_entry *
415 alloc_small_page (struct alloc_zone *zone)
417 struct page_entry *entry;
418 char *page;
420 page = NULL;
422 /* Check the list of free pages for one we can use. */
423 entry = zone->free_pages;
424 if (entry != NULL)
426 /* Recycle the allocated memory from this page ... */
427 zone->free_pages = entry->next;
428 page = entry->page;
432 #ifdef USING_MMAP
433 else
435 /* We want just one page. Allocate a bunch of them and put the
436 extras on the freelist. (Can only do this optimization with
437 mmap for backing store.) */
438 struct page_entry *e, *f = zone->free_pages;
439 int i;
441 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);
443 /* This loop counts down so that the chain will be in ascending
444 memory order. */
445 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
447 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
448 e->bytes = G.pagesize;
449 e->page = page + (i << G.lg_pagesize);
450 e->next = f;
451 f = e;
454 zone->free_pages = f;
456 #endif
457 if (entry == NULL)
458 entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
460 entry->next = 0;
461 entry->bytes = G.pagesize;
462 entry->page = page;
463 entry->context_depth = zone->context_depth;
464 entry->large_p = false;
465 entry->zone = zone;
466 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
468 if (GGC_DEBUG_LEVEL >= 2)
469 fprintf (G.debug_file,
470 "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
471 (PTR) entry, page, page + G.pagesize - 1);
473 return entry;
475 /* Compute the smallest multiple of F that is >= X. */
477 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
479 /* Allocate a large page of size SIZE in ZONE. */
481 static inline struct page_entry *
482 alloc_large_page (size_t size, struct alloc_zone *zone)
484 struct page_entry *entry;
485 char *page;
486 size = ROUND_UP (size, 1024);
487 page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
488 entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);
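/* Note the layout of a large "page": a single xmalloc'd block holds
   the chunk header (CHUNK_OVERHEAD bytes), then SIZE bytes of object
   data, and finally the struct page_entry itself at
   PAGE + SIZE + CHUNK_OVERHEAD, so free_page can release everything
   with one free (ENTRY->page).  */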
490 entry->next = 0;
491 entry->bytes = size;
492 entry->page = page;
493 entry->context_depth = zone->context_depth;
494 entry->large_p = true;
495 entry->zone = zone;
496 zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
498 if (GGC_DEBUG_LEVEL >= 2)
499 fprintf (G.debug_file,
500 "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
501 (PTR) entry, page, page + size - 1);
503 return entry;
507 /* For a page that is no longer needed, put it on the free page list. */
509 static inline void
510 free_page (page_entry *entry)
512 if (GGC_DEBUG_LEVEL >= 2)
513 fprintf (G.debug_file,
514 "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
515 entry->page, entry->page + entry->bytes - 1);
517 if (entry->large_p)
519 free (entry->page);
520 VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
522 else
524 /* Mark the page as inaccessible. Discard the handle to
525 avoid handle leak. */
526 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
528 entry->next = entry->zone->free_pages;
529 entry->zone->free_pages = entry;
533 /* Release the free page cache to the system. */
535 static void
536 release_pages (struct alloc_zone *zone)
538 #ifdef USING_MMAP
539 page_entry *p, *next;
540 char *start;
541 size_t len;
543 /* Gather up adjacent pages so they are unmapped together. */
544 p = zone->free_pages;
546 while (p)
548 start = p->page;
549 next = p->next;
550 len = p->bytes;
551 free (p);
552 p = next;
554 while (p && p->page == start + len)
556 next = p->next;
557 len += p->bytes;
558 free (p);
559 p = next;
562 munmap (start, len);
563 zone->bytes_mapped -= len;
566 zone->free_pages = NULL;
567 #endif
570 /* Place CHUNK of size SIZE on the free list for ZONE. */
572 static inline void
573 free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
575 size_t bin = 0;
577 bin = SIZE_BIN_DOWN (size);
578 if (bin == 0)
579 abort ();
580 if (bin > NUM_FREE_BINS)
581 bin = 0;
582 #ifdef COOKIE_CHECKING
583 if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
584 abort ();
585 chunk->magic = DEADCHUNK_MAGIC;
586 #endif
587 chunk->u.next_free = zone->free_chunks[bin];
588 zone->free_chunks[bin] = chunk;
589 if (GGC_DEBUG_LEVEL >= 3)
590 fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
591 VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
594 /* Allocate a chunk of memory of SIZE bytes. */
596 static void *
597 ggc_alloc_zone_1 (size_t orig_size, struct alloc_zone *zone, short type
598 MEM_STAT_DECL)
600 size_t bin = 0;
601 size_t lsize = 0;
602 struct page_entry *entry;
603 struct alloc_chunk *chunk, *lchunk, **pp;
604 void *result;
605 size_t size = orig_size;
607 /* Align size, so that we're assured of aligned allocations. */
608 if (size < FREE_BIN_DELTA)
609 size = FREE_BIN_DELTA;
610 size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
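/* For illustration, assuming MAX_ALIGNMENT == 8: a 20-byte request
   becomes (20 + 7) & -8 == 24, so every chunk size handed out is a
   multiple of the strictest alignment.  */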
612 /* Large objects are handled specially. */
613 if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
615 size = ROUND_UP (size, 1024);
616 entry = alloc_large_page (size, zone);
617 entry->survived = 0;
618 entry->next = entry->zone->pages;
619 entry->zone->pages = entry;
621 chunk = (struct alloc_chunk *) entry->page;
622 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
623 chunk->large = 1;
624 chunk->size = CEIL (size, 1024);
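/* The size bitfield is only 15 bits wide, so large objects record
   their size in 1K units: a 40000-byte request, rounded up to 40960
   above, is stored as CEIL (40960, 1024) == 40 and decoded back to
   40 * 1024 bytes by ggc_get_size below.  */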
626 goto found;
629 /* First look for a tiny object already segregated into its own
630 size bucket. */
631 bin = SIZE_BIN_UP (size);
632 if (bin <= NUM_FREE_BINS)
634 chunk = zone->free_chunks[bin];
635 if (chunk)
637 zone->free_chunks[bin] = chunk->u.next_free;
638 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
639 goto found;
643 /* Failing that, look through the "other" bucket for a chunk
644 that is large enough. */
645 pp = &(zone->free_chunks[0]);
646 chunk = *pp;
647 while (chunk && chunk->size < size)
649 pp = &chunk->u.next_free;
650 chunk = *pp;
653 /* Failing that, allocate new storage. */
654 if (!chunk)
656 entry = alloc_small_page (zone);
657 entry->next = entry->zone->pages;
658 entry->zone->pages = entry;
660 chunk = (struct alloc_chunk *) entry->page;
661 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
662 chunk->size = G.pagesize - CHUNK_OVERHEAD;
663 chunk->large = 0;
665 else
667 *pp = chunk->u.next_free;
668 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
669 chunk->large = 0;
671 /* Release extra memory from a chunk that's too big. */
672 lsize = chunk->size - size;
673 if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
675 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
676 chunk->size = size;
678 lsize -= CHUNK_OVERHEAD;
679 lchunk = (struct alloc_chunk *)(chunk->u.data + size);
680 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
681 #ifdef COOKIE_CHECKING
682 lchunk->magic = CHUNK_MAGIC;
683 #endif
684 lchunk->type = 0;
685 lchunk->mark = 0;
686 lchunk->size = lsize;
687 lchunk->large = 0;
688 free_chunk (lchunk, lsize, zone);
689 lsize = 0;
692 /* Calculate the object's address. */
693 found:
694 #ifdef COOKIE_CHECKING
695 chunk->magic = CHUNK_MAGIC;
696 #endif
697 chunk->type = 1;
698 chunk->mark = 0;
699 chunk->typecode = type;
700 result = chunk->u.data;
702 #ifdef ENABLE_GC_CHECKING
703 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
704      exact same semantics in the presence of memory bugs, regardless of
705 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
706 handle to avoid handle leak. */
707 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
709 /* `Poison' the entire allocated object. */
710 memset (result, 0xaf, size);
711 #endif
713 /* Tell Valgrind that the memory is there, but its content isn't
714 defined. The bytes at the end of the object are still marked
715      inaccessible.  */
716 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
718 /* Keep track of how many bytes are being allocated. This
719 information is used in deciding when to collect. */
720 zone->allocated += size;
722 #ifdef GATHER_STATISTICS
723 ggc_record_overhead (orig_size, size + CHUNK_OVERHEAD - orig_size PASS_MEM_STAT);
726 size_t object_size = size + CHUNK_OVERHEAD;
727 size_t overhead = object_size - orig_size;
729 zone->stats.total_overhead += overhead;
730 zone->stats.total_allocated += object_size;
732 if (orig_size <= 32)
734 zone->stats.total_overhead_under32 += overhead;
735 zone->stats.total_allocated_under32 += object_size;
737 if (orig_size <= 64)
739 zone->stats.total_overhead_under64 += overhead;
740 zone->stats.total_allocated_under64 += object_size;
742 if (orig_size <= 128)
744 zone->stats.total_overhead_under128 += overhead;
745 zone->stats.total_allocated_under128 += object_size;
748 #endif
750 if (GGC_DEBUG_LEVEL >= 3)
751 fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
752 (void *)chunk, (unsigned long) size, result);
754 return result;
757 /* Allocate a chunk of SIZE bytes of type GTE, in the zone appropriate
758    for that type.  */
760 void *
761 ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
762 MEM_STAT_DECL)
764 switch (gte)
766 case gt_ggc_e_14lang_tree_node:
767 return ggc_alloc_zone_1 (size, tree_zone, gte PASS_MEM_STAT);
769 case gt_ggc_e_7rtx_def:
770 return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);
772 case gt_ggc_e_9rtvec_def:
773 return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);
775 default:
776 return ggc_alloc_zone_1 (size, &main_zone, gte PASS_MEM_STAT);
780 /* Normal ggc_alloc simply allocates into the main zone. */
782 void *
783 ggc_alloc_stat (size_t size MEM_STAT_DECL)
785 return ggc_alloc_zone_1 (size, &main_zone, -1 PASS_MEM_STAT);
788 /* Zone allocation allocates into the specified zone. */
790 void *
791 ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone MEM_STAT_DECL)
793 return ggc_alloc_zone_1 (size, zone, -1 PASS_MEM_STAT);
796 /* Poison the chunk. */
797 #ifdef ENABLE_GC_CHECKING
798 #define poison_chunk(CHUNK, SIZE) \
799 memset ((CHUNK)->u.data, 0xa5, (SIZE))
800 #else
801 #define poison_chunk(CHUNK, SIZE)
802 #endif
804 /* Free the object at P. */
806 void
807 ggc_free (void *p)
809 struct alloc_chunk *chunk;
811 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
813 /* Poison the chunk. */
814 poison_chunk (chunk, ggc_get_size (p));
817 /* If P is not marked, mark it and return false. Otherwise return true.
818 P must have been allocated by the GC allocator; it mustn't point to
819 static objects, stack variables, or memory allocated with malloc. */
822 ggc_set_mark (const void *p)
824 struct alloc_chunk *chunk;
826 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
827 #ifdef COOKIE_CHECKING
828 if (chunk->magic != CHUNK_MAGIC)
829 abort ();
830 #endif
831 if (chunk->mark)
832 return 1;
833 chunk->mark = 1;
835 if (GGC_DEBUG_LEVEL >= 4)
836 fprintf (G.debug_file, "Marking %p\n", p);
838 return 0;
841 /* Return 1 if P has been marked, zero otherwise.
842 P must have been allocated by the GC allocator; it mustn't point to
843 static objects, stack variables, or memory allocated with malloc. */
846 ggc_marked_p (const void *p)
848 struct alloc_chunk *chunk;
850 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
851 #ifdef COOKIE_CHECKING
852 if (chunk->magic != CHUNK_MAGIC)
853 abort ();
854 #endif
855 return chunk->mark;
858 /* Return the size of the gc-able object P. */
860 size_t
861 ggc_get_size (const void *p)
863 struct alloc_chunk *chunk;
865 chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
866 #ifdef COOKIE_CHECKING
867 if (chunk->magic != CHUNK_MAGIC)
868 abort ();
869 #endif
870 if (chunk->large)
871 return chunk->size * 1024;
873 return chunk->size;
876 /* Initialize the ggc-zone-mmap allocator. */
877 void
878 init_ggc (void)
880 /* Set up the main zone by hand. */
881 main_zone.name = "Main zone";
882 G.zones = &main_zone;
884 /* Allocate the default zones. */
885 rtl_zone = new_ggc_zone ("RTL zone");
886 tree_zone = new_ggc_zone ("Tree zone");
887 garbage_zone = new_ggc_zone ("Garbage zone");
889 G.pagesize = getpagesize();
890 G.lg_pagesize = exact_log2 (G.pagesize);
891 #ifdef HAVE_MMAP_DEV_ZERO
892 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
893 if (G.dev_zero_fd == -1)
894 abort ();
895 #endif
897 #if 0
898 G.debug_file = fopen ("ggc-mmap.debug", "w");
899 setlinebuf (G.debug_file);
900 #else
901 G.debug_file = stdout;
902 #endif
904 #ifdef USING_MMAP
905 /* StunOS has an amazing off-by-one error for the first mmap allocation
906 after fiddling with RLIMIT_STACK. The result, as hard as it is to
907 believe, is an unaligned page allocation, which would cause us to
908 hork badly if we tried to use it. */
910 char *p = alloc_anon (NULL, G.pagesize, &main_zone);
911 struct page_entry *e;
912 if ((size_t)p & (G.pagesize - 1))
914 /* How losing. Discard this one and try another. If we still
915 can't get something useful, give up. */
917 p = alloc_anon (NULL, G.pagesize, &main_zone);
918 if ((size_t)p & (G.pagesize - 1))
919 abort ();
922 /* We have a good page, might as well hold onto it... */
923 e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
924 e->bytes = G.pagesize;
925 e->page = p;
926 e->next = main_zone.free_pages;
927 main_zone.free_pages = e;
929 #endif
932 /* Start a new GGC zone. */
934 struct alloc_zone *
935 new_ggc_zone (const char * name)
937 struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
938 new_zone->name = name;
939 new_zone->next_zone = G.zones->next_zone;
940 G.zones->next_zone = new_zone;
941 return new_zone;
944 /* Destroy a GGC zone. */
945 void
946 destroy_ggc_zone (struct alloc_zone * dead_zone)
948 struct alloc_zone *z;
950 for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
951 /* Just find that zone. */ ;
953 #ifdef ENABLE_CHECKING
954 /* We should have found the zone in the list. Anything else is fatal. */
955 if (!z)
956 abort ();
957 #endif
959 /* z is dead, baby. z is dead. */
960   z->dead = true;
963 /* Increment the `GC context'. Objects allocated in an outer context
964 are never freed, eliminating the need to register their roots. */
966 void
967 ggc_push_context (void)
969 struct alloc_zone *zone;
970 for (zone = G.zones; zone; zone = zone->next_zone)
971 ++(zone->context_depth);
972 /* Die on wrap. */
973 if (main_zone.context_depth >= HOST_BITS_PER_LONG)
974 abort ();
977 /* Decrement the `GC context'. All objects allocated since the
978 previous ggc_push_context are migrated to the outer context. */
980 static void
981 ggc_pop_context_1 (struct alloc_zone *zone)
983 unsigned long omask;
984 unsigned depth;
985 page_entry *p;
987 depth = --(zone->context_depth);
988 omask = (unsigned long)1 << (depth + 1);
990 if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
991 return;
993 zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
994 zone->context_depth_allocations &= omask - 1;
995 zone->context_depth_collections &= omask - 1;
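/* For illustration: popping from depth 2 to depth 1 gives
   OMASK == 1 << 2.  If neither bitmap has bit 2 set there is nothing
   to do; otherwise the depth-2 allocation bit is folded down into
   bit 1 (the new current depth) and both bitmaps are masked with
   OMASK - 1 so only the bits for depths 0 and 1 survive.  */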
997 /* Any remaining pages in the popped context are lowered to the new
998 current context; i.e. objects allocated in the popped context and
999 left over are imported into the previous context. */
1000 for (p = zone->pages; p != NULL; p = p->next)
1001 if (p->context_depth > depth)
1002 p->context_depth = depth;
1005 /* Pop all the zone contexts. */
1007 void
1008 ggc_pop_context (void)
1010 struct alloc_zone *zone;
1011 for (zone = G.zones; zone; zone = zone->next_zone)
1012 ggc_pop_context_1 (zone);
1015 /* Free all empty pages and objects within a page for a given zone.  */
1017 static void
1018 sweep_pages (struct alloc_zone *zone)
1020 page_entry **pp, *p, *next;
1021 struct alloc_chunk *chunk, *last_free, *end;
1022 size_t last_free_size, allocated = 0;
1023 bool nomarksinpage;
1024 /* First, reset the free_chunks lists, since we are going to
1025 re-free free chunks in hopes of coalescing them into large chunks. */
1026 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1027 pp = &zone->pages;
1028 for (p = zone->pages; p ; p = next)
1030 next = p->next;
1031       /* Large pages are all-or-nothing affairs: either they are
1032          completely empty, or they are completely full.
1034          XXX: Should we bother to increment allocated?  */
1035 if (p->large_p)
1037 if (((struct alloc_chunk *)p->page)->mark == 1)
1039 ((struct alloc_chunk *)p->page)->mark = 0;
1040 allocated += p->bytes - CHUNK_OVERHEAD;
1041 pp = &p->next;
1043 else
1045 *pp = next;
1046 #ifdef ENABLE_GC_CHECKING
1047 /* Poison the page. */
1048 memset (p->page, 0xb5, p->bytes);
1049 #endif
1050 free_page (p);
1052 continue;
1055 /* This page has now survived another collection. */
1056 p->survived++;
1058 /* Which leaves full and partial pages. Step through all chunks,
1059 consolidate those that are free and insert them into the free
1060 lists. Note that consolidation slows down collection
1061 slightly. */
1063 chunk = (struct alloc_chunk *)p->page;
1064 end = (struct alloc_chunk *)(p->page + G.pagesize);
1065 last_free = NULL;
1066 last_free_size = 0;
1067 nomarksinpage = true;
1070 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
1071 if (chunk->mark || p->context_depth < zone->context_depth)
1073 nomarksinpage = false;
1074 if (last_free)
1076 last_free->type = 0;
1077 last_free->size = last_free_size;
1078 last_free->mark = 0;
1079 poison_chunk (last_free, last_free_size);
1080 free_chunk (last_free, last_free_size, zone);
1081 last_free = NULL;
1083 if (chunk->mark)
1085 allocated += chunk->size;
1087 chunk->mark = 0;
1089 else
1091 if (last_free)
1093 last_free_size += CHUNK_OVERHEAD + chunk->size;
1095 else
1097 last_free = chunk;
1098 last_free_size = chunk->size;
1102 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1104 while (chunk < end);
1106 if (nomarksinpage)
1108 *pp = next;
1109 #ifdef ENABLE_GC_CHECKING
1110 /* Poison the page. */
1111 memset (p->page, 0xb5, p->bytes);
1112 #endif
1113 free_page (p);
1114 continue;
1116 else if (last_free)
1118 last_free->type = 0;
1119 last_free->size = last_free_size;
1120 last_free->mark = 0;
1121 poison_chunk (last_free, last_free_size);
1122 free_chunk (last_free, last_free_size, zone);
1124 pp = &p->next;
1127 zone->allocated = allocated;
1130 /* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
1131 is true if we need to mark before sweeping, false if some other
1132 zone collection has already performed marking for us. Returns true
1133 if we collected, false otherwise. */
1135 static bool
1136 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1138 if (!quiet_flag)
1139 fprintf (stderr, " {%s GC %luk -> ",
1140 zone->name, (unsigned long) zone->allocated / 1024);
1142 /* Zero the total allocated bytes. This will be recalculated in the
1143 sweep phase. */
1144 zone->allocated = 0;
1146 /* Release the pages we freed the last time we collected, but didn't
1147 reuse in the interim. */
1148 release_pages (zone);
1150 /* Indicate that we've seen collections at this context depth. */
1151 zone->context_depth_collections
1152 = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
1153 if (need_marking)
1154 ggc_mark_roots ();
1155 sweep_pages (zone);
1156 zone->was_collected = true;
1157 zone->allocated_last_gc = zone->allocated;
1159 if (!quiet_flag)
1160 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1161 return true;
1164 /* Calculate the average page survival rate in terms of number of
1165 collections. */
1167 static float
1168 calculate_average_page_survival (struct alloc_zone *zone)
1170 float count = 0.0;
1171 float survival = 0.0;
1172 page_entry *p;
1173 for (p = zone->pages; p; p = p->next)
1175 count += 1.0;
1176 survival += p->survived;
1178 return survival/count;
1181 /* Check the magic cookies all of the chunks contain, to make sure we
1182 aren't doing anything stupid, like stomping on alloc_chunk
1183 structures. */
1185 static inline void
1186 check_cookies (void)
1188 #ifdef COOKIE_CHECKING
1189 page_entry *p;
1190 struct alloc_zone *zone;
1192 for (zone = G.zones; zone; zone = zone->next_zone)
1194 for (p = zone->pages; p; p = p->next)
1196 if (!p->large_p)
1198 struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1199 struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1202 if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
1203 abort ();
1204 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1206 while (chunk < end);
1210 #endif
1212 /* Top level collection routine. */
1214 void
1215 ggc_collect (void)
1217 struct alloc_zone *zone;
1218 bool marked = false;
1219 float f;
1221 timevar_push (TV_GC);
1222 check_cookies ();
1224 if (!always_collect)
1226 float allocated_last_gc = 0, allocated = 0, min_expand;
1228 for (zone = G.zones; zone; zone = zone->next_zone)
1230 allocated_last_gc += zone->allocated_last_gc;
1231 allocated += zone->allocated;
1234 allocated_last_gc =
1235 MAX (allocated_last_gc,
1236 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1237 min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1239 if (allocated < allocated_last_gc + min_expand)
1241 timevar_pop (TV_GC);
1242 return;
1246 /* Start by possibly collecting the main zone. */
1247 main_zone.was_collected = false;
1248 marked |= ggc_collect_1 (&main_zone, true);
1250 /* In order to keep the number of collections down, we don't
1251 collect other zones unless we are collecting the main zone. This
1252 gives us roughly the same number of collections as we used to
1254      have with the old GC.  The number of collections is important
1254 because our main slowdown (according to profiling) is now in
1255 marking. So if we mark twice as often as we used to, we'll be
1256 twice as slow. Hopefully we'll avoid this cost when we mark
1257 zone-at-a-time. */
1258 /* NOTE drow/2004-07-28: We now always collect the main zone, but
1259 keep this code in case the heuristics are further refined. */
1261 if (main_zone.was_collected)
1263 struct alloc_zone *zone;
1265 for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
1267 check_cookies ();
1268 zone->was_collected = false;
1269 marked |= ggc_collect_1 (zone, !marked);
1273 /* Print page survival stats, if someone wants them. */
1274 if (GGC_DEBUG_LEVEL >= 2)
1276 for (zone = G.zones; zone; zone = zone->next_zone)
1278 if (zone->was_collected)
1280 f = calculate_average_page_survival (zone);
1281 printf ("Average page survival in zone `%s' is %f\n",
1282 zone->name, f);
1287   /* Since we don't mark one zone at a time right now, marking in any
1288 zone means marking in every zone. So we have to clear all the
1289 marks in all the zones that weren't collected already. */
1290 if (marked)
1292 page_entry *p;
1293 for (zone = G.zones; zone; zone = zone->next_zone)
1295 if (zone->was_collected)
1296 continue;
1297 for (p = zone->pages; p; p = p->next)
1299 if (!p->large_p)
1301 struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1302 struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1305 prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
1306 if (chunk->mark || p->context_depth < zone->context_depth)
1308 chunk->mark = 0;
1310 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1312 while (chunk < end);
1314 else
1316 ((struct alloc_chunk *)p->page)->mark = 0;
1322 /* Free dead zones. */
1323 for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
1325 if (zone->next_zone->dead)
1327 struct alloc_zone *dead_zone = zone->next_zone;
1329 printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
1331 /* The zone must be empty. */
1332 if (dead_zone->allocated != 0)
1333 abort ();
1335 /* Unchain the dead zone, release all its pages and free it. */
1336 zone->next_zone = zone->next_zone->next_zone;
1337 release_pages (dead_zone);
1338 free (dead_zone);
1342 timevar_pop (TV_GC);
1345 /* Print allocation statistics. */
1346 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1347 ? (x) \
1348 : ((x) < 1024*1024*10 \
1349 ? (x) / 1024 \
1350 : (x) / (1024*1024))))
1351 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
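/* For illustration: SCALE/LABEL print 5000 as "5000 ", 2000000 as
   "1953k" (2000000 / 1024) and 52428800 as "50M"
   (52428800 / (1024 * 1024)); the factor-of-ten thresholds keep at
   least two significant digits in each scaled value.  */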
1353 void
1354 ggc_print_statistics (void)
1356 struct alloc_zone *zone;
1357 struct ggc_statistics stats;
1358 size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
1360 /* Clear the statistics. */
1361 memset (&stats, 0, sizeof (stats));
1363 /* Make sure collection will really occur, in all zones. */
1364 always_collect = 1;
1366 /* Collect and print the statistics common across collectors. */
1367 ggc_print_common_statistics (stderr, &stats);
1369 always_collect = 0;
1371 /* Release free pages so that we will not count the bytes allocated
1372 there as part of the total allocated memory. */
1373 for (zone = G.zones; zone; zone = zone->next_zone)
1374 release_pages (zone);
1376 /* Collect some information about the various sizes of
1377 allocation. */
1378 fprintf (stderr,
1379 "Memory still allocated at the end of the compilation process\n");
1381 fprintf (stderr, "%20s %10s %10s %10s\n",
1382 "Zone", "Allocated", "Used", "Overhead");
1383 for (zone = G.zones; zone; zone = zone->next_zone)
1385 page_entry *p;
1386 size_t allocated;
1387 size_t in_use;
1388 size_t overhead;
1390 /* Skip empty entries. */
1391 if (!zone->pages)
1392 continue;
1394 overhead = allocated = in_use = 0;
1396       /* Figure out the total number of bytes allocated for objects in
1397          this zone, and how many of them are actually in use.  Also figure
1398 out how much memory the page table is using. */
1399 for (p = zone->pages; p; p = p->next)
1401 struct alloc_chunk *chunk;
1403 /* We've also allocated sizeof (page_entry), but it's not in the
1404 "managed" area... */
1405 allocated += p->bytes;
1406 overhead += sizeof (page_entry);
1408 if (p->large_p)
1410 in_use += p->bytes - CHUNK_OVERHEAD;
1411 chunk = (struct alloc_chunk *) p->page;
1412 overhead += CHUNK_OVERHEAD;
1413 if (!chunk->type)
1414 abort ();
1415 if (chunk->mark)
1416 abort ();
1417 continue;
1420 for (chunk = (struct alloc_chunk *) p->page;
1421 (char *) chunk < (char *) p->page + p->bytes;
1422 chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size))
1424 overhead += CHUNK_OVERHEAD;
1425 if (chunk->type)
1426 in_use += chunk->size;
1427 if (chunk->mark)
1428 abort ();
1431 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
1432 zone->name,
1433 SCALE (allocated), LABEL (allocated),
1434 SCALE (in_use), LABEL (in_use),
1435 SCALE (overhead), LABEL (overhead));
1437 if (in_use != zone->allocated)
1438 abort ();
1440 total_overhead += overhead;
1441 total_allocated += zone->allocated;
1442 total_bytes_mapped += zone->bytes_mapped;
1445 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
1446 SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
1447 SCALE (total_allocated), LABEL(total_allocated),
1448 SCALE (total_overhead), LABEL (total_overhead));
1450 #ifdef GATHER_STATISTICS
1452 unsigned long long all_overhead = 0, all_allocated = 0;
1453 unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
1454 unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
1455 unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
1457 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
1459 for (zone = G.zones; zone; zone = zone->next_zone)
1461 all_overhead += zone->stats.total_overhead;
1462 all_allocated += zone->stats.total_allocated;
1464 all_allocated_under32 += zone->stats.total_allocated_under32;
1465 all_overhead_under32 += zone->stats.total_overhead_under32;
1467 all_allocated_under64 += zone->stats.total_allocated_under64;
1468 all_overhead_under64 += zone->stats.total_overhead_under64;
1470 all_allocated_under128 += zone->stats.total_allocated_under128;
1471 all_overhead_under128 += zone->stats.total_overhead_under128;
1473 fprintf (stderr, "%20s: %10lld\n",
1474 zone->name, zone->stats.total_allocated);
1477 fprintf (stderr, "\n");
1479 fprintf (stderr, "Total Overhead: %10lld\n",
1480 all_overhead);
1481 fprintf (stderr, "Total Allocated: %10lld\n",
1482 all_allocated);
1484 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
1485 all_overhead_under32);
1486 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
1487 all_allocated_under32);
1488 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
1489 all_overhead_under64);
1490 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
1491 all_allocated_under64);
1492 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
1493 all_overhead_under128);
1494 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
1495 all_allocated_under128);
1497 #endif
1500 struct ggc_pch_data
1502 struct ggc_pch_ondisk
1504 unsigned total;
1505 } d;
1506 size_t base;
1507 size_t written;
1510 /* Initialize the PCH data structure. */
1512 struct ggc_pch_data *
1513 init_ggc_pch (void)
1515 return xcalloc (sizeof (struct ggc_pch_data), 1);
1518 /* Add the size of object X to the size of the PCH data. */
1520 void
1521 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1522 size_t size, bool is_string)
1524 if (!is_string)
1526 d->d.total += size + CHUNK_OVERHEAD;
1528 else
1529 d->d.total += size;
1532 /* Return the total size of the PCH data. */
1534 size_t
1535 ggc_pch_total_size (struct ggc_pch_data *d)
1537 return d->d.total;
1540 /* Set the base address for the objects in the PCH file. */
1542 void
1543 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
1545 d->base = (size_t) base;
1548 /* Allocate a place for object X of size SIZE in the PCH file. */
1550 char *
1551 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
1552 size_t size, bool is_string)
1554 char *result;
1555 result = (char *)d->base;
1556 if (!is_string)
1558 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1559 if (chunk->large)
1560 d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
1561 else
1562 d->base += chunk->size + CHUNK_OVERHEAD;
1563 return result + CHUNK_OVERHEAD;
1565 else
1567 d->base += size;
1568 return result;
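/* Note that D->base acts as a running cursor through the PCH region:
   each call reserves room for one object at the cursor and advances
   it.  For non-strings the reservation includes the chunk header, and
   the returned address is CHUNK_OVERHEAD past the start of the
   reservation so the object's data will sit where u.data does in a
   live chunk.  */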
1573 /* Prepare to write out the PCH data to file F. */
1575 void
1576 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1577 FILE *f ATTRIBUTE_UNUSED)
1579 /* Nothing to do. */
1582 /* Write out object X of SIZE to file F. */
1584 void
1585 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1586 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
1587 size_t size, bool is_string)
1589 if (!is_string)
1591 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1592 size = ggc_get_size (x);
1593 if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
1594 fatal_error ("can't write PCH file: %m");
1595 d->written += size + CHUNK_OVERHEAD;
1597 else
1599 if (fwrite (x, size, 1, f) != 1)
1600 fatal_error ("can't write PCH file: %m");
1601 d->written += size;
1605 void
1606 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
1608 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
1609 fatal_error ("can't write PCH file: %m");
1610 free (d);
1612 void
1613 ggc_pch_read (FILE *f, void *addr)
1615 struct ggc_pch_ondisk d;
1616 struct page_entry *entry;
1617 struct alloc_zone *pch_zone;
1618 if (fread (&d, sizeof (d), 1, f) != 1)
1619 fatal_error ("can't read PCH file: %m");
1620 entry = xcalloc (1, sizeof (struct page_entry));
1621 entry->bytes = d.total;
1622 entry->page = addr;
1623 entry->context_depth = 0;
1624 pch_zone = new_ggc_zone ("PCH zone");
1625 entry->zone = pch_zone;
1626 entry->next = entry->zone->pages;
1627 entry->zone->pages = entry;