1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
4 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
5 (dberlin@dberlin.org)
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"
#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#error "Zone collector requires mmap"
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif
/* NOTES:
     If we track inter-zone pointers, we can mark single zones at a
     time.
     If we have a zone where we guarantee no inter-zone pointers, we
     could mark that zone separately.
     The garbage zone should not be marked, and we should return 1 in
     ggc_set_mark for any object in the garbage zone, which cuts off
     marking quickly.  */
/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than or equal to the page size.

   Pages for small objects are broken up into chunks, each of which
   is described by a struct alloc_chunk.  One can walk over all
   chunks on a page by adding the chunk size to the chunk's data
   address.  The free space for a page lives in the free chunk bins.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
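
/* For illustration only (not part of the allocator): the chunk walk
   described above looks like this for a small-object page PAGE:

     struct alloc_chunk *chunk = (struct alloc_chunk *) page;
     struct alloc_chunk *end = (struct alloc_chunk *) (page + G.pagesize);
     while (chunk < end)
       chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);

   This is the same idiom used by sweep_pages and check_cookies below.  */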
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif

#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif
/* This structure manages small chunks.  When the chunk is free, it's
   linked with other chunks via next_free.  When the chunk is allocated,
   the data starts at u.  Large chunks are allocated one at a time to
   their own page, and so don't come in here.

   The "type" field is a placeholder for a future change to do
   generational collection.  At present it is 0 when free and
   1 when allocated.  */

struct alloc_chunk {
#ifdef COOKIE_CHECKING
  unsigned int magic;
#endif
  unsigned int type:1;
  unsigned int mark:1;
  unsigned char large;
  unsigned short size;
  /* Right now, on 32-bit hosts we don't have enough room to save the
     typecode unless we make the one remaining flag into a bitfield.
     There's a performance cost to that, so we don't do it until we're
     ready to use the type information for something.  */
  union {
    struct alloc_chunk *next_free;
    char data[1];

    /* Make sure the data is sufficiently aligned.  */
    HOST_WIDEST_INT align_i;
#ifdef HAVE_LONG_DOUBLE
    long double align_d;
#else
    double align_d;
#endif
  } u;
};

#define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u))
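
/* For illustration: CHUNK_OVERHEAD is the size of the chunk header, so
   for any allocated chunk the user-visible object and its header are
   related by

     chunk->u.data == (char *) chunk + CHUNK_OVERHEAD;

   which is why ggc_set_mark and friends below recover the header with
   (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD).  */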
/* We maintain several bins of free lists for chunks, for very small
   objects.  We never exhaustively search other bins -- if we don't
   find one of the proper size, we allocate from the "larger" bin.  */

/* Decreasing the number of free bins increases the time it takes to
   allocate.  The same is true of increasing max_free_bin_size without
   increasing num_free_bins.

   After much histogramming of allocation sizes and time spent on gc,
   on a PowerPC G4 7450 - 667 MHz, and a Pentium 4 - 2.8 GHz,
   these were determined to be the optimal values.  */
#define NUM_FREE_BINS		64
#define MAX_FREE_BIN_SIZE	(64 * sizeof (void *))
#define FREE_BIN_DELTA		(MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
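
/* For illustration, on a host with 8-byte pointers: FREE_BIN_DELTA is
   (64 * 8) / 64 == 8, so SIZE_BIN_UP (24) == 3 (allocation looks in
   the bin of chunks at least 24 bytes long) and SIZE_BIN_DOWN (30) == 3
   (a 30-byte chunk is freed onto that same bin).  Sizes past
   MAX_FREE_BIN_SIZE all map to the catch-all bin 0.  */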
/* Marker used as chunk->size for a large object.  Should correspond
   to the size of the bitfield above.  */
#define LARGE_OBJECT_SIZE	0x7fff

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
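
/* For illustration: because the leading `char c' forces the union to
   the next properly aligned offset, offsetof yields the strictest
   alignment among the union's members.  E.g. if HOST_WIDEST_INT needs
   8-byte alignment and long double needs 16-byte alignment on some
   host, MAX_ALIGNMENT is 16 there.  */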
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
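
/* For illustration: ROUND_UP_VALUE (13, 8) is 8 - 1 - (8 - 1 + 13) % 8
   == 7 - 20 % 8 == 7 - 4 == 3, i.e. 13 + 3 is the next multiple of 8;
   and ROUND_UP (13, 8) == CEIL (13, 8) * 8 == 2 * 8 == 16.  */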
/* A page_entry records the status of an allocation page.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* How many collections we've survived.  */
  size_t survived;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;
} page_entry;
/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* The file to write debugging output to.  */
  FILE *debug_file;
} G;
/* The zone allocation structure.  */
struct alloc_zone
{
  /* Name of the zone.  */
  const char *name;

  /* Linked list of pages in a zone.  */
  page_entry *pages;

  /* Linked lists of free storage.  Slot N (for 1 <= N <= NUM_FREE_BINS)
     holds chunks of size N * FREE_BIN_DELTA.  All other chunks are in
     slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;
struct alloc_zone *rtl_zone;
struct alloc_zone *garbage_zone;
struct alloc_zone *tree_zone;

static int always_collect;

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  */
#define GGC_QUIRE_SIZE 16
static int ggc_allocated_p (const void *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct page_entry * alloc_small_page (struct alloc_zone *);
static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
static void free_page (struct page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short MEM_STAT_DECL);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void check_cookies (void);
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  struct alloc_chunk *chunk;
  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->type == 1)
    return true;
  return false;
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  VALGRIND_MALLOCLIKE_BLOCK (page, size, 0, 0);

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;
  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid a handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
  return page;
}
#endif
/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static inline struct page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  page = NULL;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
      page = entry->page;
    }
#ifdef USING_MMAP
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = zone->free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      zone->free_pages = f;
    }
#endif
  if (entry == NULL)
    entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));

  entry->next = 0;
  entry->bytes = G.pagesize;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = false;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long) 1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + G.pagesize - 1);

  return entry;
}
/* Allocate a large page of size SIZE in ZONE.  */

static inline struct page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;
  size = ROUND_UP (size, 1024);
  page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
  entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);

  entry->next = 0;
  entry->bytes = size;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = true;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long) 1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
	     entry->page, entry->page + entry->bytes - 1);

  if (entry->large_p)
    {
      free (entry->page);
      VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
    }
  else
    {
      /* Mark the page as inaccessible.  Discard the handle to
	 avoid a handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

      entry->next = entry->zone->free_pages;
      entry->zone->free_pages = entry;
    }
}
/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}
/* Place CHUNK of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
{
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  if (bin == 0)
    abort ();
  if (bin > NUM_FREE_BINS)
    bin = 0;
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
    abort ();
  chunk->magic = DEADCHUNK_MAGIC;
#endif
  chunk->u.next_free = zone->free_chunks[bin];
  zone->free_chunks[bin] = chunk;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *) chunk);
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}
/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

static void *
ggc_alloc_zone_1 (size_t orig_size, struct alloc_zone *zone,
		  short type ATTRIBUTE_UNUSED
		  MEM_STAT_DECL)
{
  size_t bin = 0;
  size_t lsize = 0;
  struct page_entry *entry;
  struct alloc_chunk *chunk, *lchunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Align size, so that we're assured of aligned allocations.  */
  if (size < FREE_BIN_DELTA)
    size = FREE_BIN_DELTA;
  size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Large objects are handled specially.  */
  if (size >= G.pagesize - 2 * CHUNK_OVERHEAD - FREE_BIN_DELTA)
    {
      size = ROUND_UP (size, 1024);
      entry = alloc_large_page (size, zone);
      entry->survived = 0;
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 1;
      chunk->size = CEIL (size, 1024);

      goto found;
    }

  /* First look for a tiny object already segregated into its own
     size bucket.  */
  bin = SIZE_BIN_UP (size);
  if (bin <= NUM_FREE_BINS)
    {
      chunk = zone->free_chunks[bin];
      if (chunk)
	{
	  zone->free_chunks[bin] = chunk->u.next_free;
	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->u.next_free;
      chunk = *pp;
    }

  /* Failing that, allocate new storage.  */
  if (!chunk)
    {
      entry = alloc_small_page (zone);
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = G.pagesize - CHUNK_OVERHEAD;
      chunk->large = 0;
    }
  else
    {
      *pp = chunk->u.next_free;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 0;
    }
  /* Release extra memory from a chunk that's too big.  */
  lsize = chunk->size - size;
  if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;

      lsize -= CHUNK_OVERHEAD;
      lchunk = (struct alloc_chunk *) (chunk->u.data + size);
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
#ifdef COOKIE_CHECKING
      lchunk->magic = CHUNK_MAGIC;
#endif
      lchunk->type = 0;
      lchunk->mark = 0;
      lchunk->size = lsize;
      lchunk->large = 0;
      free_chunk (lchunk, lsize, zone);
      lsize = 0;
    }

  /* Calculate the object's address.  */
 found:
#ifdef COOKIE_CHECKING
  chunk->magic = CHUNK_MAGIC;
#endif
  chunk->type = 1;
  chunk->mark = 0;
  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  */
  result = chunk->u.data;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in the presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid a handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* `Poison' the entire allocated object.  */
  memset (result, 0xaf, size);
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size + CHUNK_OVERHEAD - orig_size PASS_MEM_STAT);

  {
    size_t object_size = size + CHUNK_OVERHEAD;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
	zone->stats.total_overhead_under32 += overhead;
	zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
	zone->stats.total_overhead_under64 += overhead;
	zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
	zone->stats.total_overhead_under128 += overhead;
	zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
	     (void *) chunk, (unsigned long) size, result);

  return result;
}
/* Allocate SIZE bytes of memory whose type is specified by GTE, in an
   appropriate zone for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
		      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_1 (size, tree_zone, gte PASS_MEM_STAT);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);

    default:
      return ggc_alloc_zone_1 (size, &main_zone, gte PASS_MEM_STAT);
    }
}
/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_alloc_zone_1 (size, &main_zone, -1 PASS_MEM_STAT);
}

/* Zone allocation allocates into the specified zone.  */

void *
ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone MEM_STAT_DECL)
{
  return ggc_alloc_zone_1 (size, zone, -1 PASS_MEM_STAT);
}
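
/* For illustration, hypothetical call sites (not from this file) would
   look roughly like:

     rtx r = ggc_alloc_zone_stat (sizeof (struct rtx_def), rtl_zone);
     void *obj = ggc_alloc_stat (64);

   In practice these entry points are normally reached through the
   ggc_alloc/ggc_alloc_zone wrappers declared in ggc.h, which supply
   the MEM_STAT bookkeeping arguments.  */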
/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
  memset ((CHUNK)->u.data, 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#endif

/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);

  /* Poison the chunk.  */
  poison_chunk (chunk, ggc_get_size (p));
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->mark)
    return 1;
  chunk->mark = 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  return chunk->mark;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->large)
    return chunk->size * 1024;

  return chunk->size;
}
/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  rtl_zone = new_ggc_zone ("RTL zone");
  tree_zone = new_ggc_zone ("Tree zone");
  garbage_zone = new_ggc_zone ("Garbage zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct page_entry *e;
    if ((size_t) p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	if ((size_t) p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = main_zone.free_pages;
    main_zone.free_pages = e;
  }
#endif
}
/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char * name)
{
  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
  return new_zone;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone * dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */ ;

#ifdef ENABLE_CHECKING
  /* We should have found the zone in the list.  Anything else is fatal.  */
  if (!z)
    abort ();
#endif

  /* z is dead, baby.  z is dead.  */
  z->dead = true;
}
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ++(zone->context_depth);
  /* Die on wrap.  */
  if (main_zone.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}

/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

static void
ggc_pop_context_1 (struct alloc_zone *zone)
{
  unsigned long omask;
  unsigned depth;
  page_entry *p;

  depth = --(zone->context_depth);
  omask = (unsigned long) 1 << (depth + 1);

  if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
    return;

  zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
  zone->context_depth_allocations &= omask - 1;
  zone->context_depth_collections &= omask - 1;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (p = zone->pages; p != NULL; p = p->next)
    if (p->context_depth > depth)
      p->context_depth = depth;
}
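
/* For illustration: popping from depth 3 to depth 2 gives
   omask == 1UL << 3 == 0x8.  If bit 3 of context_depth_allocations is
   set (an allocation happened at the popped depth), the
   `(... & omask) >> 1' step copies it down onto bit 2, and
   `&= omask - 1' (mask 0x7) then clears bit 3 and everything above it,
   folding the popped context's history into the new current depth.  */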
/* Pop all the zone contexts.  */

void
ggc_pop_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ggc_pop_context_1 (zone);
}
/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  page_entry **pp, *p, *next;
  struct alloc_chunk *chunk, *last_free, *end;
  size_t last_free_size, allocated = 0;
  bool nomarksinpage;
  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  pp = &zone->pages;
  for (p = zone->pages; p ; p = next)
    {
      next = p->next;
      /* Large pages are all-or-none affairs.  Either they are
	 completely empty, or they are completely full.

	 XXX: Should we bother to increment allocated?  */
      if (p->large_p)
	{
	  if (((struct alloc_chunk *) p->page)->mark == 1)
	    {
	      ((struct alloc_chunk *) p->page)->mark = 0;
	      allocated += p->bytes - CHUNK_OVERHEAD;
	      pp = &p->next;
	    }
	  else
	    {
	      *pp = next;
#ifdef ENABLE_GC_CHECKING
	      /* Poison the page.  */
	      memset (p->page, 0xb5, p->bytes);
#endif
	      free_page (p);
	    }
	  continue;
	}

      /* This page has now survived another collection.  */
      p->survived++;

      /* Which leaves full and partial pages.  Step through all chunks,
	 consolidate those that are free and insert them into the free
	 lists.  Note that consolidation slows down collection
	 slightly.  */

      chunk = (struct alloc_chunk *) p->page;
      end = (struct alloc_chunk *) (p->page + G.pagesize);
      last_free = NULL;
      last_free_size = 0;
      nomarksinpage = true;
      do
	{
	  prefetch ((struct alloc_chunk *) (chunk->u.data + chunk->size));
	  if (chunk->mark || p->context_depth < zone->context_depth)
	    {
	      nomarksinpage = false;
	      if (last_free)
		{
		  last_free->type = 0;
		  last_free->size = last_free_size;
		  last_free->mark = 0;
		  poison_chunk (last_free, last_free_size);
		  free_chunk (last_free, last_free_size, zone);
		  last_free = NULL;
		}
	      if (chunk->mark)
		allocated += chunk->size;
	      chunk->mark = 0;
	    }
	  else
	    {
	      if (last_free)
		last_free_size += CHUNK_OVERHEAD + chunk->size;
	      else
		{
		  last_free = chunk;
		  last_free_size = chunk->size;
		}
	    }

	  chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
	}
      while (chunk < end);

      if (nomarksinpage)
	{
	  *pp = next;
#ifdef ENABLE_GC_CHECKING
	  /* Poison the page.  */
	  memset (p->page, 0xb5, p->bytes);
#endif
	  free_page (p);
	  continue;
	}
      else if (last_free)
	{
	  last_free->type = 0;
	  last_free->size = last_free_size;
	  last_free->mark = 0;
	  poison_chunk (last_free, last_free_size);
	  free_chunk (last_free, last_free_size, zone);
	}
      pp = &p->next;
    }

  zone->allocated = allocated;
}
/* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
	     zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  /* Indicate that we've seen collections at this context depth.  */
  zone->context_depth_collections
    = ((unsigned long) 1 << (zone->context_depth + 1)) - 1;
  if (need_marking)
    ggc_mark_roots ();
  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  page_entry *p;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->survived;
    }
  return survival / count;
}
/* Check the magic cookies all of the chunks contain, to make sure we
   aren't doing anything stupid, like stomping on alloc_chunk
   structures.  */

static inline void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
  page_entry *p;
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      for (p = zone->pages; p; p = p->next)
	{
	  if (!p->large_p)
	    {
	      struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
	      struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
	      do
		{
		  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
		    abort ();
		  chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
		}
	      while (chunk < end);
	    }
	}
    }
#endif
}
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;
  float f;

  timevar_push (TV_GC);
  check_cookies ();

  if (!always_collect)
    {
      float allocated_last_gc = 0, allocated = 0, min_expand;

      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  allocated_last_gc += zone->allocated_last_gc;
	  allocated += zone->allocated;
	}

      allocated_last_gc =
	MAX (allocated_last_gc,
	     (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (allocated < allocated_last_gc + min_expand)
	{
	  timevar_pop (TV_GC);
	  return;
	}
    }

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */
  /* NOTE drow/2004-07-28: We now always collect the main zone, but
     keep this code in case the heuristics are further refined.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
	{
	  check_cookies ();
	  zone->was_collected = false;
	  marked |= ggc_collect_1 (zone, !marked);
	}
    }

  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    {
	      f = calculate_average_page_survival (zone);
	      printf ("Average page survival in zone `%s' is %f\n",
		      zone->name, f);
	    }
	}
    }

  /* Since we don't mark zone at a time right now, marking in any
     zone means marking in every zone.  So we have to clear all the
     marks in all the zones that weren't collected already.  */
  if (marked)
    {
      page_entry *p;
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    continue;
	  for (p = zone->pages; p; p = p->next)
	    {
	      if (!p->large_p)
		{
		  struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
		  struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
		  do
		    {
		      prefetch ((struct alloc_chunk *) (chunk->u.data + chunk->size));
		      if (chunk->mark || p->context_depth < zone->context_depth)
			chunk->mark = 0;
		      chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
		    }
		  while (chunk < end);
		}
	      else
		((struct alloc_chunk *) p->page)->mark = 0;
	    }
	}
    }

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
	{
	  struct alloc_zone *dead_zone = zone->next_zone;

	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

	  /* The zone must be empty.  */
	  if (dead_zone->allocated != 0)
	    abort ();

	  /* Unchain the dead zone, release all its pages and free it.  */
	  zone->next_zone = zone->next_zone->next_zone;
	  release_pages (dead_zone);
	  free (dead_zone);
	}
    }

  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
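
/* For illustration: SCALE and LABEL together pick a human-readable
   unit, e.g. SCALE (4096) == 4096 with LABEL ' ' (bytes),
   SCALE (1 << 20) == 1024 with LABEL 'k', and
   SCALE (200 * 1024 * 1024) == 200 with LABEL 'M'.  */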
void
ggc_print_statistics (void)
{
  struct alloc_zone *zone;
  struct ggc_statistics stats;
  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur, in all zones.  */
  always_collect = 1;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  always_collect = 0;

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    release_pages (zone);

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
	   "Memory still allocated at the end of the compilation process\n");

  fprintf (stderr, "%20s %10s %10s %10s\n",
	   "Zone", "Allocated", "Used", "Overhead");
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty zones.  */
      if (!zone->pages)
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = zone->pages; p; p = p->next)
	{
	  struct alloc_chunk *chunk;

	  /* We've also allocated sizeof (page_entry), but it's not in the
	     "managed" area...  */
	  allocated += p->bytes;
	  overhead += sizeof (page_entry);

	  if (p->large_p)
	    {
	      in_use += p->bytes - CHUNK_OVERHEAD;
	      chunk = (struct alloc_chunk *) p->page;
	      overhead += CHUNK_OVERHEAD;
	      if (!chunk->type)
		abort ();
	      if (chunk->mark)
		abort ();
	      continue;
	    }

	  for (chunk = (struct alloc_chunk *) p->page;
	       (char *) chunk < (char *) p->page + p->bytes;
	       chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size))
	    {
	      overhead += CHUNK_OVERHEAD;
	      if (chunk->type)
		in_use += chunk->size;
	      if (chunk->mark)
		abort ();
	    }
	}
      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
	       zone->name,
	       SCALE (allocated), LABEL (allocated),
	       SCALE (in_use), LABEL (in_use),
	       SCALE (overhead), LABEL (overhead));

      if (in_use != zone->allocated)
	abort ();

      total_overhead += overhead;
      total_allocated += zone->allocated;
      total_bytes_mapped += zone->bytes_mapped;
    }

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
	   SCALE (total_allocated), LABEL (total_allocated),
	   SCALE (total_overhead), LABEL (total_overhead));
#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    for (zone = G.zones; zone; zone = zone->next_zone)
      {
	all_overhead += zone->stats.total_overhead;
	all_allocated += zone->stats.total_allocated;

	all_allocated_under32 += zone->stats.total_allocated_under32;
	all_overhead_under32 += zone->stats.total_overhead_under32;

	all_allocated_under64 += zone->stats.total_allocated_under64;
	all_overhead_under64 += zone->stats.total_overhead_under64;

	all_allocated_under128 += zone->stats.total_allocated_under128;
	all_overhead_under128 += zone->stats.total_overhead_under128;

	fprintf (stderr, "%20s: %10lld\n",
		 zone->name, zone->stats.total_allocated);
      }

    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
	     all_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
	     all_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
	     all_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
	     all_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
	     all_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
	     all_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
	     all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
	     all_allocated_under128);
  }
#endif
}
struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned total;
  } d;
  size_t base;
  size_t written;
};
/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}

/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    d->d.total += size + CHUNK_OVERHEAD;
  else
    d->d.total += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  return d->d.total;
}

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  d->base = (size_t) base;
}

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string)
{
  char *result;
  result = (char *) d->base;
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *) x - CHUNK_OVERHEAD);
      if (chunk->large)
	d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
      else
	d->base += chunk->size + CHUNK_OVERHEAD;
      return result + CHUNK_OVERHEAD;
    }
  else
    {
      d->base += size;
      return result;
    }
}
1580 void
1581 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1582 FILE *f ATTRIBUTE_UNUSED)
1584 /* Nothing to do. */
1587 /* Write out object X of SIZE to file F. */
1589 void
1590 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1591 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
1592 size_t size, bool is_string)
1594 if (!is_string)
1596 struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1597 size = ggc_get_size (x);
1598 if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
1599 fatal_error ("can't write PCH file: %m");
1600 d->written += size + CHUNK_OVERHEAD;
1602 else
1604 if (fwrite (x, size, 1, f) != 1)
1605 fatal_error ("can't write PCH file: %m");
1606 d->written += size;
1610 void
1611 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
1613 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
1614 fatal_error ("can't write PCH file: %m");
1615 free (d);
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  struct page_entry *entry;
  struct alloc_zone *pch_zone;
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");
  entry = xcalloc (1, sizeof (struct page_entry));
  entry->bytes = d.total;
  entry->page = addr;
  entry->context_depth = 0;
  pch_zone = new_ggc_zone ("PCH zone");
  entry->zone = pch_zone;
  entry->next = entry->zone->pages;
  entry->zone->pages = entry;
}