1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999-2013 Free Software Foundation, Inc.
4 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
5 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
6 <dan@codesourcery.com>.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "tm_p.h"
31 #include "diagnostic-core.h"
32 #include "flags.h"
33 #include "ggc.h"
34 #include "ggc-internal.h"
35 #include "timevar.h"
36 #include "params.h"
37 #include "bitmap.h"
38 #include "plugin.h"
40 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
41 file open. Prefer either to valloc. */
42 #ifdef HAVE_MMAP_ANON
43 # undef HAVE_MMAP_DEV_ZERO
44 # define USING_MMAP
45 #endif
47 #ifdef HAVE_MMAP_DEV_ZERO
48 # define USING_MMAP
49 #endif
51 #ifndef USING_MMAP
52 #error Zone collector requires mmap
53 #endif
55 #if (GCC_VERSION < 3001)
56 #define prefetch(X) ((void) X)
57 #define prefetchw(X) ((void) X)
58 #else
59 #define prefetch(X) __builtin_prefetch (X)
60 #define prefetchw(X) __builtin_prefetch (X, 1, 3)
61 #endif
63 /* FUTURE NOTES:
65 If we track inter-zone pointers, we can mark single zones at a
66 time.
68 If we have a zone where we guarantee no inter-zone pointers, we
69 could mark that zone separately.
71 The garbage zone should not be marked, and we should return 1 in
72 ggc_set_mark for any object in the garbage zone, which cuts off
73 marking quickly. */
75 /* Strategy:
77 This garbage-collecting allocator segregates objects into zones.
78 It also segregates objects into "large" and "small" bins. Large
79 objects are greater than page size.
81 Pages for small objects are broken up into chunks. The page has
82 a bitmap which marks the start position of each chunk (whether
83 allocated or free). Free chunks are on one of the zone's free
84 lists and contain a pointer to the next free chunk. Chunks in
85 most of the free lists have a fixed size determined by the
86 free list. Chunks in the "other" sized free list have their size
87 stored right after their chain pointer.
89 Empty pages (of all sizes) are kept on a single page cache list,
90 and are considered first when new pages are required; they are
91 deallocated at the start of the next collection if they haven't
92 been recycled by then. The free page list is currently per-zone. */
94 /* Define GGC_DEBUG_LEVEL to print debugging information.
95 0: No debugging output.
96 1: GC statistics only.
97 2: Page-entry allocations/deallocations as well.
98 3: Object allocations as well.
99 4: Object marks as well. */
100 #define GGC_DEBUG_LEVEL (0)
102 #ifndef HOST_BITS_PER_PTR
103 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
104 #endif
106 /* This structure manages small free chunks. The SIZE field is only
107 initialized if the chunk is in the "other" sized free list. Large
108 chunks are allocated one at a time to their own page, and so don't
109 come in here. */
111 struct alloc_chunk {
112 struct alloc_chunk *next_free;
113 unsigned int size;
116 /* The size of the fixed-size portion of a small page descriptor. */
117 #define PAGE_OVERHEAD (offsetof (struct small_page_entry, alloc_bits))
119 /* The collector's idea of the page size. This must be a power of two
120 no larger than the system page size, because pages must be aligned
121 to this amount and are tracked at this granularity in the page
122 table. We choose a size at compile time for efficiency.
124 We could make a better guess at compile time if PAGE_SIZE is a
125 constant in system headers, and PAGE_SHIFT is defined... */
126 #define GGC_PAGE_SIZE 4096
127 #define GGC_PAGE_MASK (GGC_PAGE_SIZE - 1)
128 #define GGC_PAGE_SHIFT 12
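/* With the 4096-byte page chosen here, GGC_PAGE_SIZE == 1 << GGC_PAGE_SHIFT
   and GGC_PAGE_MASK == 0xfff, so ((size_t) p & ~GGC_PAGE_MASK) gives the
   base of the page containing P and ((size_t) p & GGC_PAGE_MASK) the
   offset within it.  */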
130 #if 0
131 /* Alternative definitions which use the runtime page size. */
132 #define GGC_PAGE_SIZE G.pagesize
133 #define GGC_PAGE_MASK G.page_mask
134 #define GGC_PAGE_SHIFT G.lg_pagesize
135 #endif
137 /* The size of a small page managed by the garbage collector. This
138 must currently be GGC_PAGE_SIZE, but with a few changes could
139 be any multiple of it to reduce certain kinds of overhead. */
140 #define SMALL_PAGE_SIZE GGC_PAGE_SIZE
142 /* Free bin information. These numbers may be in need of re-tuning.
143 In general, decreasing the number of free bins would seem to
144 increase the time it takes to allocate... */
146 /* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
147 today. */
149 #define NUM_FREE_BINS 64
150 #define FREE_BIN_DELTA MAX_ALIGNMENT
151 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
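/* As a worked example, on a host where MAX_ALIGNMENT (and therefore
   FREE_BIN_DELTA) is 8: a 24-byte chunk lands in bin SIZE_BIN_DOWN (24) == 3,
   a 512-byte chunk in bin 64, and anything larger than
   NUM_FREE_BINS * FREE_BIN_DELTA == 512 bytes goes to the variable-sized
   "other" bin 0.  */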
153 /* Allocation and marking parameters. */
155 /* The smallest allocatable unit to keep track of. */
156 #define BYTES_PER_ALLOC_BIT MAX_ALIGNMENT
158 /* The smallest markable unit. If we require each allocated object
159 to contain at least two allocatable units, we can use half as many
160 bits for the mark bitmap. But this adds considerable complexity
161 to sweeping. */
162 #define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT
164 #define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
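/* For instance, with an 8-byte BYTES_PER_MARK_BIT and the usual 32-bit
   mark_type, BYTES_PER_MARK_WORD is 8 * 8 * 4 == 256 bytes, so a
   4096-byte page needs 16 mark words.  */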
166 /* We use this structure to determine the alignment required for
167 allocations.
169 There are several things wrong with this estimation of alignment.
171 The maximum alignment for a structure is often less than the
172 maximum alignment for a basic data type; for instance, on some
173 targets long long must be aligned to sizeof (int) in a structure
174 and sizeof (long long) in a variable. i386-linux is one example;
175 Darwin is another (sometimes, depending on the compiler in use).
177 Also, long double is not included. Nothing in GCC uses long
178 double, so we assume that this is OK. On powerpc-darwin, adding
179 long double would bring the maximum alignment up to 16 bytes,
180 and until we need long double (or to vectorize compiler operations)
181 that's painfully wasteful. This will need to change, some day. */
183 struct max_alignment {
184 char c;
185 union {
186 HOST_WIDEST_INT i;
187 double d;
188 } u;
191 /* The biggest alignment required. */
193 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
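/* For example, on a host where both HOST_WIDEST_INT and double require
   8-byte alignment, the union U is placed at offset 8 after the single
   char, so MAX_ALIGNMENT evaluates to 8.  */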
195 /* Compute the smallest multiple of F that is >= X. */
197 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
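/* E.g. ROUND_UP (10, 8) is CEIL (10, 8) * 8 == 2 * 8 == 16.  */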
199 /* Types to use for the allocation and mark bitmaps. It might be
200 a good idea to add ffsl to libiberty and use unsigned long
201 instead; that could speed us up where long is wider than int. */
203 typedef unsigned int alloc_type;
204 typedef unsigned int mark_type;
205 #define alloc_ffs(x) ffs(x)
207 /* A page_entry records the status of an allocation page. This is the
208 common data between all three kinds of pages - small, large, and
209 PCH. */
210 typedef struct page_entry
212 /* The address at which the memory is allocated. */
213 char *page;
215 /* The zone that this page entry belongs to. */
216 struct alloc_zone *zone;
218 /* How many collections we've survived. */
219 size_t survived;
221 /* Does this page contain small objects, or one large object? */
222 bool large_p;
224 /* Is this page part of the loaded PCH? */
225 bool pch_p;
226 } page_entry;
228 /* Additional data needed for small pages. */
229 struct small_page_entry
231 struct page_entry common;
233 /* The next small page entry, or NULL if this is the last. */
234 struct small_page_entry *next;
236 /* If currently marking this zone, a pointer to the mark bits
237 for this page. If we aren't currently marking this zone,
238 this pointer may be stale (pointing to freed memory). */
239 mark_type *mark_bits;
241 /* The allocation bitmap. This array extends far enough to have
242 one bit for every BYTES_PER_ALLOC_BIT bytes in the page. */
243 alloc_type alloc_bits[1];
246 /* Additional data needed for large pages. */
247 struct large_page_entry
249 struct page_entry common;
251 /* The next large page entry, or NULL if this is the last. */
252 struct large_page_entry *next;
254 /* The number of bytes allocated, not including the page entry. */
255 size_t bytes;
257 /* The previous page in the list, so that we can unlink this one. */
258 struct large_page_entry *prev;
260 /* During marking, is this object marked? */
261 bool mark_p;
264 /* A two-level tree is used to look up the page-entry for a given
265 pointer. Two chunks of the pointer's bits are extracted to index
266 the first and second levels of the tree, as follows:
268                                    HOST_PAGE_SIZE_BITS
269                             32           |      |
270          msb +----------------+----+------+------+ lsb
271                                 |     |
272                      PAGE_L1_BITS     |
274                            PAGE_L2_BITS
276 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
277 pages are aligned on system page boundaries. The next most
278 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
279 index values in the lookup table, respectively.
281 For 32-bit architectures and the settings below, there are no
282 leftover bits. For architectures with wider pointers, the lookup
283 tree points to a list of pages, which must be scanned to find the
284 correct one. */
286 #define PAGE_L1_BITS (8)
287 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
288 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
289 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
291 #define LOOKUP_L1(p) \
292 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
294 #define LOOKUP_L2(p) \
295 (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
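/* A worked example, compiled out since it is purely illustrative (the
   helper below is hypothetical and exists only for the example): with
   the settings above, GGC_PAGE_SHIFT == 12 and PAGE_L1_BITS == 8, so
   PAGE_L2_BITS == 12 and a 32-bit address decomposes as follows.  */
#if 0
static void
example_lookup_indices (void)
{
  size_t example_address = 0x1234abcd;
  size_t L1 = LOOKUP_L1 (example_address); /* (0x1234abcd >> 24) & 0xff == 0x12.  */
  size_t L2 = LOOKUP_L2 (example_address); /* (0x1234abcd >> 12) & 0xfff == 0x34a.  */
  /* The page entry, if any, then lives at base[L1][L2] in the two-level
     table (see lookup_page_table_entry below).  */
}
#endif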
297 #if HOST_BITS_PER_PTR <= 32
299 /* On 32-bit hosts, we use a two level page table, as pictured above. */
300 typedef page_entry **page_table[PAGE_L1_SIZE];
302 #else
304 /* On 64-bit hosts, we use the same two level page tables plus a linked
305 list that disambiguates the top 32-bits. There will almost always be
306 exactly one entry in the list. */
307 typedef struct page_table_chain
309 struct page_table_chain *next;
310 size_t high_bits;
311 page_entry **table[PAGE_L1_SIZE];
312 } *page_table;
314 #endif
316 /* The global variables. */
317 static struct globals
319 /* The linked list of zones. */
320 struct alloc_zone *zones;
322 /* Lookup table for associating allocation pages with object addresses. */
323 page_table lookup;
325 /* The system's page size, and related constants. */
326 size_t pagesize;
327 size_t lg_pagesize;
328 size_t page_mask;
330 /* The size to allocate for a small page entry. This includes
331 the size of the structure and the size of the allocation
332 bitmap. */
333 size_t small_page_overhead;
335 #if defined (HAVE_MMAP_DEV_ZERO)
336 /* A file descriptor open to /dev/zero for reading. */
337 int dev_zero_fd;
338 #endif
340 /* Allocate pages in chunks of this size, to throttle calls to memory
341 allocation routines. The first page is used, the rest go onto the
342 free list. */
343 size_t quire_size;
345 /* The file descriptor for debugging output. */
346 FILE *debug_file;
347 } G;
349 /* A zone allocation structure. There is one of these for every
350 distinct allocation zone. */
351 struct alloc_zone
353 /* The most recent free chunk is saved here, instead of in the linked
354 free list, to decrease list manipulation. It is most likely that we
355 will want this one. */
356 char *cached_free;
357 size_t cached_free_size;
359 /* Linked lists of free storage. Slot I (for 1 <= I <= NUM_FREE_BINS) holds
360 chunks of size I * FREE_BIN_DELTA. All other chunks are in slot 0. */
361 struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
363 /* The highest bin index which might be non-empty. It may turn out
364 to be empty, in which case we have to search downwards. */
365 size_t high_free_bin;
367 /* Bytes currently allocated in this zone. */
368 size_t allocated;
370 /* Linked list of the small pages in this zone. */
371 struct small_page_entry *pages;
373 /* Doubly linked list of large pages in this zone. */
374 struct large_page_entry *large_pages;
376 /* If we are currently marking this zone, a pointer to the mark bits. */
377 mark_type *mark_bits;
379 /* Name of the zone. */
380 const char *name;
382 /* The number of small pages currently allocated in this zone. */
383 size_t n_small_pages;
385 /* Bytes allocated at the end of the last collection. */
386 size_t allocated_last_gc;
388 /* Total amount of memory mapped. */
389 size_t bytes_mapped;
391 /* A cache of free system pages. */
392 struct small_page_entry *free_pages;
394 /* Next zone in the linked list of zones. */
395 struct alloc_zone *next_zone;
397 /* True if this zone was collected during this collection. */
398 bool was_collected;
400 /* True if this zone should be destroyed after the next collection. */
401 bool dead;
403 struct
405 /* Total GC-allocated memory. */
406 unsigned long long total_allocated;
407 /* Total overhead for GC-allocated memory. */
408 unsigned long long total_overhead;
410 /* Total allocations and overhead for sizes less than 32, 64 and 128.
411 These sizes are interesting because they are typical cache line
412 sizes. */
414 unsigned long long total_allocated_under32;
415 unsigned long long total_overhead_under32;
417 unsigned long long total_allocated_under64;
418 unsigned long long total_overhead_under64;
420 unsigned long long total_allocated_under128;
421 unsigned long long total_overhead_under128;
422 } stats;
423 } main_zone;
425 /* Some default zones. */
426 struct alloc_zone rtl_zone;
427 struct alloc_zone tree_zone;
428 struct alloc_zone tree_id_zone;
430 /* The PCH zone does not need a normal zone structure, and it does
431 not live on the linked list of zones. */
432 struct pch_zone
434 /* The start of the PCH zone. NULL if there is none. */
435 char *page;
437 /* The end of the PCH zone. NULL if there is none. */
438 char *end;
440 /* The size of the PCH zone. 0 if there is none. */
441 size_t bytes;
443 /* The allocation bitmap for the PCH zone. */
444 alloc_type *alloc_bits;
446 /* If we are currently marking, the mark bitmap for the PCH zone.
447 When it is first read in, we could avoid marking the PCH,
448 because it will not contain any pointers to GC memory outside
449 of the PCH; however, the PCH is currently mapped as writable,
450 so we must mark it in case new pointers are added. */
451 mark_type *mark_bits;
452 } pch_zone;
454 #ifdef USING_MMAP
455 static char *alloc_anon (char *, size_t, struct alloc_zone *);
456 #endif
457 static struct small_page_entry * alloc_small_page (struct alloc_zone *);
458 static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
459 static void free_chunk (char *, size_t, struct alloc_zone *);
460 static void free_small_page (struct small_page_entry *);
461 static void free_large_page (struct large_page_entry *);
462 static void release_pages (struct alloc_zone *);
463 static void sweep_pages (struct alloc_zone *);
464 static bool ggc_collect_1 (struct alloc_zone *, bool);
465 static void new_ggc_zone_1 (struct alloc_zone *, const char *);
467 /* Traverse the page table and find the entry for a page.
468 Die (probably) if the object wasn't allocated via GC. */
470 static inline page_entry *
471 lookup_page_table_entry (const void *p)
473 page_entry ***base;
474 size_t L1, L2;
476 #if HOST_BITS_PER_PTR <= 32
477 base = &G.lookup[0];
478 #else
479 page_table table = G.lookup;
480 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
481 while (table->high_bits != high_bits)
482 table = table->next;
483 base = &table->table[0];
484 #endif
486 /* Extract the level 1 and 2 indices. */
487 L1 = LOOKUP_L1 (p);
488 L2 = LOOKUP_L2 (p);
490 return base[L1][L2];
493 /* Traverse the page table and find the entry for a page.
494 Return NULL if the object wasn't allocated via the GC. */
496 static inline page_entry *
497 lookup_page_table_if_allocated (const void *p)
499 page_entry ***base;
500 size_t L1, L2;
502 #if HOST_BITS_PER_PTR <= 32
503 base = &G.lookup[0];
504 #else
505 page_table table = G.lookup;
506 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
507 while (1)
509 if (table == NULL)
510 return NULL;
511 if (table->high_bits == high_bits)
512 break;
513 table = table->next;
515 base = &table->table[0];
516 #endif
518 /* Extract the level 1 and 2 indices. */
519 L1 = LOOKUP_L1 (p);
520 if (! base[L1])
521 return NULL;
523 L2 = LOOKUP_L2 (p);
524 if (L2 >= PAGE_L2_SIZE)
525 return NULL;
526 /* We might have a page entry which does not correspond exactly to a
527 system page. */
528 if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
529 return NULL;
531 return base[L1][L2];
534 /* Set the page table entry for the page that starts at P. If ENTRY
535 is NULL, clear the entry. */
537 static void
538 set_page_table_entry (void *p, page_entry *entry)
540 page_entry ***base;
541 size_t L1, L2;
543 #if HOST_BITS_PER_PTR <= 32
544 base = &G.lookup[0];
545 #else
546 page_table table;
547 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
548 for (table = G.lookup; table; table = table->next)
549 if (table->high_bits == high_bits)
550 goto found;
552 /* Not found -- allocate a new table. */
553 table = XCNEW (struct page_table_chain);
554 table->next = G.lookup;
555 table->high_bits = high_bits;
556 G.lookup = table;
557 found:
558 base = &table->table[0];
559 #endif
561 /* Extract the level 1 and 2 indices. */
562 L1 = LOOKUP_L1 (p);
563 L2 = LOOKUP_L2 (p);
565 if (base[L1] == NULL)
566 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
568 base[L1][L2] = entry;
571 /* Find the page table entry associated with OBJECT. */
573 static inline struct page_entry *
574 zone_get_object_page (const void *object)
576 return lookup_page_table_entry (object);
579 /* Find which element of the alloc_bits array OBJECT should be
580 recorded in. */
581 static inline unsigned int
582 zone_get_object_alloc_word (const void *object)
584 return (((size_t) object & (GGC_PAGE_SIZE - 1))
585 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
588 /* Find which bit of the appropriate word in the alloc_bits array
589 OBJECT should be recorded in. */
590 static inline unsigned int
591 zone_get_object_alloc_bit (const void *object)
593 return (((size_t) object / BYTES_PER_ALLOC_BIT)
594 % (8 * sizeof (alloc_type)));
597 /* Find which element of the mark_bits array OBJECT should be recorded
598 in. */
599 static inline unsigned int
600 zone_get_object_mark_word (const void *object)
602 return (((size_t) object & (GGC_PAGE_SIZE - 1))
603 / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
606 /* Find which bit of the appropriate word in the mark_bits array
607 OBJECT should be recorded in. */
608 static inline unsigned int
609 zone_get_object_mark_bit (const void *object)
611 return (((size_t) object / BYTES_PER_MARK_BIT)
612 % (8 * sizeof (mark_type)));
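/* To illustrate the bitmap indexing (assuming BYTES_PER_ALLOC_BIT is 8 on
   this host; alloc_type is checked below to be 4 bytes): an object 320
   bytes into its page-aligned page covers allocation bit 320 / 8 == 40,
   which is word 1, bit 8 of the page's bitmap.  The mark bit is computed
   the same way, since BYTES_PER_MARK_BIT equals BYTES_PER_ALLOC_BIT
   here.  */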
615 /* Set the allocation bit corresponding to OBJECT in its page's
616 bitmap. Used to split this object from the preceding one. */
617 static inline void
618 zone_set_object_alloc_bit (const void *object)
620 struct small_page_entry *page
621 = (struct small_page_entry *) zone_get_object_page (object);
622 unsigned int start_word = zone_get_object_alloc_word (object);
623 unsigned int start_bit = zone_get_object_alloc_bit (object);
625 page->alloc_bits[start_word] |= 1L << start_bit;
628 /* Clear the allocation bit corresponding to OBJECT in PAGE's
629 bitmap. Used to coalesce this object with the preceding
630 one. */
631 static inline void
632 zone_clear_object_alloc_bit (struct small_page_entry *page,
633 const void *object)
635 unsigned int start_word = zone_get_object_alloc_word (object);
636 unsigned int start_bit = zone_get_object_alloc_bit (object);
638 /* Would xor be quicker? */
639 page->alloc_bits[start_word] &= ~(1L << start_bit);
642 /* Find the size of the object which starts at START_WORD and
643 START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
644 Helper function for ggc_get_size and zone_find_object_size. */
646 static inline size_t
647 zone_object_size_1 (alloc_type *alloc_bits,
648 size_t start_word, size_t start_bit,
649 size_t max_size)
651 size_t size;
652 alloc_type alloc_word;
653 int indx;
655 /* Load the first word. */
656 alloc_word = alloc_bits[start_word++];
658 /* If that was the last bit in this word, we'll want to continue
659 with the next word. Otherwise, handle the rest of this word. */
660 if (start_bit)
662 indx = alloc_ffs (alloc_word >> start_bit);
663 if (indx)
664 /* indx is 1-based. We started at the bit after the object's
665 start, but we also ended at the bit after the object's end.
666 It cancels out. */
667 return indx * BYTES_PER_ALLOC_BIT;
669 /* The extra 1 accounts for the starting unit, before start_bit. */
670 size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;
672 if (size >= max_size)
673 return max_size;
675 alloc_word = alloc_bits[start_word++];
677 else
678 size = BYTES_PER_ALLOC_BIT;
680 while (alloc_word == 0)
682 size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
683 if (size >= max_size)
684 return max_size;
685 alloc_word = alloc_bits[start_word++];
688 indx = alloc_ffs (alloc_word);
689 return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
692 /* Find the size of OBJECT on small page PAGE. */
694 static inline size_t
695 zone_find_object_size (struct small_page_entry *page,
696 const void *object)
698 const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
699 unsigned int start_word = zone_get_object_alloc_word (object_midptr);
700 unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
701 size_t max_size = (page->common.page + SMALL_PAGE_SIZE
702 - (const char *) object);
704 return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
705 max_size);
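/* Put differently, an object's size is the distance from its own
   allocation bit to the next set bit: two set bits five units apart
   describe a 5 * BYTES_PER_ALLOC_BIT == 40-byte object on a host where
   BYTES_PER_ALLOC_BIT is 8.  */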
708 /* highest_bit assumes that alloc_type is 32 bits. */
709 extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];
711 /* Find the highest set bit in VALUE. Returns the bit number of that
712 bit, using the same values as ffs. */
713 static inline alloc_type
714 highest_bit (alloc_type value)
716 /* This also assumes that alloc_type is unsigned. */
717 value |= value >> 1;
718 value |= value >> 2;
719 value |= value >> 4;
720 value |= value >> 8;
721 value |= value >> 16;
722 value = value ^ (value >> 1);
723 return alloc_ffs (value);
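/* For example, highest_bit (0x28) smears the value to 0x3f, the
   value ^ (value >> 1) step leaves only 0x20, and alloc_ffs then
   returns 6, the 1-based position of the highest set bit.  */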
726 /* Find the offset from the start of an object to P, which may point
727 into the interior of the object. */
729 static unsigned long
730 zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
731 size_t start_bit)
733 unsigned int offset_in_bits;
734 alloc_type alloc_word = alloc_bits[start_word];
736 /* Mask off any bits after the initial bit, but make sure to include
737 the initial bit in the result. Note that START_BIT is
738 0-based. */
739 if (start_bit < 8 * sizeof (alloc_type) - 1)
740 alloc_word &= (1 << (start_bit + 1)) - 1;
741 offset_in_bits = start_bit;
743 /* Search for the start of the object. */
744 while (alloc_word == 0 && start_word > 0)
746 alloc_word = alloc_bits[--start_word];
747 offset_in_bits += 8 * sizeof (alloc_type);
749 /* We must always find a set bit. */
750 gcc_assert (alloc_word != 0);
751 /* Note that the result of highest_bit is 1-based. */
752 offset_in_bits -= highest_bit (alloc_word) - 1;
754 return BYTES_PER_ALLOC_BIT * offset_in_bits;
757 /* Allocate the mark bits for every zone, and set the pointers on each
758 page. */
759 static void
760 zone_allocate_marks (void)
762 struct alloc_zone *zone;
764 for (zone = G.zones; zone; zone = zone->next_zone)
766 struct small_page_entry *page;
767 mark_type *cur_marks;
768 size_t mark_words, mark_words_per_page;
769 #ifdef ENABLE_CHECKING
770 size_t n = 0;
771 #endif
773 mark_words_per_page
774 = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
775 mark_words = zone->n_small_pages * mark_words_per_page;
776 zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
777 mark_words);
778 cur_marks = zone->mark_bits;
779 for (page = zone->pages; page; page = page->next)
781 page->mark_bits = cur_marks;
782 cur_marks += mark_words_per_page;
783 #ifdef ENABLE_CHECKING
784 n++;
785 #endif
787 gcc_checking_assert (n == zone->n_small_pages);
790 /* We don't collect the PCH zone, but we do have to mark it
791 (for now). */
792 if (pch_zone.bytes)
793 pch_zone.mark_bits
794 = (mark_type *) xcalloc (sizeof (mark_type),
795 CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
798 /* After marking and sweeping, release the memory used for mark bits. */
799 static void
800 zone_free_marks (void)
802 struct alloc_zone *zone;
804 for (zone = G.zones; zone; zone = zone->next_zone)
805 if (zone->mark_bits)
807 free (zone->mark_bits);
808 zone->mark_bits = NULL;
811 if (pch_zone.bytes)
813 free (pch_zone.mark_bits);
814 pch_zone.mark_bits = NULL;
818 #ifdef USING_MMAP
819 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
820 (if non-null). The ifdef structure here is intended to cause a
821 compile error unless exactly one of the HAVE_* is defined. */
823 static inline char *
824 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
826 #ifdef HAVE_MMAP_ANON
827 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
828 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
829 #endif
830 #ifdef HAVE_MMAP_DEV_ZERO
831 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
832 MAP_PRIVATE, G.dev_zero_fd, 0);
833 #endif
835 if (page == (char *) MAP_FAILED)
837 perror ("virtual memory exhausted");
838 exit (FATAL_EXIT_CODE);
841 /* Remember that we allocated this memory. */
842 zone->bytes_mapped += size;
844 /* Pretend we don't have access to the allocated pages. We'll enable
845 access to smaller pieces of the area in ggc_internal_alloc. Discard the
846 handle to avoid handle leak. */
847 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
849 return page;
851 #endif
853 /* Allocate a new page for allocating small objects in ZONE, and
854 return an entry for it. */
856 static struct small_page_entry *
857 alloc_small_page (struct alloc_zone *zone)
859 struct small_page_entry *entry;
861 /* Check the list of free pages for one we can use. */
862 entry = zone->free_pages;
863 if (entry != NULL)
865 /* Recycle the allocated memory from this page ... */
866 zone->free_pages = entry->next;
868 else
870 /* We want just one page. Allocate a bunch of them and put the
871 extras on the freelist. (Can only do this optimization with
872 mmap for backing store.) */
873 struct small_page_entry *e, *f = zone->free_pages;
874 int i;
875 char *page;
877 page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);
879 /* This loop counts down so that the chain will be in ascending
880 memory order. */
881 for (i = G.quire_size - 1; i >= 1; i--)
883 e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
884 e->common.page = page + (i << GGC_PAGE_SHIFT);
885 e->common.zone = zone;
886 e->next = f;
887 f = e;
888 set_page_table_entry (e->common.page, &e->common);
891 zone->free_pages = f;
893 entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
894 entry->common.page = page;
895 entry->common.zone = zone;
896 set_page_table_entry (page, &entry->common);
899 zone->n_small_pages++;
901 if (GGC_DEBUG_LEVEL >= 2)
902 fprintf (G.debug_file,
903 "Allocating %s page at %p, data %p-%p\n",
904 entry->common.zone->name, (PTR) entry, entry->common.page,
905 entry->common.page + SMALL_PAGE_SIZE - 1);
907 return entry;
910 /* Allocate a large page of size SIZE in ZONE. */
912 static struct large_page_entry *
913 alloc_large_page (size_t size, struct alloc_zone *zone)
915 struct large_page_entry *entry;
916 char *page;
917 size_t needed_size;
919 needed_size = size + sizeof (struct large_page_entry);
920 page = XNEWVAR (char, needed_size);
922 entry = (struct large_page_entry *) page;
924 entry->next = NULL;
925 entry->common.page = page + sizeof (struct large_page_entry);
926 entry->common.large_p = true;
927 entry->common.pch_p = false;
928 entry->common.zone = zone;
929 entry->common.survived = 0;
930 entry->mark_p = false;
931 entry->bytes = size;
932 entry->prev = NULL;
934 set_page_table_entry (entry->common.page, &entry->common);
936 if (GGC_DEBUG_LEVEL >= 2)
937 fprintf (G.debug_file,
938 "Allocating %s large page at %p, data %p-%p\n",
939 entry->common.zone->name, (PTR) entry, entry->common.page,
940 entry->common.page + SMALL_PAGE_SIZE - 1);
942 return entry;
946 /* For a page that is no longer needed, put it on the free page list. */
948 static inline void
949 free_small_page (struct small_page_entry *entry)
951 if (GGC_DEBUG_LEVEL >= 2)
952 fprintf (G.debug_file,
953 "Deallocating %s page at %p, data %p-%p\n",
954 entry->common.zone->name, (PTR) entry,
955 entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
957 gcc_assert (!entry->common.large_p);
959 /* Mark the page as inaccessible. Discard the handle to
960 avoid handle leak. */
961 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
962 SMALL_PAGE_SIZE));
964 entry->next = entry->common.zone->free_pages;
965 entry->common.zone->free_pages = entry;
966 entry->common.zone->n_small_pages--;
969 /* Release a large page that is no longer needed. */
971 static inline void
972 free_large_page (struct large_page_entry *entry)
974 if (GGC_DEBUG_LEVEL >= 2)
975 fprintf (G.debug_file,
976 "Deallocating %s page at %p, data %p-%p\n",
977 entry->common.zone->name, (PTR) entry,
978 entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
980 gcc_assert (entry->common.large_p);
982 set_page_table_entry (entry->common.page, NULL);
983 free (entry);
986 /* Release the free page cache to the system. */
988 static void
989 release_pages (struct alloc_zone *zone)
991 #ifdef USING_MMAP
992 struct small_page_entry *p, *next;
993 char *start;
994 size_t len;
996 /* Gather up adjacent pages so they are unmapped together. */
997 p = zone->free_pages;
999 while (p)
1001 start = p->common.page;
1002 next = p->next;
1003 len = SMALL_PAGE_SIZE;
1004 set_page_table_entry (p->common.page, NULL);
1005 p = next;
1007 while (p && p->common.page == start + len)
1009 next = p->next;
1010 len += SMALL_PAGE_SIZE;
1011 set_page_table_entry (p->common.page, NULL);
1012 p = next;
1015 munmap (start, len);
1016 zone->bytes_mapped -= len;
1019 zone->free_pages = NULL;
1020 #endif
1023 /* Place the block at PTR of size SIZE on the free list for ZONE. */
1025 static inline void
1026 free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
1028 struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
1029 size_t bin = 0;
1031 bin = SIZE_BIN_DOWN (size);
1032 gcc_assert (bin != 0);
1033 if (bin > NUM_FREE_BINS)
1035 bin = 0;
1036 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
1037 sizeof (struct
1038 alloc_chunk)));
1039 chunk->size = size;
1040 chunk->next_free = zone->free_chunks[bin];
1041 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
1042 + sizeof (struct
1043 alloc_chunk),
1044 size
1045 - sizeof (struct
1046 alloc_chunk)));
1048 else
1050 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
1051 sizeof (struct
1052 alloc_chunk *)));
1053 chunk->next_free = zone->free_chunks[bin];
1054 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
1055 + sizeof (struct
1056 alloc_chunk *),
1057 size
1058 - sizeof (struct
1059 alloc_chunk *)));
1062 zone->free_chunks[bin] = chunk;
1063 if (bin > zone->high_free_bin)
1064 zone->high_free_bin = bin;
1065 if (GGC_DEBUG_LEVEL >= 3)
1066 fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
1069 /* For a given size of memory requested for allocation, return the
1070 actual size that is going to be allocated. */
1072 size_t
1073 ggc_round_alloc_size (size_t requested_size)
1075 size_t size;
1077 /* Make sure that zero-sized allocations get a unique and freeable
1078 pointer. */
1079 if (requested_size == 0)
1080 size = MAX_ALIGNMENT;
1081 else
1082 size = (requested_size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
1084 return size;
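/* For example, with an 8-byte MAX_ALIGNMENT a 13-byte request rounds up to
   (13 + 7) & -8 == 16 bytes, and a zero-byte request still gets
   MAX_ALIGNMENT bytes so that the returned pointer is unique and can be
   freed.  */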
1087 /* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE. */
1089 void *
1090 ggc_internal_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
1091 MEM_STAT_DECL)
1093 size_t bin;
1094 size_t csize;
1095 struct small_page_entry *entry;
1096 struct alloc_chunk *chunk, **pp;
1097 void *result;
1098 size_t size = ggc_round_alloc_size (orig_size);
1100 /* Try to allocate the object from several different sources. Each
1101 of these cases is responsible for setting RESULT and SIZE to
1102 describe the allocated block, before jumping to FOUND. If a
1103 chunk is split, the allocate bit for the new chunk should also be
1104 set.
1106 Large objects are handled specially. However, they'll just fail
1107 the next couple of conditions, so we can wait to check for them
1108 below. The large object case is relatively rare (< 1%), so this
1109 is a win. */
1111 /* First try to split the last chunk we allocated. For best
1112 fragmentation behavior it would be better to look for a
1113 free bin of the appropriate size for a small object. However,
1114 we're unlikely (1% - 7%) to find one, and this gives better
1115 locality behavior anyway. This case handles the lion's share
1116 of all calls to this function. */
1117 if (size <= zone->cached_free_size)
1119 result = zone->cached_free;
1121 zone->cached_free_size -= size;
1122 if (zone->cached_free_size)
1124 zone->cached_free += size;
1125 zone_set_object_alloc_bit (zone->cached_free);
1128 goto found;
1131 /* Next, try to find a free bin of exactly the right size. */
1133 /* We want to round SIZE up, rather than down, but we know it's
1134 already aligned to at least FREE_BIN_DELTA, so we can just
1135 shift. */
1136 bin = SIZE_BIN_DOWN (size);
1138 if (bin <= NUM_FREE_BINS
1139 && (chunk = zone->free_chunks[bin]) != NULL)
1141 /* We have a chunk of the right size. Pull it off the free list
1142 and use it. */
1144 zone->free_chunks[bin] = chunk->next_free;
1146 /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
1147 == FREE_BIN_DELTA. */
1148 result = chunk;
1150 /* The allocation bits are already set correctly. HIGH_FREE_BIN
1151 may now be wrong, if this was the last chunk in the high bin.
1152 Rather than fixing it up now, wait until we need to search
1153 the free bins. */
1155 goto found;
1158 /* Next, if there wasn't a chunk of the ideal size, look for a chunk
1159 to split. We can find one in the too-big bin, or in the largest
1160 sized bin with a chunk in it. Try the largest normal-sized bin
1161 first. */
1163 if (zone->high_free_bin > bin)
1165 /* Find the highest numbered free bin. It will be at or below
1166 the watermark. */
1167 while (zone->high_free_bin > bin
1168 && zone->free_chunks[zone->high_free_bin] == NULL)
1169 zone->high_free_bin--;
1171 if (zone->high_free_bin > bin)
1173 size_t tbin = zone->high_free_bin;
1174 chunk = zone->free_chunks[tbin];
1176 /* Remove the chunk from its previous bin. */
1177 zone->free_chunks[tbin] = chunk->next_free;
1179 result = (char *) chunk;
1181 /* Save the rest of the chunk for future allocation. */
1182 if (zone->cached_free_size)
1183 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1185 chunk = (struct alloc_chunk *) ((char *) result + size);
1186 zone->cached_free = (char *) chunk;
1187 zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;
1189 /* Mark the new free chunk as an object, so that we can
1190 find the size of the newly allocated object. */
1191 zone_set_object_alloc_bit (chunk);
1193 /* HIGH_FREE_BIN may now be wrong, if this was the last
1194 chunk in the high bin. Rather than fixing it up now,
1195 wait until we need to search the free bins. */
1197 goto found;
1201 /* Failing that, look through the "other" bucket for a chunk
1202 that is large enough. */
1203 pp = &(zone->free_chunks[0]);
1204 chunk = *pp;
1205 while (chunk && chunk->size < size)
1207 pp = &chunk->next_free;
1208 chunk = *pp;
1211 if (chunk)
1213 /* Remove the chunk from its previous bin. */
1214 *pp = chunk->next_free;
1216 result = (char *) chunk;
1218 /* Save the rest of the chunk for future allocation, if there's any
1219 left over. */
1220 csize = chunk->size;
1221 if (csize > size)
1223 if (zone->cached_free_size)
1224 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1226 chunk = (struct alloc_chunk *) ((char *) result + size);
1227 zone->cached_free = (char *) chunk;
1228 zone->cached_free_size = csize - size;
1230 /* Mark the new free chunk as an object. */
1231 zone_set_object_alloc_bit (chunk);
1234 goto found;
1237 /* Handle large allocations. We could choose any threshold between
1238 GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
1239 GGC_PAGE_SIZE. It can't be smaller, because then it wouldn't
1240 be guaranteed to have a unique entry in the lookup table. Large
1241 allocations will always fall through to here. */
1242 if (size > GGC_PAGE_SIZE)
1244 struct large_page_entry *entry = alloc_large_page (size, zone);
1246 entry->common.survived = 0;
1248 entry->next = zone->large_pages;
1249 if (zone->large_pages)
1250 zone->large_pages->prev = entry;
1251 zone->large_pages = entry;
1253 result = entry->common.page;
1255 goto found;
1258 /* Failing everything above, allocate a new small page. */
1260 entry = alloc_small_page (zone);
1261 entry->next = zone->pages;
1262 zone->pages = entry;
1264 /* Mark the first chunk in the new page. */
1265 entry->alloc_bits[0] = 1;
1267 result = entry->common.page;
1268 if (size < SMALL_PAGE_SIZE)
1270 if (zone->cached_free_size)
1271 free_chunk (zone->cached_free, zone->cached_free_size, zone);
1273 zone->cached_free = (char *) result + size;
1274 zone->cached_free_size = SMALL_PAGE_SIZE - size;
1276 /* Mark the new free chunk as an object. */
1277 zone_set_object_alloc_bit (zone->cached_free);
1280 found:
1282 /* We could save TYPE in the chunk, but we don't use that for
1283 anything yet. If we wanted to, we could do it by adding it
1284 either before the beginning of the chunk or after its end,
1285 and adjusting the size and pointer appropriately. */
1287 /* We'll probably write to this after we return. */
1288 prefetchw (result);
1290 #ifdef ENABLE_GC_CHECKING
1291 /* `Poison' the entire allocated object. */
1292 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1293 memset (result, 0xaf, size);
1294 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
1295 size - orig_size));
1296 #endif
1298 /* Tell Valgrind that the memory is there, but its content isn't
1299 defined. The bytes at the end of the object are still marked
1300 inaccessible. */
1301 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));
1303 /* Keep track of how many bytes are being allocated. This
1304 information is used in deciding when to collect. */
1305 zone->allocated += size;
1307 timevar_ggc_mem_total += size;
1309 if (GATHER_STATISTICS)
1310 ggc_record_overhead (orig_size, size - orig_size, result FINAL_PASS_MEM_STAT);
1313 size_t object_size = size;
1314 size_t overhead = object_size - orig_size;
1316 zone->stats.total_overhead += overhead;
1317 zone->stats.total_allocated += object_size;
1319 if (orig_size <= 32)
1321 zone->stats.total_overhead_under32 += overhead;
1322 zone->stats.total_allocated_under32 += object_size;
1324 if (orig_size <= 64)
1326 zone->stats.total_overhead_under64 += overhead;
1327 zone->stats.total_allocated_under64 += object_size;
1329 if (orig_size <= 128)
1331 zone->stats.total_overhead_under128 += overhead;
1332 zone->stats.total_allocated_under128 += object_size;
1335 #endif
1337 if (GGC_DEBUG_LEVEL >= 3)
1338 fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
1339 (unsigned long) size, result);
1341 return result;
1344 #define ggc_internal_alloc_zone_pass_stat(s,z) \
1345 ggc_internal_alloc_zone_stat (s,z PASS_MEM_STAT)
1347 void *
1348 ggc_internal_cleared_alloc_zone_stat (size_t orig_size,
1349 struct alloc_zone *zone MEM_STAT_DECL)
1351 void * result = ggc_internal_alloc_zone_pass_stat (orig_size, zone);
1352 memset (result, 0, orig_size);
1353 return result;
1357 /* Allocate a chunk of SIZE bytes of GC type GTE, in an appropriate zone
1358 for that type. */
1360 void *
1361 ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
1362 MEM_STAT_DECL)
1364 switch (gte)
1366 case gt_ggc_e_14lang_tree_node:
1367 return ggc_internal_alloc_zone_pass_stat (size, &tree_zone);
1369 case gt_ggc_e_7rtx_def:
1370 return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);
1372 case gt_ggc_e_9rtvec_def:
1373 return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);
1375 default:
1376 return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
1380 /* Normal GC allocation simply allocates into the main zone. */
1382 void *
1383 ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
1385 return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
1388 /* Poison the chunk. */
1389 #ifdef ENABLE_GC_CHECKING
1390 #define poison_region(PTR, SIZE) \
1391 do { \
1392 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE))); \
1393 memset ((PTR), 0xa5, (SIZE)); \
1394 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE))); \
1395 } while (0)
1396 #else
1397 #define poison_region(PTR, SIZE)
1398 #endif
1400 /* Free the object at P. */
1402 void
1403 ggc_free (void *p)
1405 struct page_entry *page;
1407 if (GATHER_STATISTICS)
1408 ggc_free_overhead (p);
1410 poison_region (p, ggc_get_size (p));
1412 page = zone_get_object_page (p);
1414 if (page->large_p)
1416 struct large_page_entry *large_page
1417 = (struct large_page_entry *) page;
1419 /* Remove the page from the linked list. */
1420 if (large_page->prev)
1421 large_page->prev->next = large_page->next;
1422 else
1424 gcc_assert (large_page->common.zone->large_pages == large_page);
1425 large_page->common.zone->large_pages = large_page->next;
1427 if (large_page->next)
1428 large_page->next->prev = large_page->prev;
1430 large_page->common.zone->allocated -= large_page->bytes;
1432 /* Release the memory associated with this object. */
1433 free_large_page (large_page);
1435 else if (page->pch_p)
1436 /* Don't do anything. We won't allocate a new object from the
1437 PCH zone so there's no point in releasing anything. */
1439 else
1441 size_t size = ggc_get_size (p);
1443 page->zone->allocated -= size;
1445 /* Add the chunk to the free list. We don't bother with coalescing,
1446 since we are likely to want a chunk of this size again. */
1447 free_chunk ((char *)p, size, page->zone);
1451 /* Mark function for strings. */
1453 void
1454 gt_ggc_m_S (const void *p)
1456 page_entry *entry;
1457 unsigned long offset;
1459 if (!p)
1460 return;
1462 /* Look up the page on which the object is allocated. */
1463 entry = lookup_page_table_if_allocated (p);
1464 if (! entry)
1465 return;
1467 if (entry->pch_p)
1469 size_t alloc_word, alloc_bit, t;
1470 t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
1471 alloc_word = t / (8 * sizeof (alloc_type));
1472 alloc_bit = t % (8 * sizeof (alloc_type));
1473 offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
1474 alloc_bit);
1476 else if (entry->large_p)
1478 struct large_page_entry *le = (struct large_page_entry *) entry;
1479 offset = ((const char *) p) - entry->page;
1480 gcc_assert (offset < le->bytes);
1482 else
1484 struct small_page_entry *se = (struct small_page_entry *) entry;
1485 unsigned int start_word = zone_get_object_alloc_word (p);
1486 unsigned int start_bit = zone_get_object_alloc_bit (p);
1487 offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);
1489 /* On some platforms a char* will not necessarily line up on an
1490 allocation boundary, so we have to update the offset to
1491 account for the leftover bytes. */
1492 offset += (size_t) p % BYTES_PER_ALLOC_BIT;
1495 if (offset)
1497 /* Here we've seen a char* which does not point to the beginning
1498 of an allocated object. We assume it points to the middle of
1499 a STRING_CST. */
1500 gcc_assert (offset == offsetof (struct tree_string, str));
1501 p = ((const char *) p) - offset;
1502 gt_ggc_mx_lang_tree_node (CONST_CAST(void *, p));
1503 return;
1506 /* Inefficient, but also unlikely to matter. */
1507 ggc_set_mark (p);
1511 /* User-callable entry points for marking string X. */
1513 void
1514 gt_ggc_mx (const char *& x)
1516 gt_ggc_m_S (x);
1519 void
1520 gt_ggc_mx (unsigned char *& x)
1522 gt_ggc_m_S (x);
1525 void
1526 gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1530 /* If P is not marked, mark it and return false. Otherwise return true.
1531 P must have been allocated by the GC allocator; it mustn't point to
1532 static objects, stack variables, or memory allocated with malloc. */
1534 int
1535 ggc_set_mark (const void *p)
1537 struct page_entry *page;
1538 const char *ptr = (const char *) p;
1540 page = zone_get_object_page (p);
1542 if (page->pch_p)
1544 size_t mark_word, mark_bit, offset;
1545 offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1546 mark_word = offset / (8 * sizeof (mark_type));
1547 mark_bit = offset % (8 * sizeof (mark_type));
1549 if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
1550 return 1;
1551 pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
1553 else if (page->large_p)
1555 struct large_page_entry *large_page
1556 = (struct large_page_entry *) page;
1558 if (large_page->mark_p)
1559 return 1;
1560 large_page->mark_p = true;
1562 else
1564 struct small_page_entry *small_page
1565 = (struct small_page_entry *) page;
1567 if (small_page->mark_bits[zone_get_object_mark_word (p)]
1568 & (1 << zone_get_object_mark_bit (p)))
1569 return 1;
1570 small_page->mark_bits[zone_get_object_mark_word (p)]
1571 |= (1 << zone_get_object_mark_bit (p));
1574 if (GGC_DEBUG_LEVEL >= 4)
1575 fprintf (G.debug_file, "Marking %p\n", p);
1577 return 0;
1580 /* Return 1 if P has been marked, zero otherwise.
1581 P must have been allocated by the GC allocator; it mustn't point to
1582 static objects, stack variables, or memory allocated with malloc. */
1584 int
1585 ggc_marked_p (const void *p)
1587 struct page_entry *page;
1588 const char *ptr = (const char *) p;
1590 page = zone_get_object_page (p);
1592 if (page->pch_p)
1594 size_t mark_word, mark_bit, offset;
1595 offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1596 mark_word = offset / (8 * sizeof (mark_type));
1597 mark_bit = offset % (8 * sizeof (mark_type));
1599 return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
1602 if (page->large_p)
1604 struct large_page_entry *large_page
1605 = (struct large_page_entry *) page;
1607 return large_page->mark_p;
1609 else
1611 struct small_page_entry *small_page
1612 = (struct small_page_entry *) page;
1614 return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
1615 & (1 << zone_get_object_mark_bit (p)));
1619 /* Return the size of the gc-able object P. */
1621 size_t
1622 ggc_get_size (const void *p)
1624 struct page_entry *page;
1625 const char *ptr = (const char *) p;
1627 page = zone_get_object_page (p);
1629 if (page->pch_p)
1631 size_t alloc_word, alloc_bit, offset, max_size;
1632 offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
1633 alloc_word = offset / (8 * sizeof (alloc_type));
1634 alloc_bit = offset % (8 * sizeof (alloc_type));
1635 max_size = pch_zone.bytes - (ptr - pch_zone.page);
1636 return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
1637 max_size);
1640 if (page->large_p)
1641 return ((struct large_page_entry *)page)->bytes;
1642 else
1643 return zone_find_object_size ((struct small_page_entry *) page, p);
1646 /* Initialize the ggc-zone-mmap allocator. */
1647 void
1648 init_ggc (void)
1650 /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
1651 a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
1652 the current assumptions to hold. */
1654 gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);
1656 /* Set up the main zone by hand. */
1657 main_zone.name = "Main zone";
1658 G.zones = &main_zone;
1660 /* Allocate the default zones. */
1661 new_ggc_zone_1 (&rtl_zone, "RTL zone");
1662 new_ggc_zone_1 (&tree_zone, "Tree zone");
1663 new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");
1665 G.pagesize = getpagesize();
1666 G.lg_pagesize = exact_log2 (G.pagesize);
1667 G.page_mask = ~(G.pagesize - 1);
1669 /* Require the system page size to be a multiple of GGC_PAGE_SIZE. */
1670 gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);
1672 /* Allocate 16 system pages at a time. */
1673 G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
1675 /* Calculate the size of the allocation bitmap and other overhead. */
1676 /* Right now we allocate bits for the page header and bitmap. These
1677 are wasted, but a little tricky to eliminate. */
1678 G.small_page_overhead
1679 = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
1680 /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */
1682 #ifdef HAVE_MMAP_DEV_ZERO
1683 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1684 gcc_assert (G.dev_zero_fd != -1);
1685 #endif
1687 #if 0
1688 G.debug_file = fopen ("ggc-mmap.debug", "w");
1689 setlinebuf (G.debug_file);
1690 #else
1691 G.debug_file = stdout;
1692 #endif
1694 #ifdef USING_MMAP
1695 /* StunOS has an amazing off-by-one error for the first mmap allocation
1696 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1697 believe, is an unaligned page allocation, which would cause us to
1698 hork badly if we tried to use it. */
1700 char *p = alloc_anon (NULL, G.pagesize, &main_zone);
1701 struct small_page_entry *e;
1702 if ((size_t)p & (G.pagesize - 1))
1704 /* How losing. Discard this one and try another. If we still
1705 can't get something useful, give up. */
1707 p = alloc_anon (NULL, G.pagesize, &main_zone);
1708 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1711 if (GGC_PAGE_SIZE == G.pagesize)
1713 /* We have a good page, might as well hold onto it... */
1714 e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
1715 e->common.page = p;
1716 e->common.zone = &main_zone;
1717 e->next = main_zone.free_pages;
1718 set_page_table_entry (e->common.page, &e->common);
1719 main_zone.free_pages = e;
1721 else
1723 munmap (p, G.pagesize);
1726 #endif
1729 /* Start a new GGC zone. */
1731 static void
1732 new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
1734 new_zone->name = name;
1735 new_zone->next_zone = G.zones->next_zone;
1736 G.zones->next_zone = new_zone;
1739 /* Free all empty pages and objects within a page for a given zone. */
1741 static void
1742 sweep_pages (struct alloc_zone *zone)
1744 struct large_page_entry **lpp, *lp, *lnext;
1745 struct small_page_entry **spp, *sp, *snext;
1746 char *last_free;
1747 size_t allocated = 0;
1748 bool nomarksinpage;
1750 /* First, reset the free_chunks lists, since we are going to
1751 re-free free chunks in hopes of coalescing them into large chunks. */
1752 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1753 zone->high_free_bin = 0;
1754 zone->cached_free = NULL;
1755 zone->cached_free_size = 0;
1757 /* Large pages are all or none affairs. Either they are completely
1758 empty, or they are completely full. */
1759 lpp = &zone->large_pages;
1760 for (lp = zone->large_pages; lp != NULL; lp = lnext)
1762 gcc_assert (lp->common.large_p);
1764 lnext = lp->next;
1766 /* This page has now survived another collection. */
1767 lp->common.survived++;
1769 if (lp->mark_p)
1771 lp->mark_p = false;
1772 allocated += lp->bytes;
1773 lpp = &lp->next;
1775 else
1777 *lpp = lnext;
1778 #ifdef ENABLE_GC_CHECKING
1779 /* Poison the page. */
1780 memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
1781 #endif
1782 if (lp->prev)
1783 lp->prev->next = lp->next;
1784 if (lp->next)
1785 lp->next->prev = lp->prev;
1786 free_large_page (lp);
1790 spp = &zone->pages;
1791 for (sp = zone->pages; sp != NULL; sp = snext)
1793 char *object, *last_object;
1794 char *end;
1795 alloc_type *alloc_word_p;
1796 mark_type *mark_word_p;
1798 gcc_assert (!sp->common.large_p);
1800 snext = sp->next;
1802 /* This page has now survived another collection. */
1803 sp->common.survived++;
1805 /* Step through all chunks, consolidate those that are free and
1806 insert them into the free lists. Note that consolidation
1807 slows down collection slightly. */
1809 last_object = object = sp->common.page;
1810 end = sp->common.page + SMALL_PAGE_SIZE;
1811 last_free = NULL;
1812 nomarksinpage = true;
1813 mark_word_p = sp->mark_bits;
1814 alloc_word_p = sp->alloc_bits;
1816 gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);
1818 object = sp->common.page;
1821 unsigned int i, n;
1822 alloc_type alloc_word;
1823 mark_type mark_word;
1825 alloc_word = *alloc_word_p++;
1826 mark_word = *mark_word_p++;
1828 if (mark_word)
1829 nomarksinpage = false;
1831 /* There ought to be some way to do this without looping... */
1832 i = 0;
1833 while ((n = alloc_ffs (alloc_word)) != 0)
1835 /* Extend the current state for n - 1 bits. We can't
1836 shift alloc_word by n, even though it isn't used in the
1837 loop, in case only the highest bit was set. */
1838 alloc_word >>= n - 1;
1839 mark_word >>= n - 1;
1840 object += BYTES_PER_MARK_BIT * (n - 1);
1842 if (mark_word & 1)
1844 if (last_free)
1846 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
1847 object
1848 - last_free));
1849 poison_region (last_free, object - last_free);
1850 free_chunk (last_free, object - last_free, zone);
1851 last_free = NULL;
1853 else
1854 allocated += object - last_object;
1855 last_object = object;
1857 else
1859 if (last_free == NULL)
1861 last_free = object;
1862 allocated += object - last_object;
1864 else
1865 zone_clear_object_alloc_bit (sp, object);
1868 /* Shift to just after the alloc bit we handled. */
1869 alloc_word >>= 1;
1870 mark_word >>= 1;
1871 object += BYTES_PER_MARK_BIT;
1873 i += n;
1876 object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
1878 while (object < end);
1880 if (nomarksinpage)
1882 *spp = snext;
1883 #ifdef ENABLE_GC_CHECKING
1884 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
1885 SMALL_PAGE_SIZE));
1886 /* Poison the page. */
1887 memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
1888 #endif
1889 free_small_page (sp);
1890 continue;
1892 else if (last_free)
1894 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
1895 object - last_free));
1896 poison_region (last_free, object - last_free);
1897 free_chunk (last_free, object - last_free, zone);
1899 else
1900 allocated += object - last_object;
1902 spp = &sp->next;
1905 zone->allocated = allocated;
1908 /* mark-and-sweep routine for collecting a single zone. NEED_MARKING
1909 is true if we need to mark before sweeping, false if some other
1910 zone collection has already performed marking for us. Returns true
1911 if we collected, false otherwise. */
1913 static bool
1914 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1916 #if 0
1917 /* */
1919 int i;
1920 for (i = 0; i < NUM_FREE_BINS + 1; i++)
1922 struct alloc_chunk *chunk;
1923 int n, tot;
1925 n = 0;
1926 tot = 0;
1927 chunk = zone->free_chunks[i];
1928 while (chunk)
1930 n++;
1931 tot += chunk->size;
1932 chunk = chunk->next_free;
1934 fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
1935 i, n, tot);
1938 /* */
1939 #endif
1941 if (!quiet_flag)
1942 fprintf (stderr, " {%s GC %luk -> ",
1943 zone->name, (unsigned long) zone->allocated / 1024);
1945 /* Zero the total allocated bytes. This will be recalculated in the
1946 sweep phase. */
1947 zone->allocated = 0;
1949 /* Release the pages we freed the last time we collected, but didn't
1950 reuse in the interim. */
1951 release_pages (zone);
1953 if (need_marking)
1955 zone_allocate_marks ();
1956 ggc_mark_roots ();
1957 if (GATHER_STATISTICS)
1958 ggc_prune_overhead_list ();
1961 sweep_pages (zone);
1962 zone->was_collected = true;
1963 zone->allocated_last_gc = zone->allocated;
1965 if (!quiet_flag)
1966 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1967 return true;
1970 /* Calculate the average page survival rate in terms of number of
1971 collections. */
1973 static float
1974 calculate_average_page_survival (struct alloc_zone *zone)
1976 float count = 0.0;
1977 float survival = 0.0;
1978 struct small_page_entry *p;
1979 struct large_page_entry *lp;
1980 for (p = zone->pages; p; p = p->next)
1982 count += 1.0;
1983 survival += p->common.survived;
1985 for (lp = zone->large_pages; lp; lp = lp->next)
1987 count += 1.0;
1988 survival += lp->common.survived;
1990 return survival/count;
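/* For illustration only: each page's SURVIVED counter is maintained as
   pages live through collections, so a zone whose three pages have
   survived 1, 2 and 6 collections reports (1 + 2 + 6) / 3 = 3.0 here.
   (This sketch assumes the zone has at least one page; with none, the
   division above would be 0/0.) */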
1993 /* Top level collection routine. */
1995 void
1996 ggc_collect (void)
1998 struct alloc_zone *zone;
1999 bool marked = false;
2001 timevar_push (TV_GC);
2003 if (!ggc_force_collect)
2005 float allocated_last_gc = 0, allocated = 0, min_expand;
2007 for (zone = G.zones; zone; zone = zone->next_zone)
2009 allocated_last_gc += zone->allocated_last_gc;
2010 allocated += zone->allocated;
2013 allocated_last_gc =
2014 MAX (allocated_last_gc,
2015 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2016 min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2018 if (allocated < allocated_last_gc + min_expand)
2020 timevar_pop (TV_GC);
2021 return;
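/* A hedged numeric sketch of the heuristic above, using made-up values
   rather than the real parameter defaults: if the per-zone totals (or
   the ggc-min-heapsize floor) give allocated_last_gc = 8 MB and
   ggc-min-expand is 30, then min_expand = 2.4 MB and collection is
   skipped until more than about 10.4 MB is live across all zones. */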
2025 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2027 /* Start by possibly collecting the main zone. */
2028 main_zone.was_collected = false;
2029 marked |= ggc_collect_1 (&main_zone, true);
2031 /* In order to keep the number of collections down, we don't
2032 collect other zones unless we are collecting the main zone. This
2033 gives us roughly the same number of collections as we used to
2034 have with the old gc. The number of collections is important
2035 because our main slowdown (according to profiling) is now in
2036 marking. So if we mark twice as often as we used to, we'll be
2037 twice as slow. Hopefully we'll avoid this cost when we mark
2038 zone-at-a-time. */
2039 /* NOTE drow/2004-07-28: We now always collect the main zone, but
2040 keep this code in case the heuristics are further refined. */
2042 if (main_zone.was_collected)
2044 struct alloc_zone *zone;
2046 for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
2048 zone->was_collected = false;
2049 marked |= ggc_collect_1 (zone, !marked);
2053 /* Print page survival stats, if someone wants them. */
2054 if (GATHER_STATISTICS && GGC_DEBUG_LEVEL >= 2)
2056 for (zone = G.zones; zone; zone = zone->next_zone)
2058 if (zone->was_collected)
2060 float f = calculate_average_page_survival (zone);
2061 printf ("Average page survival in zone `%s' is %f\n",
2062 zone->name, f);
2067 if (marked)
2068 zone_free_marks ();
2070 /* Free dead zones. */
2071 for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
2073 if (zone->next_zone->dead)
2075 struct alloc_zone *dead_zone = zone->next_zone;
2077 printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
2079 /* The zone must be empty. */
2080 gcc_assert (!dead_zone->allocated);
2082 /* Unchain the dead zone, release all its pages and free it. */
2083 zone->next_zone = zone->next_zone->next_zone;
2084 release_pages (dead_zone);
2085 free (dead_zone);
2089 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2091 timevar_pop (TV_GC);
2094 /* Print allocation statistics. */
2095 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2096 ? (x) \
2097 : ((x) < 1024*1024*10 \
2098 ? (x) / 1024 \
2099 : (x) / (1024*1024))))
2100 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
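/* A minimal sketch (disabled, illustrative only) of how SCALE and LABEL
   pair up; the function name below is hypothetical and not used
   elsewhere in this file. 5000 prints as "5000 ", 2000000 as "1953k"
   and 50000000 as "47M". */
#if 0
static void
ggc_zone_print_scaled_example (void)
{
  size_t samples[] = { 5000, 2000000, 50000000 };
  size_t ix;

  for (ix = 0; ix < ARRAY_SIZE (samples); ix++)
    fprintf (stderr, "%10lu%c\n", SCALE (samples[ix]), LABEL (samples[ix]));
}
#endif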
2102 void
2103 ggc_print_statistics (void)
2105 struct alloc_zone *zone;
2106 struct ggc_statistics stats;
2107 size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
2108 size_t pte_overhead, i;
2110 /* Clear the statistics. */
2111 memset (&stats, 0, sizeof (stats));
2113 /* Make sure collection will really occur. */
2114 ggc_force_collect = true;
2116 /* Collect and print the statistics common across collectors. */
2117 ggc_print_common_statistics (stderr, &stats);
2119 ggc_force_collect = false;
2121 /* Release free pages so that we will not count the bytes allocated
2122 there as part of the total allocated memory. */
2123 for (zone = G.zones; zone; zone = zone->next_zone)
2124 release_pages (zone);
2126 /* Collect some information about the various sizes of
2127 allocation. */
2128 fprintf (stderr,
2129 "Memory still allocated at the end of the compilation process\n");
2131 fprintf (stderr, "%20s %10s %10s %10s\n",
2132 "Zone", "Allocated", "Used", "Overhead");
2133 for (zone = G.zones; zone; zone = zone->next_zone)
2135 struct large_page_entry *large_page;
2136 size_t overhead, allocated, in_use;
2138 /* Skip empty zones. */
2139 if (!zone->pages && !zone->large_pages)
2140 continue;
2142 allocated = in_use = 0;
2144 overhead = sizeof (struct alloc_zone);
2146 for (large_page = zone->large_pages; large_page != NULL;
2147 large_page = large_page->next)
2149 allocated += large_page->bytes;
2150 in_use += large_page->bytes;
2151 overhead += sizeof (struct large_page_entry);
2154 /* There's no easy way to walk through the small pages finding
2155 used and unused objects. Instead, add all the pages, and
2156 subtract out the free list. */
2158 allocated += GGC_PAGE_SIZE * zone->n_small_pages;
2159 in_use += GGC_PAGE_SIZE * zone->n_small_pages;
2160 overhead += G.small_page_overhead * zone->n_small_pages;
2162 for (i = 0; i <= NUM_FREE_BINS; i++)
2164 struct alloc_chunk *chunk = zone->free_chunks[i];
2165 while (chunk)
2167 in_use -= ggc_get_size (chunk);
2168 chunk = chunk->next_free;
2172 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
2173 zone->name,
2174 SCALE (allocated), LABEL (allocated),
2175 SCALE (in_use), LABEL (in_use),
2176 SCALE (overhead), LABEL (overhead));
2178 gcc_assert (in_use == zone->allocated);
2180 total_overhead += overhead;
2181 total_allocated += zone->allocated;
2182 total_bytes_mapped += zone->bytes_mapped;
2185 /* Count the size of the page table as best we can. */
2186 #if HOST_BITS_PER_PTR <= 32
2187 pte_overhead = sizeof (G.lookup);
2188 for (i = 0; i < PAGE_L1_SIZE; i++)
2189 if (G.lookup[i])
2190 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2191 #else
2193 page_table table = G.lookup;
2194 pte_overhead = 0;
2195 while (table)
2197 pte_overhead += sizeof (*table);
2198 for (i = 0; i < PAGE_L1_SIZE; i++)
2199 if (table->table[i])
2200 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2201 table = table->next;
2204 #endif
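/* Rough scale of the figure computed above, under assumed sizes: with a
   hypothetical PAGE_L2_SIZE of 1024 and 4-byte pointers, every populated
   first-level slot accounts for 4 kB of second-level table, on top of
   the first-level array (or the chained tables on wide-pointer hosts). */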
2205 fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
2206 "", "", SCALE (pte_overhead), LABEL (pte_overhead));
2207 total_overhead += pte_overhead;
2209 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
2210 SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
2211 SCALE (total_allocated), LABEL(total_allocated),
2212 SCALE (total_overhead), LABEL (total_overhead));
2214 if (GATHER_STATISTICS)
2216 unsigned long long all_overhead = 0, all_allocated = 0;
2217 unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
2218 unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
2219 unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
2221 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2223 for (zone = G.zones; zone; zone = zone->next_zone)
2225 all_overhead += zone->stats.total_overhead;
2226 all_allocated += zone->stats.total_allocated;
2228 all_allocated_under32 += zone->stats.total_allocated_under32;
2229 all_overhead_under32 += zone->stats.total_overhead_under32;
2231 all_allocated_under64 += zone->stats.total_allocated_under64;
2232 all_overhead_under64 += zone->stats.total_overhead_under64;
2234 all_allocated_under128 += zone->stats.total_allocated_under128;
2235 all_overhead_under128 += zone->stats.total_overhead_under128;
2237 fprintf (stderr, "%20s: %10lld\n",
2238 zone->name, zone->stats.total_allocated);
2241 fprintf (stderr, "\n");
2243 fprintf (stderr, "Total Overhead: %10lld\n",
2244 all_overhead);
2245 fprintf (stderr, "Total Allocated: %10lld\n",
2246 all_allocated);
2248 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2249 all_overhead_under32);
2250 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2251 all_allocated_under32);
2252 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2253 all_overhead_under64);
2254 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2255 all_allocated_under64);
2256 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2257 all_overhead_under128);
2258 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2259 all_allocated_under128);
2263 /* Precompiled header support. */
2265 /* For precompiled headers, we sort objects based on their type. We
2266 also sort various objects into their own buckets; currently this
2267 covers strings and IDENTIFIER_NODE trees. The choices of how
2268 to sort buckets have not yet been tuned. */
2270 #define NUM_PCH_BUCKETS (gt_types_enum_last + 3)
2272 #define OTHER_BUCKET (gt_types_enum_last + 0)
2273 #define IDENTIFIER_BUCKET (gt_types_enum_last + 1)
2274 #define STRING_BUCKET (gt_types_enum_last + 2)
2276 struct ggc_pch_ondisk
2278 size_t total;
2279 size_t type_totals[NUM_PCH_BUCKETS];
2282 struct ggc_pch_data
2284 struct ggc_pch_ondisk d;
2285 size_t base;
2286 size_t orig_base;
2287 size_t alloc_size;
2288 alloc_type *alloc_bits;
2289 size_t type_bases[NUM_PCH_BUCKETS];
2290 size_t start_offset;
2293 /* Initialize the PCH data structure. */
2295 struct ggc_pch_data *
2296 init_ggc_pch (void)
2298 return XCNEW (struct ggc_pch_data);
2301 /* Return which of the page-aligned buckets the object at X, with type
2302 TYPE, should be sorted into in the PCH. Strings will have
2303 IS_STRING set and TYPE will be gt_types_enum_last. Other objects
2304 of unknown type will also have TYPE equal to gt_types_enum_last. */
2306 static int
2307 pch_bucket (void *x, enum gt_types_enum type,
2308 bool is_string)
2310 /* Sort identifiers into their own bucket, to improve locality
2311 when searching the identifier hash table. */
2312 if (type == gt_ggc_e_14lang_tree_node
2313 && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
2314 return IDENTIFIER_BUCKET;
2315 else if (type == gt_types_enum_last)
2317 if (is_string)
2318 return STRING_BUCKET;
2319 return OTHER_BUCKET;
2321 return type;
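/* Illustrative mapping, restating the tests above: an IDENTIFIER_NODE
   lang_tree_node lands in IDENTIFIER_BUCKET, a string (TYPE equal to
   gt_types_enum_last with IS_STRING set) in STRING_BUCKET, any other
   object of unknown type in OTHER_BUCKET, and everything else in the
   bucket numbered by its type. */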
2324 /* Add the size of object X to the size of the PCH data. */
2326 void
2327 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2328 size_t size, bool is_string, enum gt_types_enum type)
2330 /* NOTE: Right now we don't need to align up the size of any objects.
2331 Strings can be unaligned, and everything else is allocated to a
2332 MAX_ALIGNMENT boundary already. */
2334 d->d.type_totals[pch_bucket (x, type, is_string)] += size;
2337 /* Return the total size of the PCH data. */
2339 size_t
2340 ggc_pch_total_size (struct ggc_pch_data *d)
2342 int i;
2343 size_t alloc_size, total_size;
2345 total_size = 0;
2346 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2348 d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
2349 total_size += d->d.type_totals[i];
2351 d->d.total = total_size;
2353 /* Include the size of the allocation bitmap. */
2354 alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
2355 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2356 d->alloc_size = alloc_size;
2358 return d->d.total + alloc_size;
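/* A worked example under assumed sizes (a 4 kB GGC_PAGE_SIZE and a
   hypothetical 8-byte BYTES_PER_ALLOC_BIT): buckets holding 100 and
   5000 bytes round up to 4096 + 8192 = 12288 bytes of object data, and
   the allocation bitmap needs CEIL (12288, 64) = 192 bytes, rounded up
   to MAX_ALIGNMENT, so roughly 12480 bytes are reported in total. */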
2361 /* Set the base address for the objects in the PCH file. */
2363 void
2364 ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
2366 int i;
2367 size_t base = (size_t) base_;
2369 d->base = d->orig_base = base;
2370 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2372 d->type_bases[i] = base;
2373 base += d->d.type_totals[i];
2376 if (d->alloc_bits == NULL)
2377 d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
2380 /* Allocate a place for object X of size SIZE in the PCH file. */
2382 char *
2383 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
2384 size_t size, bool is_string,
2385 enum gt_types_enum type)
2387 size_t alloc_word, alloc_bit;
2388 char *result;
2389 int bucket = pch_bucket (x, type, is_string);
2391 /* Record the start of the object in the allocation bitmap. We
2392 can't assert that the allocation bit is previously clear, because
2393 strings may violate the invariant that they are at least
2394 BYTES_PER_ALLOC_BIT long. This is harmless - ggc_get_size
2395 should not be called for strings. */
2396 alloc_word = ((d->type_bases[bucket] - d->orig_base)
2397 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
2398 alloc_bit = ((d->type_bases[bucket] - d->orig_base)
2399 / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
2400 d->alloc_bits[alloc_word] |= 1L << alloc_bit;
2402 /* Place the object at the current pointer for this bucket. */
2403 result = (char *) d->type_bases[bucket];
2404 d->type_bases[bucket] += size;
2405 return result;
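/* A worked example of the index arithmetic above, assuming a 32-bit
   alloc_type and a hypothetical 8-byte BYTES_PER_ALLOC_BIT: an object
   placed 4096 bytes past ORIG_BASE corresponds to allocation bit
   4096 / 8 = 512, which is word 512 / 32 = 16, bit 512 % 32 = 0 of
   ALLOC_BITS. */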
2408 /* Prepare to write out the PCH data to file F. */
2410 void
2411 ggc_pch_prepare_write (struct ggc_pch_data *d,
2412 FILE *f)
2414 /* We seek around a lot while writing. Record where the end
2415 of the padding in the PCH file is, so that we can
2416 locate each object's offset. */
2417 d->start_offset = ftell (f);
2420 /* Write out object X of SIZE to file F. */
2422 void
2423 ggc_pch_write_object (struct ggc_pch_data *d,
2424 FILE *f, void *x, void *newx,
2425 size_t size, bool is_string ATTRIBUTE_UNUSED)
2427 if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
2428 fatal_error ("can%'t seek PCH file: %m");
2430 if (fwrite (x, size, 1, f) != 1)
2431 fatal_error ("can%'t write PCH file: %m");
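/* In other words, each object is written at START_OFFSET plus its
   offset from ORIG_BASE, so its position in the file mirrors the
   position it will occupy once the PCH is mapped back in. */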
2434 void
2435 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2437 /* Write out the allocation bitmap. */
2438 if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
2439 fatal_error ("can%'t seek PCH file: %m");
2441 if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
2442 fatal_error ("can%'t write PCH file: %m");
2444 /* Done with the PCH, so write out our footer. */
2445 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2446 fatal_error ("can%'t write PCH file: %m");
2448 free (d->alloc_bits);
2449 free (d);
2452 /* The PCH file from F has been mapped at ADDR. Read in any
2453 additional data from the file and set up the GC state. */
2455 void
2456 ggc_pch_read (FILE *f, void *addr)
2458 struct ggc_pch_ondisk d;
2459 size_t alloc_size;
2460 struct alloc_zone *zone;
2461 struct page_entry *pch_page;
2462 char *p;
2464 if (fread (&d, sizeof (d), 1, f) != 1)
2465 fatal_error ("can%'t read PCH file: %m");
2467 alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
2468 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2470 pch_zone.bytes = d.total;
2471 pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
2472 pch_zone.page = (char *) addr;
2473 pch_zone.end = (char *) pch_zone.alloc_bits;
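/* Sketch of the mapped region as set up above (sizes not to scale):

     addr               addr + d.total        addr + d.total + alloc_size
      |--- object data ----|--- allocation bitmap ---|

   pch_zone.page and pch_zone.end bracket the object data; alloc_bits
   points at the trailing bitmap. */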
2475 if (GATHER_STATISTICS)
2477 /* We've just read in a PCH file. So, every object that used to be
2478 allocated is now free. */
2479 zone_allocate_marks ();
2480 ggc_prune_overhead_list ();
2481 zone_free_marks ();
2484 for (zone = G.zones; zone; zone = zone->next_zone)
2486 struct small_page_entry *page, *next_page;
2487 struct large_page_entry *large_page, *next_large_page;
2489 zone->allocated = 0;
2491 /* Clear the zone's free chunk list. */
2492 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
2493 zone->high_free_bin = 0;
2494 zone->cached_free = NULL;
2495 zone->cached_free_size = 0;
2497 /* Move all the small pages onto the free list. */
2498 for (page = zone->pages; page != NULL; page = next_page)
2500 next_page = page->next;
2501 memset (page->alloc_bits, 0,
2502 G.small_page_overhead - PAGE_OVERHEAD);
2503 free_small_page (page);
2506 /* Discard all the large pages. */
2507 for (large_page = zone->large_pages; large_page != NULL;
2508 large_page = next_large_page)
2510 next_large_page = large_page->next;
2511 free_large_page (large_page);
2514 zone->pages = NULL;
2515 zone->large_pages = NULL;
2518 /* Allocate the dummy page entry for the PCH, and set all pages
2519 mapped into the PCH to reference it. */
2520 pch_page = XCNEW (struct page_entry);
2521 pch_page->page = pch_zone.page;
2522 pch_page->pch_p = true;
2524 for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
2525 set_page_table_entry (p, pch_page);