/* "Bag-of-pages" zone garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
   2010 Free Software Foundation, Inc.

   Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
   (dberlin@dberlin.org).  Rewritten by Daniel Jacobowitz
   <dan@codesourcery.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"
#include "plugin.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error Zone collector requires mmap
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif
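
/* In __builtin_prefetch (X, 1, 3), the second argument requests a
   prefetch in anticipation of a write and the third requests maximum
   temporal locality, i.e. keep the line in all cache levels.  */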
/* FUTURE NOTES:

   If we track inter-zone pointers, we can mark single zones at a
   time.

   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.

   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */

/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than page size.

   Pages for small objects are broken up into chunks.  The page has
   a bitmap which marks the start position of each chunk (whether
   allocated or free).  Free chunks are on one of the zone's free
   lists and contain a pointer to the next free chunk.  Chunks in
   most of the free lists have a fixed size determined by the
   free list.  Chunks in the "other" sized free list have their size
   stored right after their chain pointer.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  The free page list is currently per-zone.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  struct alloc_chunk *next_free;
  unsigned int size;
};

/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD   (offsetof (struct small_page_entry, alloc_bits))

/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE	4096
#define GGC_PAGE_MASK	(GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT	12
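
/* As a consistency note: GGC_PAGE_SHIFT must equal
   log2 (GGC_PAGE_SIZE), i.e. 4096 == 1 << 12, or the page-table
   lookup macros below will index the wrong entries.  */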
#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE	G.pagesize
#define GGC_PAGE_MASK	G.page_mask
#define GGC_PAGE_SHIFT	G.lg_pagesize
#endif

/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE

/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate...  */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */

#define NUM_FREE_BINS		64
#define FREE_BIN_DELTA		MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
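
/* A worked example, assuming a host where MAX_ALIGNMENT is 8: a
   24-byte chunk maps to bin 24 / 8 == 3, and any chunk larger than
   NUM_FREE_BINS * 8 == 512 bytes falls into the "other" sized
   bin 0 (see free_chunk below).  */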
/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT	MAX_ALIGNMENT

/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity
   to sweeping.  */
#define BYTES_PER_MARK_BIT	BYTES_PER_ALLOC_BIT

#define BYTES_PER_MARK_WORD	(8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
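
/* For instance, with 8-byte MAX_ALIGNMENT and a 32-bit mark_type,
   each mark word covers 8 * 8 * 4 == 256 bytes of page data.  */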
/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)
/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;

/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};

/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
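
/* A worked example, with the default GGC_PAGE_SHIFT of 12 and
   PAGE_L1_BITS of 8 (so PAGE_L2_BITS is 12): for p == 0x12345678,
   LOOKUP_L1 yields (0x12345678 >> 24) & 0xff == 0x12, and LOOKUP_L2
   yields (0x12345678 >> 12) & 0xfff == 0x345.  */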
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks
     of size FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;

/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;

/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Return NULL if the object wasn't allocated via the GC.  */

static inline page_entry *
lookup_page_table_if_allocated (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return NULL;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  if (! base[L1])
    return NULL;

  L2 = LOOKUP_L2 (p);
  if (L2 >= PAGE_L2_SIZE)
    return NULL;
  /* We might have a page entry which does not correspond exactly to a
     system page.  */
  if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
    return NULL;

  return base[L1][L2];
}

/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}

/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
          % (8 * sizeof (alloc_type)));
}

/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
          % (8 * sizeof (mark_type)));
}
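
/* To illustrate the bitmap arithmetic above, assume BYTES_PER_ALLOC_BIT
   is 8 and alloc_type is 32 bits wide.  An object at page offset 0x128
   corresponds to bit index 0x128 / 8 == 37, which lives in word
   37 / 32 == 1 at bit position 37 % 32 == 5.  The mark-bit variants
   compute the same thing in terms of BYTES_PER_MARK_BIT and mark_type.  */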
/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}

/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
                             const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}

/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
                    size_t start_word, size_t start_bit,
                    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
        /* indx is 1-based.  We started at the bit after the object's
           start, but we also ended at the bit after the object's end.
           It cancels out.  */
        return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
        return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
        return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}

/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
                       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
                     - (const char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
                             max_size);
}

/* highest_bit assumes that alloc_type is 32 bits.  */
extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];

/* Find the highest set bit in VALUE.  Returns the bit number of that
   bit, using the same values as ffs.  */
static inline alloc_type
highest_bit (alloc_type value)
{
  /* This also assumes that alloc_type is unsigned.  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  value = value ^ (value >> 1);
  return alloc_ffs (value);
}
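
/* The shift-and-or cascade above smears the highest set bit into every
   lower position; xoring with the half-shifted value then leaves only
   the highest bit set.  For example, 0x28 (binary 101000) smears to
   0x3f, and 0x3f ^ 0x1f == 0x20, so alloc_ffs returns 6.  */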
/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
                         size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search for the start of the object.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}

/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
        = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
                                               mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
        {
          page->mark_bits = cur_marks;
          cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
          n++;
#endif
        }
      gcc_checking_assert (n == zone->n_small_pages);
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
                               CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}

/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
        free (zone->mark_bits);
        zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard
     the handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
          e->common.page = page + (i << GGC_PAGE_SHIFT);
          e->common.zone = zone;
          e->next = f;
          f = e;
          set_page_table_entry (e->common.page, &e->common);
        }

      zone->free_pages = f;

      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = XNEWVAR (char, needed_size);

  entry = (struct large_page_entry *) page;

  entry->next = NULL;
  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
#ifdef GATHER_STATISTICS
  entry->common.survived = 0;
#endif
  entry->mark_p = false;
  entry->bytes = size;
  entry->prev = NULL;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s large page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
                                                SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}

/* Release a large page that is no longer needed.  */

static inline void
free_large_page (struct large_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + entry->bytes - 1);

  gcc_assert (entry->common.large_p);

  set_page_table_entry (entry->common.page, NULL);
  free (entry);
}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      while (p && p->common.page == start + len)
        {
          next = p->next;
          len += SMALL_PAGE_SIZE;
          set_page_table_entry (p->common.page, NULL);
          p = next;
        }

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}
/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED
                        (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS
                        (ptr + sizeof (struct alloc_chunk),
                         size - sizeof (struct alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED
                        (chunk, sizeof (struct alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS
                        (ptr + sizeof (struct alloc_chunk *),
                         size - sizeof (struct alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
}
/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

void *
ggc_internal_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
                              MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
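
  /* For example, with MAX_ALIGNMENT == 8 a 13-byte request becomes
     (13 + 7) & -8 == 16; -MAX_ALIGNMENT is the two's complement way
     of writing the mask ~(MAX_ALIGNMENT - 1).  */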

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
        {
          zone->cached_free += size;
          zone_set_object_alloc_bit (zone->cached_free);
        }

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
         and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
         == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
         may now be wrong, if this was the last chunk in the high bin.
         Rather than fixing it up now, wait until we need to search
         the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
         the watermark.  */
      while (zone->high_free_bin > bin
             && zone->free_chunks[zone->high_free_bin] == NULL)
        zone->high_free_bin--;

      if (zone->high_free_bin > bin)
        {
          size_t tbin = zone->high_free_bin;
          chunk = zone->free_chunks[tbin];

          /* Remove the chunk from its previous bin.  */
          zone->free_chunks[tbin] = chunk->next_free;

          result = (char *) chunk;

          /* Save the rest of the chunk for future allocation.  */
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

          /* Mark the new free chunk as an object, so that we can
             find the size of the newly allocated object.  */
          zone_set_object_alloc_bit (chunk);

          /* HIGH_FREE_BIN may now be wrong, if this was the last
             chunk in the high bin.  Rather than fixing it up now,
             wait until we need to search the free bins.  */

          goto found;
        }
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
         left over.  */
      csize = chunk->size;
      if (csize > size)
        {
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = csize - size;

          /* Mark the new free chunk as an object.  */
          zone_set_object_alloc_bit (chunk);
        }

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
        zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
        free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
                                                size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
        zone->stats.total_overhead_under32 += overhead;
        zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
        zone->stats.total_overhead_under64 += overhead;
        zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
        zone->stats.total_overhead_under128 += overhead;
        zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
             (unsigned long) size, result);

  return result;
}
#define ggc_internal_alloc_zone_pass_stat(s,z)          \
    ggc_internal_alloc_zone_stat (s,z PASS_MEM_STAT)

void *
ggc_internal_cleared_alloc_zone_stat (size_t orig_size,
                                      struct alloc_zone *zone MEM_STAT_DECL)
{
  void * result = ggc_internal_alloc_zone_pass_stat (orig_size, zone);
  memset (result, 0, orig_size);
  return result;
}

/* Allocate SIZE bytes of memory representing GTE type, in an
   appropriate zone for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
                      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_internal_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
    }
}

/* Normal GC allocation simply allocates into the main zone.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE)                                      \
  do {                                                                \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE)));   \
    memset ((PTR), 0xa5, (SIZE));                                     \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE)));    \
  } while (0)
#else
#define poison_region(PTR, SIZE)
#endif
/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
        large_page->prev->next = large_page->next;
      else
        {
          gcc_assert (large_page->common.zone->large_pages == large_page);
          large_page->common.zone->large_pages = large_page->next;
        }
      if (large_page->next)
        large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
         since we are likely to want a chunk of this size again.  */
      free_chunk ((char *)p, size, page->zone);
    }
}
/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_if_allocated (p);
  if (! entry)
    return;

  if (entry->pch_p)
    {
      size_t alloc_word, alloc_bit, t;
      t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
      alloc_word = t / (8 * sizeof (alloc_type));
      alloc_bit = t % (8 * sizeof (alloc_type));
      offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
                                        alloc_bit);
    }
  else if (entry->large_p)
    {
      struct large_page_entry *le = (struct large_page_entry *) entry;
      offset = ((const char *) p) - entry->page;
      gcc_assert (offset < le->bytes);
    }
  else
    {
      struct small_page_entry *se = (struct small_page_entry *) entry;
      unsigned int start_word = zone_get_object_alloc_word (p);
      unsigned int start_bit = zone_get_object_alloc_bit (p);
      offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);

      /* On some platforms a char* will not necessarily line up on an
         allocation boundary, so we have to update the offset to
         account for the leftover bytes.  */
      offset += (size_t) p % BYTES_PER_ALLOC_BIT;
    }

  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  /* Inefficient, but also unlikely to matter.  */
  ggc_set_mark (p);
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
        return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      if (large_page->mark_p)
        return 1;
      large_page->mark_p = true;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
          & (1 << zone_get_object_mark_bit (p)))
        return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
        |= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
    }

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      return large_page->mark_p;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
                   & (1 << zone_get_object_mark_bit (p)));
    }
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t alloc_word, alloc_bit, offset, max_size;
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
                                 max_size);
    }

  if (page->large_p)
    return ((struct large_page_entry *)page)->bytes;
  else
    return zone_find_object_size ((struct small_page_entry *) page, p);
}
/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */

  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
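  /* With 4096-byte system pages this works out to 16 GGC pages per
     mmap call; with 64K system pages, to 256.  */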

  /* Calculate the size of the allocation bitmap and other overhead.  */
  /* Right now we allocate bits for the page header and bitmap.  These
     are wasted, but a little tricky to eliminate.  */
  G.small_page_overhead
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  gcc_assert (G.dev_zero_fd != -1);
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct small_page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, &main_zone);
        gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    if (GGC_PAGE_SIZE == G.pagesize)
      {
        /* We have a good page, might as well hold onto it...  */
        e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
        e->common.page = p;
        e->common.zone = &main_zone;
        e->next = main_zone.free_pages;
        set_page_table_entry (e->common.page, &e->common);
        main_zone.free_pages = e;
      }
    else
      {
        munmap (p, G.pagesize);
      }
  }
#endif
}
/* Start a new GGC zone.  */

static void
new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
{
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
}
/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  struct large_page_entry **lpp, *lp, *lnext;
  struct small_page_entry **spp, *sp, *snext;
  char *last_free;
  size_t allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  zone->high_free_bin = 0;
  zone->cached_free = NULL;
  zone->cached_free_size = 0;

  /* Large pages are all or none affairs.  Either they are completely
     empty, or they are completely full.  */
  lpp = &zone->large_pages;
  for (lp = zone->large_pages; lp != NULL; lp = lnext)
    {
      gcc_assert (lp->common.large_p);

      lnext = lp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      lp->common.survived++;
#endif

      if (lp->mark_p)
        {
          lp->mark_p = false;
          allocated += lp->bytes;
          lpp = &lp->next;
        }
      else
        {
          *lpp = lnext;
#ifdef ENABLE_GC_CHECKING
          /* Poison the page.  */
          memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
          if (lp->prev)
            lp->prev->next = lp->next;
          if (lp->next)
            lp->next->prev = lp->prev;
          free_large_page (lp);
        }
    }

  spp = &zone->pages;
  for (sp = zone->pages; sp != NULL; sp = snext)
    {
      char *object, *last_object;
      char *end;
      alloc_type *alloc_word_p;
      mark_type *mark_word_p;

      gcc_assert (!sp->common.large_p);

      snext = sp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      sp->common.survived++;
#endif

      /* Step through all chunks, consolidate those that are free and
         insert them into the free lists.  Note that consolidation
         slows down collection slightly.  */

      last_object = object = sp->common.page;
      end = sp->common.page + SMALL_PAGE_SIZE;
      last_free = NULL;
      nomarksinpage = true;
      mark_word_p = sp->mark_bits;
      alloc_word_p = sp->alloc_bits;

      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);

      object = sp->common.page;
      do
        {
          unsigned int i, n;
          alloc_type alloc_word;
          mark_type mark_word;

          alloc_word = *alloc_word_p++;
          mark_word = *mark_word_p++;

          if (mark_word)
            nomarksinpage = false;

          /* There ought to be some way to do this without looping...  */
          i = 0;
          while ((n = alloc_ffs (alloc_word)) != 0)
            {
              /* Extend the current state for n - 1 bits.  We can't
                 shift alloc_word by n, even though it isn't used in the
                 loop, in case only the highest bit was set.  */
              alloc_word >>= n - 1;
              mark_word >>= n - 1;
              object += BYTES_PER_MARK_BIT * (n - 1);

              if (mark_word & 1)
                {
                  if (last_free)
                    {
                      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED
                                        (last_free, object - last_free));
                      poison_region (last_free, object - last_free);
                      free_chunk (last_free, object - last_free, zone);
                      last_free = NULL;
                    }
                  else
                    allocated += object - last_object;
                  last_object = object;
                }
              else
                {
                  if (last_free == NULL)
                    {
                      last_free = object;
                      allocated += object - last_object;
                    }
                  else
                    zone_clear_object_alloc_bit (sp, object);
                }

              /* Shift to just after the alloc bit we handled.  */
              alloc_word >>= 1;
              mark_word >>= 1;
              object += BYTES_PER_MARK_BIT;

              i += n;
            }

          object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
        }
      while (object < end);

      if (nomarksinpage)
        {
          *spp = snext;
#ifdef ENABLE_GC_CHECKING
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
                                                         SMALL_PAGE_SIZE));
          /* Poison the page.  */
          memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
          free_small_page (sp);
          continue;
        }
      else if (last_free)
        {
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
                                                         object - last_free));
          poison_region (last_free, object - last_free);
          free_chunk (last_free, object - last_free, zone);
        }
      else
        allocated += object - last_object;

      spp = &sp->next;
    }

  zone->allocated = allocated;
}
/* The mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
#if 0
  /* */
  {
    int i;
    for (i = 0; i < NUM_FREE_BINS + 1; i++)
      {
        struct alloc_chunk *chunk;
        int n, tot;

        n = 0;
        tot = 0;
        chunk = zone->free_chunks[i];
        while (chunk)
          {
            n++;
            tot += chunk->size;
            chunk = chunk->next_free;
          }
        fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
                 i, n, tot);
      }
  }
  /* */
#endif

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
             zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  if (need_marking)
    {
      zone_allocate_marks ();
      ggc_mark_roots ();
#ifdef GATHER_STATISTICS
      ggc_prune_overhead_list ();
#endif
    }

  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
#ifdef GATHER_STATISTICS
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  struct small_page_entry *p;
  struct large_page_entry *lp;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->common.survived;
    }
  for (lp = zone->large_pages; lp; lp = lp->next)
    {
      count += 1.0;
      survival += lp->common.survived;
    }
  return survival / count;
}
#endif
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;

  timevar_push (TV_GC);

  if (!ggc_force_collect)
    {
      float allocated_last_gc = 0, allocated = 0, min_expand;

      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          allocated_last_gc += zone->allocated_last_gc;
          allocated += zone->allocated;
        }

      allocated_last_gc =
        MAX (allocated_last_gc,
             (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (allocated < allocated_last_gc + min_expand)
        {
          timevar_pop (TV_GC);
          return;
        }
    }
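
  /* A worked example of the heuristic above, with illustrative
     parameter values: if the zones held 20MB live after the last
     collection, GGC_MIN_HEAPSIZE is 4096 (kB) and GGC_MIN_EXPAND is
     30, then min_expand is 6MB, and a non-forced collection is
     skipped until total allocation passes 26MB.  */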
  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */
  /* NOTE drow/2004-07-28: We now always collect the main zone, but
     keep this code in case the heuristics are further refined.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
        {
          zone->was_collected = false;
          marked |= ggc_collect_1 (zone, !marked);
        }
    }

#ifdef GATHER_STATISTICS
  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          if (zone->was_collected)
            {
              float f = calculate_average_page_survival (zone);
              printf ("Average page survival in zone `%s' is %f\n",
                      zone->name, f);
            }
        }
    }
#endif

  if (marked)
    zone_free_marks ();

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
        {
          struct alloc_zone *dead_zone = zone->next_zone;

          printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

          /* The zone must be empty.  */
          gcc_assert (!dead_zone->allocated);

          /* Unchain the dead zone, release all its pages and free it.  */
          zone->next_zone = zone->next_zone->next_zone;
          release_pages (dead_zone);
          free (dead_zone);
        }
    }

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
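
/* SCALE and LABEL are always applied together to the same value, as in

     fprintf (stderr, "%10lu%c\n", SCALE (n), LABEL (n));

   Values below 10kB print in bytes with a blank unit, values below
   10MB in kilobytes with 'k', and anything larger in megabytes with
   'M'; e.g. 15000000 bytes prints as "14M" (15000000 / 1048576 == 14).  */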
void
ggc_print_statistics (void)
{
  struct alloc_zone *zone;
  struct ggc_statistics stats;
  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
  size_t pte_overhead, i;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  ggc_force_collect = true;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  ggc_force_collect = false;

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    release_pages (zone);

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");

  fprintf (stderr, "%20s %10s %10s %10s\n",
           "Zone", "Allocated", "Used", "Overhead");
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct large_page_entry *large_page;
      size_t overhead, allocated, in_use;

      /* Skip empty zones.  */
      if (!zone->pages && !zone->large_pages)
        continue;

      allocated = in_use = 0;

      overhead = sizeof (struct alloc_zone);

      for (large_page = zone->large_pages; large_page != NULL;
           large_page = large_page->next)
        {
          allocated += large_page->bytes;
          in_use += large_page->bytes;
          overhead += sizeof (struct large_page_entry);
        }

      /* There's no easy way to walk through the small pages finding
         used and unused objects.  Instead, add all the pages, and
         subtract out the free list.  */

      allocated += GGC_PAGE_SIZE * zone->n_small_pages;
      in_use += GGC_PAGE_SIZE * zone->n_small_pages;
      overhead += G.small_page_overhead * zone->n_small_pages;

      for (i = 0; i <= NUM_FREE_BINS; i++)
        {
          struct alloc_chunk *chunk = zone->free_chunks[i];
          while (chunk)
            {
              in_use -= ggc_get_size (chunk);
              chunk = chunk->next_free;
            }
        }

      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
               zone->name,
               SCALE (allocated), LABEL (allocated),
               SCALE (in_use), LABEL (in_use),
               SCALE (overhead), LABEL (overhead));

      gcc_assert (in_use == zone->allocated);

      total_overhead += overhead;
      total_allocated += zone->allocated;
      total_bytes_mapped += zone->bytes_mapped;
    }

  /* Count the size of the page table as best we can.  */
#if HOST_BITS_PER_PTR <= 32
  pte_overhead = sizeof (G.lookup);
  for (i = 0; i < PAGE_L1_SIZE; i++)
    if (G.lookup[i])
      pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
#else
  {
    page_table table = G.lookup;
    pte_overhead = 0;
    while (table)
      {
        pte_overhead += sizeof (*table);
        for (i = 0; i < PAGE_L1_SIZE; i++)
          if (table->table[i])
            pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
        table = table->next;
      }
  }
#endif
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
           "", "", SCALE (pte_overhead), LABEL (pte_overhead));
  total_overhead += pte_overhead;

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
           SCALE (total_allocated), LABEL (total_allocated),
           SCALE (total_overhead), LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    for (zone = G.zones; zone; zone = zone->next_zone)
      {
        all_overhead += zone->stats.total_overhead;
        all_allocated += zone->stats.total_allocated;

        all_allocated_under32 += zone->stats.total_allocated_under32;
        all_overhead_under32 += zone->stats.total_overhead_under32;

        all_allocated_under64 += zone->stats.total_allocated_under64;
        all_overhead_under64 += zone->stats.total_overhead_under64;

        all_allocated_under128 += zone->stats.total_allocated_under128;
        all_overhead_under128 += zone->stats.total_overhead_under128;

        fprintf (stderr, "%20s: %10lld\n",
                 zone->name, zone->stats.total_allocated);
      }

    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead:             %10lld\n",
             all_overhead);
    fprintf (stderr, "Total Allocated:            %10lld\n",
             all_allocated);

    fprintf (stderr, "Total Overhead  under  32B: %10lld\n",
             all_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B: %10lld\n",
             all_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B: %10lld\n",
             all_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B: %10lld\n",
             all_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B: %10lld\n",
             all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
             all_allocated_under128);
  }
#endif
}

/* Precompiled header support.  */

/* For precompiled headers, we sort objects based on their type.  We
   also sort various objects into their own buckets; currently this
   covers strings and IDENTIFIER_NODE trees.  The choices of how
   to sort buckets have not yet been tuned.  */

#define NUM_PCH_BUCKETS		(gt_types_enum_last + 3)

#define OTHER_BUCKET		(gt_types_enum_last + 0)
#define IDENTIFIER_BUCKET	(gt_types_enum_last + 1)
#define STRING_BUCKET		(gt_types_enum_last + 2)
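
/* The resulting bucket space, as a sketch: indices 0 through
   gt_types_enum_last - 1 hold objects of the corresponding GTY type,
   followed by the three special buckets defined above.  pch_bucket
   below maps an object to its bucket index.  */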

struct ggc_pch_ondisk
{
  size_t total;
  size_t type_totals[NUM_PCH_BUCKETS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t orig_base;
  size_t alloc_size;
  alloc_type *alloc_bits;
  size_t type_bases[NUM_PCH_BUCKETS];
  size_t start_offset;
};

/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

/* Return which of the page-aligned buckets the object at X, with type
   TYPE, should be sorted into in the PCH.  Strings will have
   IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
   of unknown type will also have TYPE equal to gt_types_enum_last.  */

static int
pch_bucket (void *x, enum gt_types_enum type,
            bool is_string)
{
  /* Sort identifiers into their own bucket, to improve locality
     when searching the identifier hash table.  */
  if (type == gt_ggc_e_14lang_tree_node
      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
    return IDENTIFIER_BUCKET;
  else if (type == gt_types_enum_last)
    {
      if (is_string)
        return STRING_BUCKET;
      return OTHER_BUCKET;
    }
  return type;
}

/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string, enum gt_types_enum type)
{
  /* NOTE: Right now we don't need to align up the size of any objects.
     Strings can be unaligned, and everything else is allocated to a
     MAX_ALIGNMENT boundary already.  */

  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  int i;
  size_t alloc_size, total_size;

  total_size = 0;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
      total_size += d->d.type_totals[i];
    }
  d->d.total = total_size;

  /* Include the size of the allocation bitmap.  */
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
  d->alloc_size = alloc_size;

  return d->d.total + alloc_size;
}
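
/* A worked example with illustrative sizes: with a 4kB GGC_PAGE_SIZE,
   a bucket containing 5000 bytes of objects rounds up to 8192 bytes.
   The bitmap needs one bit per BYTES_PER_ALLOC_BIT bytes of object
   space, i.e. CEIL (total, BYTES_PER_ALLOC_BIT * 8) bytes, rounded up
   to MAX_ALIGNMENT.  */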

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
  int i;
  size_t base = (size_t) base_;

  d->base = d->orig_base = base;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->type_bases[i] = base;
      base += d->d.type_totals[i];
    }

  if (d->alloc_bits == NULL)
    d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
}

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
                      size_t size, bool is_string,
                      enum gt_types_enum type)
{
  size_t alloc_word, alloc_bit;
  char *result;
  int bucket = pch_bucket (x, type, is_string);

  /* Record the start of the object in the allocation bitmap.  We
     can't assert that the allocation bit is previously clear, because
     strings may violate the invariant that they are at least
     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
     should not be called for strings.  */
  alloc_word = ((d->type_bases[bucket] - d->orig_base)
                / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
               / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
  d->alloc_bits[alloc_word] |= 1L << alloc_bit;

  /* Place the object at the current pointer for this bucket.  */
  result = (char *) d->type_bases[bucket];
  d->type_bases[bucket] += size;
  return result;
}
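
/* A worked example of the bitmap arithmetic above, assuming an 8-bit
   alloc_type and a BYTES_PER_ALLOC_BIT of 8 (illustrative values): an
   object starting 200 bytes past orig_base has bit index 200 / 8 = 25,
   giving alloc_word = 25 / 8 = 3 and alloc_bit = 25 % 8 = 1.  */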

/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d,
                       FILE *f)
{
  /* We seek around a lot while writing.  Record where the end
     of the padding in the PCH file is, so that we can
     locate each object's offset.  */
  d->start_offset = ftell (f);
}

/* Write out object X of SIZE to file F, at the location NEWX chosen
   for it by ggc_pch_alloc_object.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
                      FILE *f, void *x, void *newx,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");
}
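
/* Each object is written at (NEWX - orig_base) + start_offset, so the
   on-disk image mirrors the layout the objects will have in memory
   once the PCH is mapped back in at its original base address.  */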

/* Finish writing the PCH data: emit the allocation bitmap and the
   on-disk footer, then release D.  */

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation bitmap.  */
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");

  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  /* Done with the PCH, so write out our footer.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  free (d->alloc_bits);
  free (d);
}
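
/* The PCH region produced by the routines above is therefore laid
   out as follows (a sketch based on the writers above):

     start_offset:            object data, d.total bytes, in
                              GGC_PAGE_SIZE-aligned buckets
     start_offset + d.total:  allocation bitmap, alloc_size bytes
     after the bitmap:        the struct ggc_pch_ondisk footer

   ggc_pch_read below expects this same layout.  */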

/* The PCH file from F has been mapped at ADDR.  Read in any
   additional data from the file and set up the GC state.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  size_t alloc_size;
  struct alloc_zone *zone;
  struct page_entry *pch_page;
  char *p;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);

  pch_zone.bytes = d.total;
  pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
  pch_zone.page = (char *) addr;
  pch_zone.end = (char *) pch_zone.alloc_bits;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
#ifdef GATHER_STATISTICS
  zone_allocate_marks ();
  ggc_prune_overhead_list ();
  zone_free_marks ();
#endif

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page, *next_page;
      struct large_page_entry *large_page, *next_large_page;

      zone->allocated = 0;

      /* Clear the zone's free chunk list.  */
      memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
      zone->high_free_bin = 0;
      zone->cached_free = NULL;
      zone->cached_free_size = 0;

      /* Move all the small pages onto the free list.  */
      for (page = zone->pages; page != NULL; page = next_page)
        {
          next_page = page->next;
          memset (page->alloc_bits, 0,
                  G.small_page_overhead - PAGE_OVERHEAD);
          free_small_page (page);
        }

      /* Discard all the large pages.  */
      for (large_page = zone->large_pages; large_page != NULL;
           large_page = next_large_page)
        {
          next_large_page = large_page->next;
          free_large_page (large_page);
        }

      zone->pages = NULL;
      zone->large_pages = NULL;
    }

  /* Allocate the dummy page entry for the PCH, and set all pages
     mapped into the PCH to reference it.  */
  pch_page = XCNEW (struct page_entry);
  pch_page->page = pch_zone.page;
  pch_page->pch_p = true;

  for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
    set_page_table_entry (p, pch_page);
}
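
/* Sharing a single dummy page_entry with pch_p set across the whole
   mapping is what lets the collector recognize PCH storage cheaply: a
   page-table lookup on any address inside it finds an entry flagged
   as PCH, and since the PCH pages appear on no zone's page list they
   are never swept.  */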