gcc/ggc-zone.c
1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
3 2010 Free Software Foundation, Inc.
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
7 <dan@codesourcery.com>.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "toplev.h"
33 #include "flags.h"
34 #include "ggc.h"
35 #include "timevar.h"
36 #include "params.h"
37 #include "bitmap.h"
38 #include "plugin.h"
40 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
41 file open. Prefer either to valloc. */
42 #ifdef HAVE_MMAP_ANON
43 # undef HAVE_MMAP_DEV_ZERO
45 # include <sys/mman.h>
46 # ifndef MAP_FAILED
47 # define MAP_FAILED -1
48 # endif
49 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
50 # define MAP_ANONYMOUS MAP_ANON
51 # endif
52 # define USING_MMAP
53 #endif
55 #ifdef HAVE_MMAP_DEV_ZERO
56 # include <sys/mman.h>
57 # ifndef MAP_FAILED
58 # define MAP_FAILED -1
59 # endif
60 # define USING_MMAP
61 #endif
63 #ifndef USING_MMAP
64 #error Zone collector requires mmap
65 #endif
67 #if (GCC_VERSION < 3001)
68 #define prefetch(X) ((void) X)
69 #define prefetchw(X) ((void) X)
70 #else
71 #define prefetch(X) __builtin_prefetch (X)
72 #define prefetchw(X) __builtin_prefetch (X, 1, 3)
73 #endif
75 /* FUTURE NOTES:
77 If we track inter-zone pointers, we can mark single zones at a
78 time.
80 If we have a zone where we guarantee no inter-zone pointers, we
81 could mark that zone separately.
83 The garbage zone should not be marked, and we should return 1 in
84 ggc_set_mark for any object in the garbage zone, which cuts off
85 marking quickly. */
87 /* Strategy:
89 This garbage-collecting allocator segregates objects into zones.
90 It also segregates objects into "large" and "small" bins. Large
91 objects are greater than page size.
93 Pages for small objects are broken up into chunks. The page has
94 a bitmap which marks the start position of each chunk (whether
95 allocated or free). Free chunks are on one of the zone's free
96 lists and contain a pointer to the next free chunk. Chunks in
97 most of the free lists have a fixed size determined by the
98 free list. Chunks in the "other" sized free list have their size
99 stored right after their chain pointer.
101 Empty pages (of all sizes) are kept on a single page cache list,
102 and are considered first when new pages are required; they are
103 deallocated at the start of the next collection if they haven't
104 been recycled by then. The free page list is currently per-zone. */
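
/* Illustration (added for exposition, not from the original notes):
   with the default 4096-byte GC pages and an 8-byte allocation
   granularity, a small page carries a 512-bit start bitmap (sixteen
   32-bit words).  An object's size is recovered by scanning from the
   bit after its start bit to the next set bit, so small objects need
   no per-object header.  The exact granularity is MAX_ALIGNMENT and
   therefore host-dependent.  */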
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  struct alloc_chunk *next_free;
  unsigned int size;
};

/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD   (offsetof (struct small_page_entry, alloc_bits))

/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE	4096
#define GGC_PAGE_MASK	(GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT	12

#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE	G.pagesize
#define GGC_PAGE_MASK	G.page_mask
#define GGC_PAGE_SHIFT	G.lg_pagesize
#endif

/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE

/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate...  */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */

#define NUM_FREE_BINS		64
#define FREE_BIN_DELTA		MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
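
/* Worked example (illustrative; MAX_ALIGNMENT is host-dependent): if
   FREE_BIN_DELTA is 8, a 24-byte chunk maps to bin
   SIZE_BIN_DOWN (24) == 3, while anything larger than
   NUM_FREE_BINS * FREE_BIN_DELTA == 512 bytes lands in the "other"
   list, bin 0, where chunks also record their size explicitly.  */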
/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT	MAX_ALIGNMENT

/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity
   to sweeping.  */
#define BYTES_PER_MARK_BIT	BYTES_PER_ALLOC_BIT

#define BYTES_PER_MARK_WORD	(8 * BYTES_PER_MARK_BIT * sizeof (mark_type))

/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
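
/* E.g. ROUND_UP (10, 8) == CEIL (10, 8) * 8 == 2 * 8 == 16, assuming
   the usual CEIL from system.h, which divides rounding upwards.  */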
/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)

/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;

/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};

/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32          |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
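
/* Worked example (illustrative): with GGC_PAGE_SHIFT == 12 and
   PAGE_L1_BITS == 8, PAGE_L2_BITS is 12.  For the 32-bit address
   0xdeadbeef, LOOKUP_L1 yields 0xde (the top 8 bits) and LOOKUP_L2
   yields 0xadb (the next 12 bits); the low 12 bits, 0xeef, are the
   offset within the 4096-byte page and are ignored here.  */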
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;

/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;

/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;

/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;

#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Return NULL if the object wasn't allocated via the GC.  */

static inline page_entry *
lookup_page_table_if_allocated (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return NULL;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  if (! base[L1])
    return NULL;

  L2 = LOOKUP_L2 (p);
  if (L2 >= PAGE_L2_SIZE)
    return NULL;
  /* We might have a page entry which does not correspond exactly to a
     system page.  */
  if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
    return NULL;

  return base[L1][L2];
}

/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}

/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
	  % (8 * sizeof (alloc_type)));
}

/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
	  % (8 * sizeof (mark_type)));
}
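
/* Worked example for the four helpers above (illustrative; assumes
   BYTES_PER_ALLOC_BIT == 8 and 32-bit alloc_type/mark_type): an
   object at page offset 0x128 corresponds to allocation bit
   0x128 / 8 == 37, i.e. word 37 / 32 == 1, bit 37 % 32 == 5.  The
   mark-bit helpers compute the same pair for as long as
   BYTES_PER_MARK_BIT equals BYTES_PER_ALLOC_BIT.  */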
/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}

/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
			     const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}

/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
		    size_t start_word, size_t start_bit,
		    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
	/* indx is 1-based.  We started at the bit after the object's
	   start, but we also ended at the bit after the object's end.
	   It cancels out.  */
	return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
	return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
	return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}
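
/* Illustrative trace (not from the original): suppose an object
   starts at allocation bit 3 and the next object starts at bit 7 of
   the same word.  The callers pass in bit 4 (the bit just after the
   object's start); alloc_ffs (alloc_word >> 4) finds bit 7 at
   1-based position 4, so the size comes out as
   4 * BYTES_PER_ALLOC_BIT -- the object covers allocation units 3
   through 6.  */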
/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
		       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
		     - (const char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
			     max_size);
}

/* highest_bit assumes that alloc_type is 32 bits.  */
extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];

/* Find the highest set bit in VALUE.  Returns the bit number of that
   bit, using the same values as ffs.  */
static inline alloc_type
highest_bit (alloc_type value)
{
  /* This also assumes that alloc_type is unsigned.  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  value = value ^ (value >> 1);
  return alloc_ffs (value);
}
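
/* How the smear works (illustrative): for value 0x28 (binary 101000)
   the shift-or cascade fills in every bit below the highest one,
   giving 0x3f; xor with its own right shift clears all but the top
   bit, leaving 0x20; alloc_ffs then reports 6, the ffs-style number
   of the highest set bit.  */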
/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
			 size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search for the start of the object.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}

/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
	= (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
					       mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
	{
	  page->mark_bits = cur_marks;
	  cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
	  n++;
#endif
	}
#ifdef ENABLE_CHECKING
      gcc_assert (n == zone->n_small_pages);
#endif
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
			       CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}

/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
	free (zone->mark_bits);
	zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif

/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
	  e->common.page = page + (i << GGC_PAGE_SHIFT);
	  e->common.zone = zone;
	  e->next = f;
	  f = e;
	  set_page_table_entry (e->common.page, &e->common);
	}

      zone->free_pages = f;

      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = XNEWVAR (char, needed_size);

  entry = (struct large_page_entry *) page;

  entry->next = NULL;
  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
#ifdef GATHER_STATISTICS
  entry->common.survived = 0;
#endif
  entry->mark_p = false;
  entry->bytes = size;
  entry->prev = NULL;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}


/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry,
	     entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
						SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}

/* Release a large page that is no longer needed.  */

static inline void
free_large_page (struct large_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry,
	     entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (entry->common.large_p);

  set_page_table_entry (entry->common.page, NULL);
  free (entry);
}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      while (p && p->common.page == start + len)
	{
	  next = p->next;
	  len += SMALL_PAGE_SIZE;
	  set_page_table_entry (p->common.page, NULL);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}

/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
						     sizeof (struct alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
						    + sizeof (struct alloc_chunk),
						    size
						    - sizeof (struct alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
						     sizeof (struct alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
						    + sizeof (struct alloc_chunk *),
						    size
						    - sizeof (struct alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
}
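
/* Example of the bin placement above (illustrative; assumes
   FREE_BIN_DELTA == 8): a 512-byte chunk maps to bin 64, the last
   fixed-size bin, and needs no size field; a 520-byte chunk maps past
   NUM_FREE_BINS and is threaded onto bin 0, where its size is stored
   in the chunk itself for the first-fit search in
   ggc_alloc_zone_stat.  */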
/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

void *
ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
		     MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
	{
	  zone->cached_free += size;
	  zone_set_object_alloc_bit (zone->cached_free);
	}

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
	 and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
	 == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
	 may now be wrong, if this was the last chunk in the high bin.
	 Rather than fixing it up now, wait until we need to search
	 the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
	 the watermark.  */
      while (zone->high_free_bin > bin
	     && zone->free_chunks[zone->high_free_bin] == NULL)
	zone->high_free_bin--;

      if (zone->high_free_bin > bin)
	{
	  size_t tbin = zone->high_free_bin;
	  chunk = zone->free_chunks[tbin];

	  /* Remove the chunk from its previous bin.  */
	  zone->free_chunks[tbin] = chunk->next_free;

	  result = (char *) chunk;

	  /* Save the rest of the chunk for future allocation.  */
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

	  /* Mark the new free chunk as an object, so that we can
	     find the size of the newly allocated object.  */
	  zone_set_object_alloc_bit (chunk);

	  /* HIGH_FREE_BIN may now be wrong, if this was the last
	     chunk in the high bin.  Rather than fixing it up now,
	     wait until we need to search the free bins.  */

	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
	 left over.  */
      csize = chunk->size;
      if (csize > size)
	{
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = csize - size;

	  /* Mark the new free chunk as an object.  */
	  zone_set_object_alloc_bit (chunk);
	}

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
	zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
	free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
						size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
	zone->stats.total_overhead_under32 += overhead;
	zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
	zone->stats.total_overhead_under64 += overhead;
	zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
	zone->stats.total_overhead_under128 += overhead;
	zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
	     (unsigned long) size, result);

  return result;
}

/* Allocate a chunk of SIZE bytes with type information GTE, in an
   appropriate zone for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
		      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_alloc_zone_pass_stat (size, &main_zone);
    }
}

/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_alloc_zone_pass_stat (size, &main_zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE) \
  do {									\
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE)));	\
    memset ((PTR), 0xa5, (SIZE));					\
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE)));	\
  } while (0)
#else
#define poison_region(PTR, SIZE)
#endif

/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
	large_page->prev->next = large_page->next;
      else
	{
	  gcc_assert (large_page->common.zone->large_pages == large_page);
	  large_page->common.zone->large_pages = large_page->next;
	}
      if (large_page->next)
	large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
	 since we are likely to want a chunk of this size again.  */
      free_chunk ((char *)p, size, page->zone);
    }
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_if_allocated (p);
  if (! entry)
    return;

  if (entry->pch_p)
    {
      size_t alloc_word, alloc_bit, t;
      t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
      alloc_word = t / (8 * sizeof (alloc_type));
      alloc_bit = t % (8 * sizeof (alloc_type));
      offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
					alloc_bit);
    }
  else if (entry->large_p)
    {
      struct large_page_entry *le = (struct large_page_entry *) entry;
      offset = ((const char *) p) - entry->page;
      gcc_assert (offset < le->bytes);
    }
  else
    {
      struct small_page_entry *se = (struct small_page_entry *) entry;
      unsigned int start_word = zone_get_object_alloc_word (p);
      unsigned int start_bit = zone_get_object_alloc_bit (p);
      offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);

      /* On some platforms a char* will not necessarily line up on an
	 allocation boundary, so we have to update the offset to
	 account for the leftover bytes.  */
      offset += (size_t) p % BYTES_PER_ALLOC_BIT;
    }

  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
	 of an allocated object.  We assume it points to the middle of
	 a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  /* Inefficient, but also unlikely to matter.  */
  ggc_set_mark (p);
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
	return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      if (large_page->mark_p)
	return 1;
      large_page->mark_p = true;
    }
  else
    {
      struct small_page_entry *small_page
	= (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
	  & (1 << zone_get_object_mark_bit (p)))
	return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
	|= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
    }

  if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      return large_page->mark_p;
    }
  else
    {
      struct small_page_entry *small_page
	= (struct small_page_entry *) page;

      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
		   & (1 << zone_get_object_mark_bit (p)));
    }
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t alloc_word, alloc_bit, offset, max_size;
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
				 max_size);
    }

  if (page->large_p)
    return ((struct large_page_entry *)page)->bytes;
  else
    return zone_find_object_size ((struct small_page_entry *) page, p);
}

/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */

  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
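  /* Illustrative numbers: with 4096-byte system pages this is
     16 * 4096 / 4096 == 16 GC pages (64KB per mmap batch); with 64KB
     system pages it is 256 GC pages -- either way, sixteen system
     pages' worth per call to alloc_anon.  */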
1654 /* Calculate the size of the allocation bitmap and other overhead. */
1655 /* Right now we allocate bits for the page header and bitmap. These
1656 are wasted, but a little tricky to eliminate. */
1657 G.small_page_overhead
1658 = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
1659 /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */
1661 #ifdef HAVE_MMAP_DEV_ZERO
1662 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1663 gcc_assert (G.dev_zero_fd != -1);
1664 #endif
1666 #if 0
1667 G.debug_file = fopen ("ggc-mmap.debug", "w");
1668 setlinebuf (G.debug_file);
1669 #else
1670 G.debug_file = stdout;
1671 #endif
1673 #ifdef USING_MMAP
1674 /* StunOS has an amazing off-by-one error for the first mmap allocation
1675 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1676 believe, is an unaligned page allocation, which would cause us to
1677 hork badly if we tried to use it. */
1679 char *p = alloc_anon (NULL, G.pagesize, &main_zone);
1680 struct small_page_entry *e;
1681 if ((size_t)p & (G.pagesize - 1))
1683 /* How losing. Discard this one and try another. If we still
1684 can't get something useful, give up. */
1686 p = alloc_anon (NULL, G.pagesize, &main_zone);
1687 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1690 if (GGC_PAGE_SIZE == G.pagesize)
1692 /* We have a good page, might as well hold onto it... */
1693 e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
1694 e->common.page = p;
1695 e->common.zone = &main_zone;
1696 e->next = main_zone.free_pages;
1697 set_page_table_entry (e->common.page, &e->common);
1698 main_zone.free_pages = e;
1700 else
1702 munmap (p, G.pagesize);
1705 #endif
1708 /* Start a new GGC zone. */
1710 static void
1711 new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
1713 new_zone->name = name;
1714 new_zone->next_zone = G.zones->next_zone;
1715 G.zones->next_zone = new_zone;
1718 struct alloc_zone *
1719 new_ggc_zone (const char * name)
1721 struct alloc_zone *new_zone = XCNEW (struct alloc_zone);
1722 new_ggc_zone_1 (new_zone, name);
1723 return new_zone;
1726 /* Destroy a GGC zone. */
1727 void
1728 destroy_ggc_zone (struct alloc_zone * dead_zone)
1730 struct alloc_zone *z;
1732 for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
1733 /* Just find that zone. */
1734 continue;
1736 /* We should have found the zone in the list. Anything else is fatal. */
1737 gcc_assert (z);
1739 /* z is dead, baby. z is dead. */
1740 z->dead = true;
1743 /* Free all empty pages and objects within a page for a given zone */
1745 static void
1746 sweep_pages (struct alloc_zone *zone)
1748 struct large_page_entry **lpp, *lp, *lnext;
1749 struct small_page_entry **spp, *sp, *snext;
1750 char *last_free;
1751 size_t allocated = 0;
1752 bool nomarksinpage;
1754 /* First, reset the free_chunks lists, since we are going to
1755 re-free free chunks in hopes of coalescing them into large chunks. */
1756 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1757 zone->high_free_bin = 0;
1758 zone->cached_free = NULL;
1759 zone->cached_free_size = 0;
1761 /* Large pages are all or none affairs. Either they are completely
1762 empty, or they are completely full. */
1763 lpp = &zone->large_pages;
1764 for (lp = zone->large_pages; lp != NULL; lp = lnext)
1766 gcc_assert (lp->common.large_p);
1768 lnext = lp->next;
1770 #ifdef GATHER_STATISTICS
1771 /* This page has now survived another collection. */
1772 lp->common.survived++;
1773 #endif
1775 if (lp->mark_p)
1777 lp->mark_p = false;
1778 allocated += lp->bytes;
1779 lpp = &lp->next;
1781 else
1783 *lpp = lnext;
1784 #ifdef ENABLE_GC_CHECKING
1785 /* Poison the page. */
1786 memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
1787 #endif
1788 if (lp->prev)
1789 lp->prev->next = lp->next;
1790 if (lp->next)
1791 lp->next->prev = lp->prev;
1792 free_large_page (lp);
1796 spp = &zone->pages;
1797 for (sp = zone->pages; sp != NULL; sp = snext)
1799 char *object, *last_object;
1800 char *end;
1801 alloc_type *alloc_word_p;
1802 mark_type *mark_word_p;
1804 gcc_assert (!sp->common.large_p);
1806 snext = sp->next;
1808 #ifdef GATHER_STATISTICS
1809 /* This page has now survived another collection. */
1810 sp->common.survived++;
1811 #endif
1813 /* Step through all chunks, consolidate those that are free and
1814 insert them into the free lists. Note that consolidation
1815 slows down collection slightly. */
1817 last_object = object = sp->common.page;
1818 end = sp->common.page + SMALL_PAGE_SIZE;
1819 last_free = NULL;
1820 nomarksinpage = true;
1821 mark_word_p = sp->mark_bits;
1822 alloc_word_p = sp->alloc_bits;
1824 gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);
1826 object = sp->common.page;
1829 unsigned int i, n;
1830 alloc_type alloc_word;
1831 mark_type mark_word;
1833 alloc_word = *alloc_word_p++;
1834 mark_word = *mark_word_p++;
1836 if (mark_word)
1837 nomarksinpage = false;
1839 /* There ought to be some way to do this without looping... */
1840 i = 0;
1841 while ((n = alloc_ffs (alloc_word)) != 0)
1843 /* Extend the current state for n - 1 bits. We can't
1844 shift alloc_word by n, even though it isn't used in the
1845 loop, in case only the highest bit was set. */
1846 alloc_word >>= n - 1;
1847 mark_word >>= n - 1;
1848 object += BYTES_PER_MARK_BIT * (n - 1);
1850 if (mark_word & 1)
1852 if (last_free)
1854 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
1855 object
1856 - last_free));
1857 poison_region (last_free, object - last_free);
1858 free_chunk (last_free, object - last_free, zone);
1859 last_free = NULL;
1861 else
1862 allocated += object - last_object;
1863 last_object = object;
1865 else
1867 if (last_free == NULL)
1869 last_free = object;
1870 allocated += object - last_object;
1872 else
1873 zone_clear_object_alloc_bit (sp, object);
1876 /* Shift to just after the alloc bit we handled. */
1877 alloc_word >>= 1;
1878 mark_word >>= 1;
1879 object += BYTES_PER_MARK_BIT;
1881 i += n;
1884 object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
1886 while (object < end);
1888 if (nomarksinpage)
1890 *spp = snext;
1891 #ifdef ENABLE_GC_CHECKING
1892 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
1893 SMALL_PAGE_SIZE));
1894 /* Poison the page. */
1895 memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
1896 #endif
1897 free_small_page (sp);
1898 continue;
1900 else if (last_free)
1902 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
1903 object - last_free));
1904 poison_region (last_free, object - last_free);
1905 free_chunk (last_free, object - last_free, zone);
1907 else
1908 allocated += object - last_object;
1910 spp = &sp->next;
1913 zone->allocated = allocated;
1916 /* mark-and-sweep routine for collecting a single zone. NEED_MARKING
1917 is true if we need to mark before sweeping, false if some other
1918 zone collection has already performed marking for us. Returns true
1919 if we collected, false otherwise. */
1921 static bool
1922 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1924 #if 0
1925 /* */
1927 int i;
1928 for (i = 0; i < NUM_FREE_BINS + 1; i++)
1930 struct alloc_chunk *chunk;
1931 int n, tot;
1933 n = 0;
1934 tot = 0;
1935 chunk = zone->free_chunks[i];
1936 while (chunk)
1938 n++;
1939 tot += chunk->size;
1940 chunk = chunk->next_free;
1942 fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
1943 i, n, tot);
1946 /* */
1947 #endif
1949 if (!quiet_flag)
1950 fprintf (stderr, " {%s GC %luk -> ",
1951 zone->name, (unsigned long) zone->allocated / 1024);
1953 /* Zero the total allocated bytes. This will be recalculated in the
1954 sweep phase. */
1955 zone->allocated = 0;
1957 /* Release the pages we freed the last time we collected, but didn't
1958 reuse in the interim. */
1959 release_pages (zone);
1961 if (need_marking)
1963 zone_allocate_marks ();
1964 ggc_mark_roots ();
1965 #ifdef GATHER_STATISTICS
1966 ggc_prune_overhead_list ();
1967 #endif
1970 sweep_pages (zone);
1971 zone->was_collected = true;
1972 zone->allocated_last_gc = zone->allocated;
1974 if (!quiet_flag)
1975 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1976 return true;

#ifdef GATHER_STATISTICS
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  struct small_page_entry *p;
  struct large_page_entry *lp;

  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->common.survived;
    }

  for (lp = zone->large_pages; lp; lp = lp->next)
    {
      count += 1.0;
      survival += lp->common.survived;
    }

  return survival / count;
}
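
/* For example, a zone with three pages that have survived 1, 2 and 4
   collections averages (1 + 2 + 4) / 3, i.e. about 2.33.  Note that a
   zone with no pages at all would compute 0.0 / 0.0 here; this is
   debug-only output guarded by GATHER_STATISTICS.  */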
#endif

/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;

  timevar_push (TV_GC);
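
  /* Worked example of the heuristic below (the parameter values are
     illustrative assumptions, not necessarily the configured defaults):
     with GGC_MIN_HEAPSIZE = 4096 kB and GGC_MIN_EXPAND = 30, a heap
     that held 8 MB after the previous collection must grow beyond
     8 MB + 30% (about 10.4 MB) before we collect again.  */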

  if (!ggc_force_collect)
    {
      float allocated_last_gc = 0, allocated = 0, min_expand;

      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          allocated_last_gc += zone->allocated_last_gc;
          allocated += zone->allocated;
        }

      allocated_last_gc =
        MAX (allocated_last_gc,
             (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (allocated < allocated_last_gc + min_expand)
        {
          timevar_pop (TV_GC);
          return;
        }
    }

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */
  /* NOTE drow/2004-07-28: We now always collect the main zone, but
     keep this code in case the heuristics are further refined.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
        {
          zone->was_collected = false;
          marked |= ggc_collect_1 (zone, !marked);
        }
    }

#ifdef GATHER_STATISTICS
  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          if (zone->was_collected)
            {
              float f = calculate_average_page_survival (zone);
              printf ("Average page survival in zone `%s' is %f\n",
                      zone->name, f);
            }
        }
    }
#endif

  if (marked)
    zone_free_marks ();

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
        {
          struct alloc_zone *dead_zone = zone->next_zone;

          printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

          /* The zone must be empty.  */
          gcc_assert (!dead_zone->allocated);

          /* Unchain the dead zone, release all its pages and free it.  */
          zone->next_zone = zone->next_zone->next_zone;
          release_pages (dead_zone);
          free (dead_zone);
        }
    }

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);
}

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
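
/* For instance (illustrative only): a value of 9000 prints unscaled as
   "9000 ", 15000 prints as "14k" (15000 >= 10*1024 but < 10*1024*1024),
   and 64 MB prints as "64M", e.g. via
   fprintf (stderr, "%lu%c", SCALE (n), LABEL (n)).  */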

void
ggc_print_statistics (void)
{
  struct alloc_zone *zone;
  struct ggc_statistics stats;
  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
  size_t pte_overhead, i;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  ggc_force_collect = true;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  ggc_force_collect = false;

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    release_pages (zone);

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");

  fprintf (stderr, "%20s %10s %10s %10s\n",
           "Zone", "Allocated", "Used", "Overhead");
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct large_page_entry *large_page;
      size_t overhead, allocated, in_use;

      /* Skip empty zones.  */
      if (!zone->pages && !zone->large_pages)
        continue;

      allocated = in_use = 0;

      overhead = sizeof (struct alloc_zone);

      for (large_page = zone->large_pages; large_page != NULL;
           large_page = large_page->next)
        {
          allocated += large_page->bytes;
          in_use += large_page->bytes;
          overhead += sizeof (struct large_page_entry);
        }

      /* There's no easy way to walk through the small pages finding
         used and unused objects.  Instead, add all the pages, and
         subtract out the free list.  */

      allocated += GGC_PAGE_SIZE * zone->n_small_pages;
      in_use += GGC_PAGE_SIZE * zone->n_small_pages;
      overhead += G.small_page_overhead * zone->n_small_pages;

      for (i = 0; i <= NUM_FREE_BINS; i++)
        {
          struct alloc_chunk *chunk = zone->free_chunks[i];
          while (chunk)
            {
              in_use -= ggc_get_size (chunk);
              chunk = chunk->next_free;
            }
        }

      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
               zone->name,
               SCALE (allocated), LABEL (allocated),
               SCALE (in_use), LABEL (in_use),
               SCALE (overhead), LABEL (overhead));

      gcc_assert (in_use == zone->allocated);

      total_overhead += overhead;
      total_allocated += zone->allocated;
      total_bytes_mapped += zone->bytes_mapped;
    }

  /* Count the size of the page table as best we can.  */
#if HOST_BITS_PER_PTR <= 32
  pte_overhead = sizeof (G.lookup);
  for (i = 0; i < PAGE_L1_SIZE; i++)
    if (G.lookup[i])
      pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
#else
  {
    page_table table = G.lookup;
    pte_overhead = 0;
    while (table)
      {
        pte_overhead += sizeof (*table);
        for (i = 0; i < PAGE_L1_SIZE; i++)
          if (table->table[i])
            pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
        table = table->next;
      }
  }
#endif
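
  /* For instance, if PAGE_L2_SIZE were 1024, every L1 slot in use would
     account for 1024 page-entry pointers (4 kB with 32-bit pointers) on
     top of the fixed size of the lookup structure itself.  */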
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
           "", "", SCALE (pte_overhead), LABEL (pte_overhead));
  total_overhead += pte_overhead;

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
           SCALE (total_allocated), LABEL (total_allocated),
           SCALE (total_overhead), LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    for (zone = G.zones; zone; zone = zone->next_zone)
      {
        all_overhead += zone->stats.total_overhead;
        all_allocated += zone->stats.total_allocated;

        all_allocated_under32 += zone->stats.total_allocated_under32;
        all_overhead_under32 += zone->stats.total_overhead_under32;

        all_allocated_under64 += zone->stats.total_allocated_under64;
        all_overhead_under64 += zone->stats.total_overhead_under64;

        all_allocated_under128 += zone->stats.total_allocated_under128;
        all_overhead_under128 += zone->stats.total_overhead_under128;

        fprintf (stderr, "%20s: %10llu\n", zone->name,
                 (unsigned long long) zone->stats.total_allocated);
      }

    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead: %10llu\n",
             all_overhead);
    fprintf (stderr, "Total Allocated: %10llu\n",
             all_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10llu\n",
             all_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10llu\n",
             all_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10llu\n",
             all_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10llu\n",
             all_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10llu\n",
             all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10llu\n",
             all_allocated_under128);
  }
#endif
}

/* Precompiled header support.  */

/* For precompiled headers, we sort objects based on their type.  We
   also sort various objects into their own buckets; currently this
   covers strings and IDENTIFIER_NODE trees.  The choices of how
   to sort buckets have not yet been tuned.  */

#define NUM_PCH_BUCKETS    (gt_types_enum_last + 3)

#define OTHER_BUCKET       (gt_types_enum_last + 0)
#define IDENTIFIER_BUCKET  (gt_types_enum_last + 1)
#define STRING_BUCKET      (gt_types_enum_last + 2)
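
/* That is, buckets 0 .. gt_types_enum_last - 1 hold objects of known GC
   type, and the three buckets above them hold everything else.  If
   gt_types_enum_last were, say, 20, identifiers would land in bucket 21
   and strings in bucket 22.  */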

struct ggc_pch_ondisk
{
  size_t total;
  size_t type_totals[NUM_PCH_BUCKETS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t orig_base;
  size_t alloc_size;
  alloc_type *alloc_bits;
  size_t type_bases[NUM_PCH_BUCKETS];
  size_t start_offset;
};

/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

/* Return which of the page-aligned buckets the object at X, with type
   TYPE, should be sorted into in the PCH.  Strings will have
   IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
   of unknown type will also have TYPE equal to gt_types_enum_last.  */

static int
pch_bucket (void *x, enum gt_types_enum type,
            bool is_string)
{
  /* Sort identifiers into their own bucket, to improve locality
     when searching the identifier hash table.  */
  if (type == gt_ggc_e_14lang_tree_node
      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
    return IDENTIFIER_BUCKET;
  else if (type == gt_types_enum_last)
    {
      if (is_string)
        return STRING_BUCKET;
      return OTHER_BUCKET;
    }
  return type;
}
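
/* So, for example, a lang_tree_node that is an IDENTIFIER_NODE is filed
   under IDENTIFIER_BUCKET; a string (TYPE == gt_types_enum_last with
   IS_STRING set) goes to STRING_BUCKET; and every other known type is
   filed under its own type-numbered bucket.  */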

/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x,
                      size_t size, bool is_string, enum gt_types_enum type)
{
  /* NOTE: Right now we don't need to align up the size of any objects.
     Strings can be unaligned, and everything else is allocated to a
     MAX_ALIGNMENT boundary already.  */

  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  int i;
  size_t alloc_size, total_size;

  total_size = 0;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
      total_size += d->d.type_totals[i];
    }
  d->d.total = total_size;

  /* Include the size of the allocation bitmap.  */
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
  d->alloc_size = alloc_size;

  return d->d.total + alloc_size;
}
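
/* Rough numbers, assuming GGC_PAGE_SIZE == 4096 and BYTES_PER_ALLOC_BIT
   == 8: a bucket holding 5000 bytes of objects rounds up to 8192, and
   each 4096-byte page of PCH data costs 4096 / (8 * 8) = 64 bytes of
   allocation bitmap.  */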

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
  int i;
  size_t base = (size_t) base_;

  d->base = d->orig_base = base;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->type_bases[i] = base;
      base += d->d.type_totals[i];
    }

  if (d->alloc_bits == NULL)
    d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
}
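
/* Illustration: with type_totals of { 8192, 4096, 12288, ... } and a
   base of 0x1000000, the buckets begin at 0x1000000, 0x1002000,
   0x1003000 and 0x1006000; each bucket is a contiguous, page-aligned
   slice of the mapped region.  */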

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
                      size_t size, bool is_string,
                      enum gt_types_enum type)
{
  size_t alloc_word, alloc_bit;
  char *result;
  int bucket = pch_bucket (x, type, is_string);

  /* Record the start of the object in the allocation bitmap.  We
     can't assert that the allocation bit is previously clear, because
     strings may violate the invariant that they are at least
     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
     should not be called for strings.  */
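  /* As a worked example of the index arithmetic (assuming
     BYTES_PER_ALLOC_BIT == 8 and 64-bit alloc_type words): an object
     starting 512 bytes past ORIG_BASE occupies allocation bit
     512 / 8 = 64, which is bit 64 % 64 = 0 of word 512 / 512 = 1.  */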
  alloc_word = ((d->type_bases[bucket] - d->orig_base)
                / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
               / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
  d->alloc_bits[alloc_word] |= 1L << alloc_bit;

  /* Place the object at the current pointer for this bucket.  */
  result = (char *) d->type_bases[bucket];
  d->type_bases[bucket] += size;
  return result;
}

/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d,
                       FILE *f)
{
  /* We seek around a lot while writing.  Record where the end
     of the padding in the PCH file is, so that we can
     locate each object's offset.  */
  d->start_offset = ftell (f);
}

/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
                      FILE *f, void *x, void *newx,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
    fatal_error ("can't seek PCH file: %m");

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
}
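
/* All objects have been written; write out the allocation bitmap and
   the on-disk footer (the ggc_pch_ondisk data, which lets ggc_pch_read
   recover the layout), then release D.  */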

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation bitmap.  */
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
    fatal_error ("can't seek PCH file: %m");

  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* Done with the PCH, so write out our footer.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  free (d->alloc_bits);
  free (d);
}

/* The PCH file from F has been mapped at ADDR.  Read in any
   additional data from the file and set up the GC state.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  size_t alloc_size;
  struct alloc_zone *zone;
  struct page_entry *pch_page;
  char *p;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);

  pch_zone.bytes = d.total;
  pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
  pch_zone.page = (char *) addr;
  pch_zone.end = (char *) pch_zone.alloc_bits;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page, *next_page;
      struct large_page_entry *large_page, *next_large_page;

      zone->allocated = 0;

      /* Clear the zone's free chunk list.  */
      memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
      zone->high_free_bin = 0;
      zone->cached_free = NULL;
      zone->cached_free_size = 0;

      /* Move all the small pages onto the free list.  */
      for (page = zone->pages; page != NULL; page = next_page)
        {
          next_page = page->next;
          memset (page->alloc_bits, 0,
                  G.small_page_overhead - PAGE_OVERHEAD);
          free_small_page (page);
        }

      /* Discard all the large pages.  */
      for (large_page = zone->large_pages; large_page != NULL;
           large_page = next_large_page)
        {
          next_large_page = large_page->next;
          free_large_page (large_page);
        }

      zone->pages = NULL;
      zone->large_pages = NULL;
    }

  /* Allocate the dummy page entry for the PCH, and set all pages
     mapped into the PCH to reference it.  */
  pch_page = XCNEW (struct page_entry);
  pch_page->page = pch_zone.page;
  pch_page->pch_p = true;

  for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
    set_page_table_entry (p, pch_page);