1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "toplev.h"
29 #include "flags.h"
30 #include "ggc.h"
31 #include "timevar.h"
32 #include "params.h"
33 #include "tree-flow.h"
35 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
36 file open. Prefer either to valloc. */
37 #ifdef HAVE_MMAP_ANON
38 # undef HAVE_MMAP_DEV_ZERO
40 # include <sys/mman.h>
41 # ifndef MAP_FAILED
42 # define MAP_FAILED -1
43 # endif
44 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
45 # define MAP_ANONYMOUS MAP_ANON
46 # endif
47 # define USING_MMAP
49 #endif
51 #ifdef HAVE_MMAP_DEV_ZERO
53 # include <sys/mman.h>
54 # ifndef MAP_FAILED
55 # define MAP_FAILED -1
56 # endif
57 # define USING_MMAP
59 #endif
61 #ifndef USING_MMAP
62 #define USING_MALLOC_PAGE_GROUPS
63 #endif
65 /* Strategy:
67 This garbage-collecting allocator allocates objects on one of a set
68 of pages. Each page can allocate objects of a single size only;
69 available sizes are powers of two starting at four bytes. The size
70 of an allocation request is rounded up to the next power of two
71 (`order'), and satisfied from the appropriate page.
73 Each page is recorded in a page-entry, which also maintains an
74 in-use bitmap of object positions on the page. This allows the
75 allocation state of a particular object to be flipped without
76 touching the page itself.
78 Each page-entry also has a context depth, which is used to track
79 pushing and popping of allocation contexts. Only objects allocated
80 in the current (highest-numbered) context may be collected.
82 Page entries are arranged in an array of singly-linked lists. The
83 array is indexed by the allocation size, in bits, of the pages on
84 it; i.e. all pages on a list allocate objects of the same size.
85 Pages are ordered on the list such that all non-full pages precede
86 all full pages, with non-full pages arranged in order of decreasing
87 context depth.
89 Empty pages (of all orders) are kept on a single page cache list,
90 and are considered first when new pages are required; they are
91 deallocated at the start of the next collection if they haven't
92 been recycled by then. */
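/* For instance (ignoring the extra non-power-of-two orders defined
   further below): a 20-byte allocation request is rounded up to 32
   bytes, i.e. order 5, and is carved out of a page on the order-5
   list; the corresponding bit in that page's in-use bitmap records
   whether the slot is live.  */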
94 /* Define GGC_DEBUG_LEVEL to print debugging information.
95 0: No debugging output.
96 1: GC statistics only.
97 2: Page-entry allocations/deallocations as well.
98 3: Object allocations as well.
99 4: Object marks as well. */
100 #define GGC_DEBUG_LEVEL (0)
102 #ifndef HOST_BITS_PER_PTR
103 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
104 #endif
107 /* A two-level tree is used to look up the page-entry for a given
108 pointer. Two chunks of the pointer's bits are extracted to index
109 the first and second levels of the tree, as follows:
111                               HOST_PAGE_SIZE_BITS
112                       32            |      |
113    msb +----------------+----+------+------+ lsb
114                         |    |      |
115                         |    |      +--- page offset (ignored)
116                         |    +--- PAGE_L2_BITS
117                         +--- PAGE_L1_BITS
119 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
120 pages are aligned on system page boundaries. The next most
121 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
122 index values in the lookup table, respectively.
124 For 32-bit architectures and the settings below, there are no
125 leftover bits. For architectures with wider pointers, the lookup
126 tree points to a list of pages, which must be scanned to find the
127 correct one. */
129 #define PAGE_L1_BITS (8)
130 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
131 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
132 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
134 #define LOOKUP_L1(p) \
135 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
137 #define LOOKUP_L2(p) \
138 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
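/* Purely illustrative example of the decomposition above, assuming a
   32-bit host and a hypothetical 4K system page (G.lg_pagesize == 12,
   so PAGE_L2_BITS == 32 - 8 - 12 == 12):

     p             == 0x12345678
     LOOKUP_L1 (p) == (p >> 24) & 0xff  == 0x12
     LOOKUP_L2 (p) == (p >> 12) & 0xfff == 0x345

   The low 12 bits (0x678) are the offset within the page and are
   ignored by the lookup.  */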
140 /* The number of objects per allocation page, for objects on a page of
141 the indicated ORDER. */
142 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
144 /* The number of objects in P. */
145 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
147 /* The size of an object on a page of the indicated ORDER. */
148 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
150 /* For speed, we avoid doing a general integer divide to locate the
151 offset in the allocation bitmap, by precalculating numbers M, S
152 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
153 within the page which is evenly divisible by the object size Z. */
154 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
155 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
156 #define OFFSET_TO_BIT(OFFSET, ORDER) \
157 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
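/* A worked example of the trick above (illustrative only, using
   32-bit arithmetic and a hypothetical object size Z == 24): the odd
   part of 24 is 3, whose inverse modulo 2^32 is 0xaaaaaaab, so
   DIV_MULT == 0xaaaaaaab and DIV_SHIFT == 3.  For the offset O == 48:

     48 * 0xaaaaaaab == 0x2000000010 == 0x10   (modulo 2^32)
     0x10 >> 3       == 2            == 48 / 24

   which is exactly the bit index OFFSET_TO_BIT is meant to produce.  */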
159 /* The number of extra orders, not corresponding to power-of-two sized
160 objects. */
162 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
164 #define RTL_SIZE(NSLOTS) \
165 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
167 #define TREE_EXP_SIZE(OPS) \
168 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
170 /* The Ith entry is the maximum size of an object to be stored in the
171 Ith extra order. Adding a new entry to this array is the *only*
172 thing you need to do to add a new special allocation size. */
174 static const size_t extra_order_size_table[] = {
175 sizeof (struct stmt_ann_d),
176 sizeof (struct var_ann_d),
177 sizeof (struct tree_decl_non_common),
178 sizeof (struct tree_field_decl),
179 sizeof (struct tree_parm_decl),
180 sizeof (struct tree_var_decl),
181 sizeof (struct tree_list),
182 sizeof (struct tree_ssa_name),
183 sizeof (struct function),
184 sizeof (struct basic_block_def),
185 sizeof (bitmap_element),
186 sizeof (bitmap_head),
187 /* PHI nodes with one to three arguments are already covered by the
188 above sizes. */
189 sizeof (struct tree_phi_node) + sizeof (struct phi_arg_d) * 3,
190 TREE_EXP_SIZE (2),
191 RTL_SIZE (2), /* MEM, PLUS, etc. */
192 RTL_SIZE (9), /* INSN */
195 /* The total number of orders. */
197 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
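/* With the 16 entries currently in extra_order_size_table and 32-bit
   pointers, NUM_ORDERS comes to 32 + 16 == 48.  (Illustrative count;
   it changes whenever the table above does.)  */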
199 /* We use this structure to determine the alignment required for
200 allocations. For power-of-two sized allocations, that's not a
201 problem, but it does matter for odd-sized allocations. */
203 struct max_alignment {
204 char c;
205 union {
206 HOST_WIDEST_INT i;
207 long double d;
208 } u;
211 /* The biggest alignment required. */
213 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
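/* For example, if the widest member of the union above needs 16-byte
   alignment, the compiler places U at offset 16 from the leading char,
   and MAX_ALIGNMENT evaluates to 16.  (Illustrative figure; the real
   value is host-dependent.)  */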
215 /* Compute the smallest nonnegative number which when added to X gives
216 a multiple of F. */
218 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
220 /* Compute the smallest multiple of F that is >= X. */
222 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
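/* E.g. ROUND_UP_VALUE (10, 8) == 6 and ROUND_UP (10, 8) == 16, so
   adding the former to 10 yields the latter.  (Illustrative values
   only.)  */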
224 /* The Ith entry is the number of objects on a page of order I. */
226 static unsigned objects_per_page_table[NUM_ORDERS];
228 /* The Ith entry is the size of an object on a page of order I. */
230 static size_t object_size_table[NUM_ORDERS];
232 /* The Ith entry is a pair of numbers (mult, shift) such that
233 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
234 for all k evenly divisible by OBJECT_SIZE(I). */
236 static struct
238 size_t mult;
239 unsigned int shift;
241 inverse_table[NUM_ORDERS];
243 /* A page_entry records the status of an allocation page. This
244 structure is dynamically sized to fit the bitmap in_use_p. */
245 typedef struct page_entry
247 /* The next page-entry with objects of the same size, or NULL if
248 this is the last page-entry. */
249 struct page_entry *next;
251 /* The previous page-entry with objects of the same size, or NULL if
252 this is the first page-entry. The PREV pointer exists solely to
253 keep the cost of ggc_free manageable. */
254 struct page_entry *prev;
256 /* The number of bytes allocated. (This will always be a multiple
257 of the host system page size.) */
258 size_t bytes;
260 /* The address at which the memory is allocated. */
261 char *page;
263 #ifdef USING_MALLOC_PAGE_GROUPS
264 /* Back pointer to the page group this page came from. */
265 struct page_group *group;
266 #endif
268 /* This is the index in the by_depth varray where this page table
269 can be found. */
270 unsigned long index_by_depth;
272 /* Context depth of this page. */
273 unsigned short context_depth;
275 /* The number of free objects remaining on this page. */
276 unsigned short num_free_objects;
278 /* A likely candidate for the bit position of a free object for the
279 next allocation from this page. */
280 unsigned short next_bit_hint;
282 /* The lg of size of objects allocated from this page. */
283 unsigned char order;
285 /* A bit vector indicating whether or not objects are in use. The
286 Nth bit is one if the Nth object on this page is allocated. This
287 array is dynamically sized. */
288 unsigned long in_use_p[1];
289 } page_entry;
291 #ifdef USING_MALLOC_PAGE_GROUPS
292 /* A page_group describes a large allocation from malloc, from which
293 we parcel out aligned pages. */
294 typedef struct page_group
296 /* A linked list of all extant page groups. */
297 struct page_group *next;
299 /* The address we received from malloc. */
300 char *allocation;
302 /* The size of the block. */
303 size_t alloc_size;
305 /* A bitmask of pages in use. */
306 unsigned int in_use;
307 } page_group;
308 #endif
310 #if HOST_BITS_PER_PTR <= 32
312 /* On 32-bit hosts, we use a two level page table, as pictured above. */
313 typedef page_entry **page_table[PAGE_L1_SIZE];
315 #else
317 /* On 64-bit hosts, we use the same two level page tables plus a linked
318 list that disambiguates the top 32-bits. There will almost always be
319 exactly one entry in the list. */
320 typedef struct page_table_chain
322 struct page_table_chain *next;
323 size_t high_bits;
324 page_entry **table[PAGE_L1_SIZE];
325 } *page_table;
327 #endif
329 /* The rest of the global variables. */
330 static struct globals
332 /* The Nth element in this array is a page with objects of size 2^N.
333 If there are any pages with free objects, they will be at the
334 head of the list. NULL if there are no page-entries for this
335 object size. */
336 page_entry *pages[NUM_ORDERS];
338 /* The Nth element in this array is the last page with objects of
339 size 2^N. NULL if there are no page-entries for this object
340 size. */
341 page_entry *page_tails[NUM_ORDERS];
343 /* Lookup table for associating allocation pages with object addresses. */
344 page_table lookup;
346 /* The system's page size. */
347 size_t pagesize;
348 size_t lg_pagesize;
350 /* Bytes currently allocated. */
351 size_t allocated;
353 /* Bytes currently allocated at the end of the last collection. */
354 size_t allocated_last_gc;
356 /* Total amount of memory mapped. */
357 size_t bytes_mapped;
359 /* Bit N set if any allocations have been done at context depth N. */
360 unsigned long context_depth_allocations;
362 /* Bit N set if any collections have been done at context depth N. */
363 unsigned long context_depth_collections;
365 /* The current depth in the context stack. */
366 unsigned short context_depth;
368 /* A file descriptor open to /dev/zero for reading. */
369 #if defined (HAVE_MMAP_DEV_ZERO)
370 int dev_zero_fd;
371 #endif
373 /* A cache of free system pages. */
374 page_entry *free_pages;
376 #ifdef USING_MALLOC_PAGE_GROUPS
377 page_group *page_groups;
378 #endif
380 /* The file descriptor for debugging output. */
381 FILE *debug_file;
383 /* Current number of elements in use in depth below. */
384 unsigned int depth_in_use;
386 /* Maximum number of elements that can be used before resizing. */
387 unsigned int depth_max;
389 /* Each element of this array is an index in by_depth where the given
390 depth starts. This structure is indexed by that given depth we
391 are interested in. */
392 unsigned int *depth;
394 /* Current number of elements in use in by_depth below. */
395 unsigned int by_depth_in_use;
397 /* Maximum number of elements that can be used before resizing. */
398 unsigned int by_depth_max;
400 /* Each element of this array is a pointer to a page_entry, all
401 page_entries can be found in here by increasing depth.
402 index_by_depth in the page_entry is the index into this data
403 structure where that page_entry can be found. This is used to
404 speed up finding all page_entries at a particular depth. */
405 page_entry **by_depth;
407 /* Each element is a pointer to the saved in_use_p bits, if any,
408 zero otherwise. We allocate them all together, to enable a
409 better runtime data access pattern. */
410 unsigned long **save_in_use;
412 #ifdef ENABLE_GC_ALWAYS_COLLECT
413 /* List of free objects to be verified as actually free on the
414 next collection. */
415 struct free_object
417 void *object;
418 struct free_object *next;
419 } *free_object_list;
420 #endif
422 #ifdef GATHER_STATISTICS
423 struct
425 /* Total memory allocated with ggc_alloc. */
426 unsigned long long total_allocated;
427 /* Total overhead for memory to be allocated with ggc_alloc. */
428 unsigned long long total_overhead;
430 /* Total allocations and overhead for sizes less than 32, 64 and 128.
431 These sizes are interesting because they are typical cache line
432 sizes. */
434 unsigned long long total_allocated_under32;
435 unsigned long long total_overhead_under32;
437 unsigned long long total_allocated_under64;
438 unsigned long long total_overhead_under64;
440 unsigned long long total_allocated_under128;
441 unsigned long long total_overhead_under128;
443 /* The allocations for each of the allocation orders. */
444 unsigned long long total_allocated_per_order[NUM_ORDERS];
446 /* The overhead for each of the allocation orders. */
447 unsigned long long total_overhead_per_order[NUM_ORDERS];
448 } stats;
449 #endif
450 } G;
452 /* The size in bytes required to maintain a bitmap for the objects
453 on a page-entry. */
454 #define BITMAP_SIZE(Num_objects) \
455 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
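/* For instance, on a host with 64-bit longs, a page holding 500
   objects needs BITMAP_SIZE (500 + 1) == CEIL (501, 64) * sizeof (long)
   == 64 bytes; the extra bit is the one-past-the-end sentinel set in
   alloc_page.  (Illustrative figures only.)  */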
457 /* Allocate pages in chunks of this size, to throttle calls to memory
458 allocation routines. The first page is used, the rest go onto the
459 free list. This cannot be larger than HOST_BITS_PER_INT for the
460 in_use bitmask for page_group. Hosts that need a different value
461 can override this by defining GGC_QUIRE_SIZE explicitly. */
462 #ifndef GGC_QUIRE_SIZE
463 # ifdef USING_MMAP
464 # define GGC_QUIRE_SIZE 256
465 # else
466 # define GGC_QUIRE_SIZE 16
467 # endif
468 #endif
470 /* Initial guess as to how many page table entries we might need. */
471 #define INITIAL_PTE_COUNT 128
473 static int ggc_allocated_p (const void *);
474 static page_entry *lookup_page_table_entry (const void *);
475 static void set_page_table_entry (void *, page_entry *);
476 #ifdef USING_MMAP
477 static char *alloc_anon (char *, size_t);
478 #endif
479 #ifdef USING_MALLOC_PAGE_GROUPS
480 static size_t page_group_index (char *, char *);
481 static void set_page_group_in_use (page_group *, char *);
482 static void clear_page_group_in_use (page_group *, char *);
483 #endif
484 static struct page_entry * alloc_page (unsigned);
485 static void free_page (struct page_entry *);
486 static void release_pages (void);
487 static void clear_marks (void);
488 static void sweep_pages (void);
489 static void ggc_recalculate_in_use_p (page_entry *);
490 static void compute_inverse (unsigned);
491 static inline void adjust_depth (void);
492 static void move_ptes_to_front (int, int);
494 void debug_print_page_list (int);
495 static void push_depth (unsigned int);
496 static void push_by_depth (page_entry *, unsigned long *);
498 /* Push an entry onto G.depth. */
500 inline static void
501 push_depth (unsigned int i)
503 if (G.depth_in_use >= G.depth_max)
505 G.depth_max *= 2;
506 G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
508 G.depth[G.depth_in_use++] = i;
511 /* Push an entry onto G.by_depth and G.save_in_use. */
513 inline static void
514 push_by_depth (page_entry *p, unsigned long *s)
516 if (G.by_depth_in_use >= G.by_depth_max)
518 G.by_depth_max *= 2;
519 G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
520 G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
521 G.by_depth_max);
523 G.by_depth[G.by_depth_in_use] = p;
524 G.save_in_use[G.by_depth_in_use++] = s;
527 #if (GCC_VERSION < 3001)
528 #define prefetch(X) ((void) X)
529 #else
530 #define prefetch(X) __builtin_prefetch (X)
531 #endif
533 #define save_in_use_p_i(__i) \
534 (G.save_in_use[__i])
535 #define save_in_use_p(__p) \
536 (save_in_use_p_i (__p->index_by_depth))
538 /* Returns nonzero if P was allocated in GC'able memory. */
540 static inline int
541 ggc_allocated_p (const void *p)
543 page_entry ***base;
544 size_t L1, L2;
546 #if HOST_BITS_PER_PTR <= 32
547 base = &G.lookup[0];
548 #else
549 page_table table = G.lookup;
550 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
551 while (1)
553 if (table == NULL)
554 return 0;
555 if (table->high_bits == high_bits)
556 break;
557 table = table->next;
559 base = &table->table[0];
560 #endif
562 /* Extract the level 1 and 2 indices. */
563 L1 = LOOKUP_L1 (p);
564 L2 = LOOKUP_L2 (p);
566 return base[L1] && base[L1][L2];
569 /* Traverse the page table and find the entry for a page.
570 Die (probably) if the object wasn't allocated via GC. */
572 static inline page_entry *
573 lookup_page_table_entry (const void *p)
575 page_entry ***base;
576 size_t L1, L2;
578 #if HOST_BITS_PER_PTR <= 32
579 base = &G.lookup[0];
580 #else
581 page_table table = G.lookup;
582 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
583 while (table->high_bits != high_bits)
584 table = table->next;
585 base = &table->table[0];
586 #endif
588 /* Extract the level 1 and 2 indices. */
589 L1 = LOOKUP_L1 (p);
590 L2 = LOOKUP_L2 (p);
592 return base[L1][L2];
595 /* Set the page table entry for a page. */
597 static void
598 set_page_table_entry (void *p, page_entry *entry)
600 page_entry ***base;
601 size_t L1, L2;
603 #if HOST_BITS_PER_PTR <= 32
604 base = &G.lookup[0];
605 #else
606 page_table table;
607 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
608 for (table = G.lookup; table; table = table->next)
609 if (table->high_bits == high_bits)
610 goto found;
612 /* Not found -- allocate a new table. */
613 table = XCNEW (struct page_table_chain);
614 table->next = G.lookup;
615 table->high_bits = high_bits;
616 G.lookup = table;
617 found:
618 base = &table->table[0];
619 #endif
621 /* Extract the level 1 and 2 indices. */
622 L1 = LOOKUP_L1 (p);
623 L2 = LOOKUP_L2 (p);
625 if (base[L1] == NULL)
626 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
628 base[L1][L2] = entry;
631 /* Prints the page-entry for object size ORDER, for debugging. */
633 void
634 debug_print_page_list (int order)
636 page_entry *p;
637 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
638 (void *) G.page_tails[order]);
639 p = G.pages[order];
640 while (p != NULL)
642 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
643 p->num_free_objects);
644 p = p->next;
646 printf ("NULL\n");
647 fflush (stdout);
650 #ifdef USING_MMAP
651 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
652 (if non-null). The ifdef structure here is intended to cause a
653 compile error unless exactly one of the HAVE_* is defined. */
655 static inline char *
656 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
658 #ifdef HAVE_MMAP_ANON
659 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
660 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
661 #endif
662 #ifdef HAVE_MMAP_DEV_ZERO
663 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
664 MAP_PRIVATE, G.dev_zero_fd, 0);
665 #endif
667 if (page == (char *) MAP_FAILED)
669 perror ("virtual memory exhausted");
670 exit (FATAL_EXIT_CODE);
673 /* Remember that we allocated this memory. */
674 G.bytes_mapped += size;
676 /* Pretend we don't have access to the allocated pages. We'll enable
677 access to smaller pieces of the area in ggc_alloc. Discard the
678 handle to avoid handle leak. */
679 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
681 return page;
683 #endif
684 #ifdef USING_MALLOC_PAGE_GROUPS
685 /* Compute the index for this page into the page group. */
687 static inline size_t
688 page_group_index (char *allocation, char *page)
690 return (size_t) (page - allocation) >> G.lg_pagesize;
693 /* Set and clear the in_use bit for this page in the page group. */
695 static inline void
696 set_page_group_in_use (page_group *group, char *page)
698 group->in_use |= 1 << page_group_index (group->allocation, page);
701 static inline void
702 clear_page_group_in_use (page_group *group, char *page)
704 group->in_use &= ~(1 << page_group_index (group->allocation, page));
706 #endif
708 /* Allocate a new page for allocating objects of size 2^ORDER,
709 and return an entry for it. The entry is not added to the
710 appropriate page_table list. */
712 static inline struct page_entry *
713 alloc_page (unsigned order)
715 struct page_entry *entry, *p, **pp;
716 char *page;
717 size_t num_objects;
718 size_t bitmap_size;
719 size_t page_entry_size;
720 size_t entry_size;
721 #ifdef USING_MALLOC_PAGE_GROUPS
722 page_group *group;
723 #endif
725 num_objects = OBJECTS_PER_PAGE (order);
726 bitmap_size = BITMAP_SIZE (num_objects + 1);
727 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
728 entry_size = num_objects * OBJECT_SIZE (order);
729 if (entry_size < G.pagesize)
730 entry_size = G.pagesize;
732 entry = NULL;
733 page = NULL;
735 /* Check the list of free pages for one we can use. */
736 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
737 if (p->bytes == entry_size)
738 break;
740 if (p != NULL)
742 /* Recycle the allocated memory from this page ... */
743 *pp = p->next;
744 page = p->page;
746 #ifdef USING_MALLOC_PAGE_GROUPS
747 group = p->group;
748 #endif
750 /* ... and, if possible, the page entry itself. */
751 if (p->order == order)
753 entry = p;
754 memset (entry, 0, page_entry_size);
756 else
757 free (p);
759 #ifdef USING_MMAP
760 else if (entry_size == G.pagesize)
762 /* We want just one page. Allocate a bunch of them and put the
763 extras on the freelist. (Can only do this optimization with
764 mmap for backing store.) */
765 struct page_entry *e, *f = G.free_pages;
766 int i;
768 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
770 /* This loop counts down so that the chain will be in ascending
771 memory order. */
772 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
774 e = XCNEWVAR (struct page_entry, page_entry_size);
775 e->order = order;
776 e->bytes = G.pagesize;
777 e->page = page + (i << G.lg_pagesize);
778 e->next = f;
779 f = e;
782 G.free_pages = f;
784 else
785 page = alloc_anon (NULL, entry_size);
786 #endif
787 #ifdef USING_MALLOC_PAGE_GROUPS
788 else
790 /* Allocate a large block of memory and serve out the aligned
791 pages therein. This results in much less memory wastage
792 than the traditional implementation of valloc. */
794 char *allocation, *a, *enda;
795 size_t alloc_size, head_slop, tail_slop;
796 int multiple_pages = (entry_size == G.pagesize);
798 if (multiple_pages)
799 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
800 else
801 alloc_size = entry_size + G.pagesize - 1;
802 allocation = xmalloc (alloc_size);
804 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
805 head_slop = page - allocation;
806 if (multiple_pages)
807 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
808 else
809 tail_slop = alloc_size - entry_size - head_slop;
810 enda = allocation + alloc_size - tail_slop;
812 /* We allocated N pages, which are likely not aligned, leaving
813 us with N-1 usable pages. We plan to place the page_group
814 structure somewhere in the slop. */
815 if (head_slop >= sizeof (page_group))
816 group = (page_group *)page - 1;
817 else
819 /* We magically got an aligned allocation. Too bad, we have
820 to waste a page anyway. */
821 if (tail_slop == 0)
823 enda -= G.pagesize;
824 tail_slop += G.pagesize;
826 gcc_assert (tail_slop >= sizeof (page_group));
827 group = (page_group *)enda;
828 tail_slop -= sizeof (page_group);
831 /* Remember that we allocated this memory. */
832 group->next = G.page_groups;
833 group->allocation = allocation;
834 group->alloc_size = alloc_size;
835 group->in_use = 0;
836 G.page_groups = group;
837 G.bytes_mapped += alloc_size;
839 /* If we allocated multiple pages, put the rest on the free list. */
840 if (multiple_pages)
842 struct page_entry *e, *f = G.free_pages;
843 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
845 e = xcalloc (1, page_entry_size);
846 e->order = order;
847 e->bytes = G.pagesize;
848 e->page = a;
849 e->group = group;
850 e->next = f;
851 f = e;
853 G.free_pages = f;
856 #endif
858 if (entry == NULL)
859 entry = XCNEWVAR (struct page_entry, page_entry_size);
861 entry->bytes = entry_size;
862 entry->page = page;
863 entry->context_depth = G.context_depth;
864 entry->order = order;
865 entry->num_free_objects = num_objects;
866 entry->next_bit_hint = 1;
868 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
870 #ifdef USING_MALLOC_PAGE_GROUPS
871 entry->group = group;
872 set_page_group_in_use (group, page);
873 #endif
875 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
876 increment the hint. */
877 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
878 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
880 set_page_table_entry (page, entry);
882 if (GGC_DEBUG_LEVEL >= 2)
883 fprintf (G.debug_file,
884 "Allocating page at %p, object size=%lu, data %p-%p\n",
885 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
886 page + entry_size - 1);
888 return entry;
891 /* Adjust the size of G.depth so that no index greater than the one
892 used by the top of the G.by_depth is used. */
894 static inline void
895 adjust_depth (void)
897 page_entry *top;
899 if (G.by_depth_in_use)
901 top = G.by_depth[G.by_depth_in_use-1];
903 /* Peel back indices in depth that index into by_depth, so that
904 as new elements are added to by_depth, we note the indices
905 of those elements, if they are for new context depths. */
906 while (G.depth_in_use > (size_t)top->context_depth+1)
907 --G.depth_in_use;
911 /* For a page that is no longer needed, put it on the free page list. */
913 static void
914 free_page (page_entry *entry)
916 if (GGC_DEBUG_LEVEL >= 2)
917 fprintf (G.debug_file,
918 "Deallocating page at %p, data %p-%p\n", (void *) entry,
919 entry->page, entry->page + entry->bytes - 1);
921 /* Mark the page as inaccessible. Discard the handle to avoid handle
922 leak. */
923 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
925 set_page_table_entry (entry->page, NULL);
927 #ifdef USING_MALLOC_PAGE_GROUPS
928 clear_page_group_in_use (entry->group, entry->page);
929 #endif
931 if (G.by_depth_in_use > 1)
933 page_entry *top = G.by_depth[G.by_depth_in_use-1];
934 int i = entry->index_by_depth;
936 /* We cannot free a page from a context deeper than the current
937 one. */
938 gcc_assert (entry->context_depth == top->context_depth);
940 /* Put top element into freed slot. */
941 G.by_depth[i] = top;
942 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
943 top->index_by_depth = i;
945 --G.by_depth_in_use;
947 adjust_depth ();
949 entry->next = G.free_pages;
950 G.free_pages = entry;
953 /* Release the free page cache to the system. */
955 static void
956 release_pages (void)
958 #ifdef USING_MMAP
959 page_entry *p, *next;
960 char *start;
961 size_t len;
963 /* Gather up adjacent pages so they are unmapped together. */
964 p = G.free_pages;
966 while (p)
968 start = p->page;
969 next = p->next;
970 len = p->bytes;
971 free (p);
972 p = next;
974 while (p && p->page == start + len)
976 next = p->next;
977 len += p->bytes;
978 free (p);
979 p = next;
982 munmap (start, len);
983 G.bytes_mapped -= len;
986 G.free_pages = NULL;
987 #endif
988 #ifdef USING_MALLOC_PAGE_GROUPS
989 page_entry **pp, *p;
990 page_group **gp, *g;
992 /* Remove all pages from free page groups from the list. */
993 pp = &G.free_pages;
994 while ((p = *pp) != NULL)
995 if (p->group->in_use == 0)
997 *pp = p->next;
998 free (p);
1000 else
1001 pp = &p->next;
1003 /* Remove all free page groups, and release the storage. */
1004 gp = &G.page_groups;
1005 while ((g = *gp) != NULL)
1006 if (g->in_use == 0)
1008 *gp = g->next;
1009 G.bytes_mapped -= g->alloc_size;
1010 free (g->allocation);
1012 else
1013 gp = &g->next;
1014 #endif
1017 /* This table provides a fast way to determine ceil(log_2(size)) for
1018 allocation requests. The minimum allocation size is eight bytes. */
1019 #define NUM_SIZE_LOOKUP 512
1020 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1022 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1023 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1024 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1025 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1026 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1027 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1028 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1029 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1030 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1031 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1032 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1033 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1034 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1035 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1036 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1037 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1038 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1039 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1040 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1041 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1042 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1043 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1044 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1045 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1046 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1047 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1048 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1049 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1050 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1051 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1052 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1053 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1056 /* Typed allocation function. Does nothing special in this collector. */
1058 void *
1059 ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
1060 MEM_STAT_DECL)
1062 return ggc_alloc_stat (size PASS_MEM_STAT);
1065 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1067 void *
1068 ggc_alloc_stat (size_t size MEM_STAT_DECL)
1070 size_t order, word, bit, object_offset, object_size;
1071 struct page_entry *entry;
1072 void *result;
1074 if (size < NUM_SIZE_LOOKUP)
1076 order = size_lookup[size];
1077 object_size = OBJECT_SIZE (order);
1079 else
1081 order = 10;
1082 while (size > (object_size = OBJECT_SIZE (order)))
1083 order++;
1086 /* If there are non-full pages for this size allocation, they are at
1087 the head of the list. */
1088 entry = G.pages[order];
1090 /* If there is no page for this object size, or all pages in this
1091 context are full, allocate a new page. */
1092 if (entry == NULL || entry->num_free_objects == 0)
1094 struct page_entry *new_entry;
1095 new_entry = alloc_page (order);
1097 new_entry->index_by_depth = G.by_depth_in_use;
1098 push_by_depth (new_entry, 0);
1100 /* We can skip context depths, if we do, make sure we go all the
1101 way to the new depth. */
1102 while (new_entry->context_depth >= G.depth_in_use)
1103 push_depth (G.by_depth_in_use-1);
1105 /* If this is the only entry, it's also the tail. If it is not
1106 the only entry, then we must update the PREV pointer of the
1107 ENTRY (G.pages[order]) to point to our new page entry. */
1108 if (entry == NULL)
1109 G.page_tails[order] = new_entry;
1110 else
1111 entry->prev = new_entry;
1113 /* Put new pages at the head of the page list. By definition the
1114 entry at the head of the list always has a NULL pointer. */
1115 new_entry->next = entry;
1116 new_entry->prev = NULL;
1117 entry = new_entry;
1118 G.pages[order] = new_entry;
1120 /* For a new page, we know the word and bit positions (in the
1121 in_use bitmap) of the first available object -- they're zero. */
1122 new_entry->next_bit_hint = 1;
1123 word = 0;
1124 bit = 0;
1125 object_offset = 0;
1127 else
1129 /* First try to use the hint left from the previous allocation
1130 to locate a clear bit in the in-use bitmap. We've made sure
1131 that the one-past-the-end bit is always set, so if the hint
1132 has run over, this test will fail. */
1133 unsigned hint = entry->next_bit_hint;
1134 word = hint / HOST_BITS_PER_LONG;
1135 bit = hint % HOST_BITS_PER_LONG;
1137 /* If the hint didn't work, scan the bitmap from the beginning. */
1138 if ((entry->in_use_p[word] >> bit) & 1)
1140 word = bit = 0;
1141 while (~entry->in_use_p[word] == 0)
1142 ++word;
1144 #if GCC_VERSION >= 3004
1145 bit = __builtin_ctzl (~entry->in_use_p[word]);
1146 #else
1147 while ((entry->in_use_p[word] >> bit) & 1)
1148 ++bit;
1149 #endif
1151 hint = word * HOST_BITS_PER_LONG + bit;
1154 /* Next time, try the next bit. */
1155 entry->next_bit_hint = hint + 1;
1157 object_offset = hint * object_size;
1160 /* Set the in-use bit. */
1161 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1163 /* Keep a running total of the number of free objects. If this page
1164 fills up, we may have to move it to the end of the list if the
1165 next page isn't full. If the next page is full, all subsequent
1166 pages are full, so there's no need to move it. */
1167 if (--entry->num_free_objects == 0
1168 && entry->next != NULL
1169 && entry->next->num_free_objects > 0)
1171 /* We have a new head for the list. */
1172 G.pages[order] = entry->next;
1174 /* We are moving ENTRY to the end of the page table list.
1175 The new page at the head of the list will have NULL in
1176 its PREV field and ENTRY will have NULL in its NEXT field. */
1177 entry->next->prev = NULL;
1178 entry->next = NULL;
1180 /* Append ENTRY to the tail of the list. */
1181 entry->prev = G.page_tails[order];
1182 G.page_tails[order]->next = entry;
1183 G.page_tails[order] = entry;
1186 /* Calculate the object's address. */
1187 result = entry->page + object_offset;
1188 #ifdef GATHER_STATISTICS
1189 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1190 result PASS_MEM_STAT);
1191 #endif
1193 #ifdef ENABLE_GC_CHECKING
1194 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1195 exact same semantics in presence of memory bugs, regardless of
1196 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1197 handle to avoid handle leak. */
1198 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1200 /* `Poison' the entire allocated object, including any padding at
1201 the end. */
1202 memset (result, 0xaf, object_size);
1204 /* Make the bytes after the end of the object inaccessible. Discard the
1205 handle to avoid handle leak. */
1206 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1207 object_size - size));
1208 #endif
1210 /* Tell Valgrind that the memory is there, but its content isn't
1211 defined. The bytes at the end of the object are still marked
1212 inaccessible. */
1213 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1215 /* Keep track of how many bytes are being allocated. This
1216 information is used in deciding when to collect. */
1217 G.allocated += object_size;
1219 /* For timevar statistics. */
1220 timevar_ggc_mem_total += object_size;
1222 #ifdef GATHER_STATISTICS
1224 size_t overhead = object_size - size;
1226 G.stats.total_overhead += overhead;
1227 G.stats.total_allocated += object_size;
1228 G.stats.total_overhead_per_order[order] += overhead;
1229 G.stats.total_allocated_per_order[order] += object_size;
1231 if (size <= 32)
1233 G.stats.total_overhead_under32 += overhead;
1234 G.stats.total_allocated_under32 += object_size;
1236 if (size <= 64)
1238 G.stats.total_overhead_under64 += overhead;
1239 G.stats.total_allocated_under64 += object_size;
1241 if (size <= 128)
1243 G.stats.total_overhead_under128 += overhead;
1244 G.stats.total_allocated_under128 += object_size;
1247 #endif
1249 if (GGC_DEBUG_LEVEL >= 3)
1250 fprintf (G.debug_file,
1251 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1252 (unsigned long) size, (unsigned long) object_size, result,
1253 (void *) entry);
1255 return result;
1258 /* Mark function for strings. */
1260 void
1261 gt_ggc_m_S (const void *p)
1263 page_entry *entry;
1264 unsigned bit, word;
1265 unsigned long mask;
1266 unsigned long offset;
1268 if (!p || !ggc_allocated_p (p))
1269 return;
1271 /* Look up the page on which the object is allocated. */
1272 entry = lookup_page_table_entry (p);
1273 gcc_assert (entry);
1275 /* Calculate the index of the object on the page; this is its bit
1276 position in the in_use_p bitmap. Note that because a char* might
1277 point to the middle of an object, we need special code here to
1278 make sure P points to the start of an object. */
1279 offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1280 if (offset)
1282 /* Here we've seen a char* which does not point to the beginning
1283 of an allocated object. We assume it points to the middle of
1284 a STRING_CST. */
1285 gcc_assert (offset == offsetof (struct tree_string, str));
1286 p = ((const char *) p) - offset;
1287 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1288 return;
1291 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1292 word = bit / HOST_BITS_PER_LONG;
1293 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1295 /* If the bit was previously set, skip it. */
1296 if (entry->in_use_p[word] & mask)
1297 return;
1299 /* Otherwise set it, and decrement the free object count. */
1300 entry->in_use_p[word] |= mask;
1301 entry->num_free_objects -= 1;
1303 if (GGC_DEBUG_LEVEL >= 4)
1304 fprintf (G.debug_file, "Marking %p\n", p);
1306 return;
1309 /* If P is not marked, marks it and return false. Otherwise return true.
1310 P must have been allocated by the GC allocator; it mustn't point to
1311 static objects, stack variables, or memory allocated with malloc. */
1313 int
1314 ggc_set_mark (const void *p)
1316 page_entry *entry;
1317 unsigned bit, word;
1318 unsigned long mask;
1320 /* Look up the page on which the object is alloced. If the object
1321 wasn't allocated by the collector, we'll probably die. */
1322 entry = lookup_page_table_entry (p);
1323 gcc_assert (entry);
1325 /* Calculate the index of the object on the page; this is its bit
1326 position in the in_use_p bitmap. */
1327 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1328 word = bit / HOST_BITS_PER_LONG;
1329 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1331 /* If the bit was previously set, skip it. */
1332 if (entry->in_use_p[word] & mask)
1333 return 1;
1335 /* Otherwise set it, and decrement the free object count. */
1336 entry->in_use_p[word] |= mask;
1337 entry->num_free_objects -= 1;
1339 if (GGC_DEBUG_LEVEL >= 4)
1340 fprintf (G.debug_file, "Marking %p\n", p);
1342 return 0;
1345 /* Return 1 if P has been marked, zero otherwise.
1346 P must have been allocated by the GC allocator; it mustn't point to
1347 static objects, stack variables, or memory allocated with malloc. */
1349 int
1350 ggc_marked_p (const void *p)
1352 page_entry *entry;
1353 unsigned bit, word;
1354 unsigned long mask;
1356 /* Look up the page on which the object is alloced. If the object
1357 wasn't allocated by the collector, we'll probably die. */
1358 entry = lookup_page_table_entry (p);
1359 gcc_assert (entry);
1361 /* Calculate the index of the object on the page; this is its bit
1362 position in the in_use_p bitmap. */
1363 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1364 word = bit / HOST_BITS_PER_LONG;
1365 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1367 return (entry->in_use_p[word] & mask) != 0;
1370 /* Return the size of the gc-able object P. */
1372 size_t
1373 ggc_get_size (const void *p)
1375 page_entry *pe = lookup_page_table_entry (p);
1376 return OBJECT_SIZE (pe->order);
1379 /* Release the memory for object P. */
1381 void
1382 ggc_free (void *p)
1384 page_entry *pe = lookup_page_table_entry (p);
1385 size_t order = pe->order;
1386 size_t size = OBJECT_SIZE (order);
1388 #ifdef GATHER_STATISTICS
1389 ggc_free_overhead (p);
1390 #endif
1392 if (GGC_DEBUG_LEVEL >= 3)
1393 fprintf (G.debug_file,
1394 "Freeing object, actual size=%lu, at %p on %p\n",
1395 (unsigned long) size, p, (void *) pe);
1397 #ifdef ENABLE_GC_CHECKING
1398 /* Poison the data, to indicate the data is garbage. */
1399 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1400 memset (p, 0xa5, size);
1401 #endif
1402 /* Let valgrind know the object is free. */
1403 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1405 #ifdef ENABLE_GC_ALWAYS_COLLECT
1406 /* In the completely-anal-checking mode, we do *not* immediately free
1407 the data, but instead verify that the data is *actually* not
1408 reachable the next time we collect. */
1410 struct free_object *fo = XNEW (struct free_object);
1411 fo->object = p;
1412 fo->next = G.free_object_list;
1413 G.free_object_list = fo;
1415 #else
1417 unsigned int bit_offset, word, bit;
1419 G.allocated -= size;
1421 /* Mark the object not-in-use. */
1422 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1423 word = bit_offset / HOST_BITS_PER_LONG;
1424 bit = bit_offset % HOST_BITS_PER_LONG;
1425 pe->in_use_p[word] &= ~(1UL << bit);
1427 if (pe->num_free_objects++ == 0)
1429 page_entry *p, *q;
1431 /* If the page is completely full, then it's supposed to
1432 be after all pages that aren't. Since we've freed one
1433 object from a page that was full, we need to move the
1434 page to the head of the list.
1436 PE is the node we want to move. Q is the previous node
1437 and P is the next node in the list. */
1438 q = pe->prev;
1439 if (q && q->num_free_objects == 0)
1441 p = pe->next;
1443 q->next = p;
1445 /* If PE was at the end of the list, then Q becomes the
1446 new end of the list. If PE was not the end of the
1447 list, then we need to update the PREV field for P. */
1448 if (!p)
1449 G.page_tails[order] = q;
1450 else
1451 p->prev = q;
1453 /* Move PE to the head of the list. */
1454 pe->next = G.pages[order];
1455 pe->prev = NULL;
1456 G.pages[order]->prev = pe;
1457 G.pages[order] = pe;
1460 /* Reset the hint bit to point to the only free object. */
1461 pe->next_bit_hint = bit_offset;
1464 #endif
1467 /* Subroutine of init_ggc which computes the pair of numbers used to
1468 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1470 This algorithm is taken from Granlund and Montgomery's paper
1471 "Division by Invariant Integers using Multiplication"
1472 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1473 constants). */
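/* Worked example of the computation below (illustrative, assuming
   32-bit size_t): for an object size of 24, the first loop strips the
   factor 2^3, leaving e == 3 and size == 3.  The iteration
   inv = inv * (2 - inv * size) then doubles the number of correct
   low-order bits each step, 3 -> 0xffffffeb -> ... -> 0xaaaaaaab, and
   stops at 0xaaaaaaab, the multiplicative inverse of 3 modulo 2^32.
   The stored pair is therefore (mult, shift) == (0xaaaaaaab, 3).  */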
1475 static void
1476 compute_inverse (unsigned order)
1478 size_t size, inv;
1479 unsigned int e;
1481 size = OBJECT_SIZE (order);
1482 e = 0;
1483 while (size % 2 == 0)
1485 e++;
1486 size >>= 1;
1489 inv = size;
1490 while (inv * size != 1)
1491 inv = inv * (2 - inv*size);
1493 DIV_MULT (order) = inv;
1494 DIV_SHIFT (order) = e;
1497 /* Initialize the ggc-mmap allocator. */
1498 void
1499 init_ggc (void)
1501 unsigned order;
1503 G.pagesize = getpagesize();
1504 G.lg_pagesize = exact_log2 (G.pagesize);
1506 #ifdef HAVE_MMAP_DEV_ZERO
1507 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1508 if (G.dev_zero_fd == -1)
1509 internal_error ("open /dev/zero: %m");
1510 #endif
1512 #if 0
1513 G.debug_file = fopen ("ggc-mmap.debug", "w");
1514 #else
1515 G.debug_file = stdout;
1516 #endif
1518 #ifdef USING_MMAP
1519 /* StunOS has an amazing off-by-one error for the first mmap allocation
1520 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1521 believe, is an unaligned page allocation, which would cause us to
1522 hork badly if we tried to use it. */
1524 char *p = alloc_anon (NULL, G.pagesize);
1525 struct page_entry *e;
1526 if ((size_t)p & (G.pagesize - 1))
1528 /* How losing. Discard this one and try another. If we still
1529 can't get something useful, give up. */
1531 p = alloc_anon (NULL, G.pagesize);
1532 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1535 /* We have a good page, might as well hold onto it... */
1536 e = XCNEW (struct page_entry);
1537 e->bytes = G.pagesize;
1538 e->page = p;
1539 e->next = G.free_pages;
1540 G.free_pages = e;
1542 #endif
1544 /* Initialize the object size table. */
1545 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1546 object_size_table[order] = (size_t) 1 << order;
1547 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1549 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1551 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1552 so that we're sure of getting aligned memory. */
1553 s = ROUND_UP (s, MAX_ALIGNMENT);
1554 object_size_table[order] = s;
1557 /* Initialize the objects-per-page and inverse tables. */
1558 for (order = 0; order < NUM_ORDERS; ++order)
1560 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1561 if (objects_per_page_table[order] == 0)
1562 objects_per_page_table[order] = 1;
1563 compute_inverse (order);
1566 /* Reset the size_lookup array to put appropriately sized objects in
1567 the special orders. All objects bigger than the previous power
1568 of two, but no greater than the special size, should go in the
1569 new order. */
1570 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1572 int o;
1573 int i;
1575 i = OBJECT_SIZE (order);
1576 if (i >= NUM_SIZE_LOOKUP)
1577 continue;
1579 for (o = size_lookup[i]; o == size_lookup [i]; --i)
1580 size_lookup[i] = order;
1583 G.depth_in_use = 0;
1584 G.depth_max = 10;
1585 G.depth = XNEWVEC (unsigned int, G.depth_max);
1587 G.by_depth_in_use = 0;
1588 G.by_depth_max = INITIAL_PTE_COUNT;
1589 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1590 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1593 /* Start a new GGC zone. */
1595 struct alloc_zone *
1596 new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
1598 return NULL;
1601 /* Destroy a GGC zone. */
1602 void
1603 destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
1607 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1608 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1610 static void
1611 ggc_recalculate_in_use_p (page_entry *p)
1613 unsigned int i;
1614 size_t num_objects;
1616 /* Because the past-the-end bit in in_use_p is always set, we
1617 pretend there is one additional object. */
1618 num_objects = OBJECTS_IN_PAGE (p) + 1;
1620 /* Reset the free object count. */
1621 p->num_free_objects = num_objects;
1623 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1624 for (i = 0;
1625 i < CEIL (BITMAP_SIZE (num_objects),
1626 sizeof (*p->in_use_p));
1627 ++i)
1629 unsigned long j;
1631 /* Something is in use if it is marked, or if it was in use in a
1632 context further down the context stack. */
1633 p->in_use_p[i] |= save_in_use_p (p)[i];
1635 /* Decrement the free object count for every object allocated. */
1636 for (j = p->in_use_p[i]; j; j >>= 1)
1637 p->num_free_objects -= (j & 1);
1640 gcc_assert (p->num_free_objects < num_objects);
1643 /* Unmark all objects. */
1645 static void
1646 clear_marks (void)
1648 unsigned order;
1650 for (order = 2; order < NUM_ORDERS; order++)
1652 page_entry *p;
1654 for (p = G.pages[order]; p != NULL; p = p->next)
1656 size_t num_objects = OBJECTS_IN_PAGE (p);
1657 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1659 /* The data should be page-aligned. */
1660 gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
1662 /* Pages that aren't in the topmost context are not collected;
1663 nevertheless, we need their in-use bit vectors to store GC
1664 marks. So, back them up first. */
1665 if (p->context_depth < G.context_depth)
1667 if (! save_in_use_p (p))
1668 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1669 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1672 /* Reset the number of free objects and clear the
1673 in-use bits. These will be adjusted by mark_obj. */
1674 p->num_free_objects = num_objects;
1675 memset (p->in_use_p, 0, bitmap_size);
1677 /* Make sure the one-past-the-end bit is always set. */
1678 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1679 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1684 /* Free all empty pages. Partially empty pages need no attention
1685 because the `mark' bit doubles as an `unused' bit. */
1687 static void
1688 sweep_pages (void)
1690 unsigned order;
1692 for (order = 2; order < NUM_ORDERS; order++)
1694 /* The last page-entry to consider, regardless of entries
1695 placed at the end of the list. */
1696 page_entry * const last = G.page_tails[order];
1698 size_t num_objects;
1699 size_t live_objects;
1700 page_entry *p, *previous;
1701 int done;
1703 p = G.pages[order];
1704 if (p == NULL)
1705 continue;
1707 previous = NULL;
1710 page_entry *next = p->next;
1712 /* Loop until all entries have been examined. */
1713 done = (p == last);
1715 num_objects = OBJECTS_IN_PAGE (p);
1717 /* Add all live objects on this page to the count of
1718 allocated memory. */
1719 live_objects = num_objects - p->num_free_objects;
1721 G.allocated += OBJECT_SIZE (order) * live_objects;
1723 /* Only objects on pages in the topmost context should get
1724 collected. */
1725 if (p->context_depth < G.context_depth)
1728 /* Remove the page if it's empty. */
1729 else if (live_objects == 0)
1731 /* If P was the first page in the list, then NEXT
1732 becomes the new first page in the list, otherwise
1733 splice P out of the forward pointers. */
1734 if (! previous)
1735 G.pages[order] = next;
1736 else
1737 previous->next = next;
1739 /* Splice P out of the back pointers too. */
1740 if (next)
1741 next->prev = previous;
1743 /* Are we removing the last element? */
1744 if (p == G.page_tails[order])
1745 G.page_tails[order] = previous;
1746 free_page (p);
1747 p = previous;
1750 /* If the page is full, move it to the end. */
1751 else if (p->num_free_objects == 0)
1753 /* Don't move it if it's already at the end. */
1754 if (p != G.page_tails[order])
1756 /* Move p to the end of the list. */
1757 p->next = NULL;
1758 p->prev = G.page_tails[order];
1759 G.page_tails[order]->next = p;
1761 /* Update the tail pointer... */
1762 G.page_tails[order] = p;
1764 /* ... and the head pointer, if necessary. */
1765 if (! previous)
1766 G.pages[order] = next;
1767 else
1768 previous->next = next;
1770 /* And update the backpointer in NEXT if necessary. */
1771 if (next)
1772 next->prev = previous;
1774 p = previous;
1778 /* If we've fallen through to here, it's a page in the
1779 topmost context that is neither full nor empty. Such a
1780 page must precede pages at lesser context depth in the
1781 list, so move it to the head. */
1782 else if (p != G.pages[order])
1784 previous->next = p->next;
1786 /* Update the backchain in the next node if it exists. */
1787 if (p->next)
1788 p->next->prev = previous;
1790 /* Move P to the head of the list. */
1791 p->next = G.pages[order];
1792 p->prev = NULL;
1793 G.pages[order]->prev = p;
1795 /* Update the head pointer. */
1796 G.pages[order] = p;
1798 /* Are we moving the last element? */
1799 if (G.page_tails[order] == p)
1800 G.page_tails[order] = previous;
1801 p = previous;
1804 previous = p;
1805 p = next;
1807 while (! done);
1809 /* Now, restore the in_use_p vectors for any pages from contexts
1810 other than the current one. */
1811 for (p = G.pages[order]; p; p = p->next)
1812 if (p->context_depth != G.context_depth)
1813 ggc_recalculate_in_use_p (p);
1817 #ifdef ENABLE_GC_CHECKING
1818 /* Clobber all free objects. */
1820 static void
1821 poison_pages (void)
1823 unsigned order;
1825 for (order = 2; order < NUM_ORDERS; order++)
1827 size_t size = OBJECT_SIZE (order);
1828 page_entry *p;
1830 for (p = G.pages[order]; p != NULL; p = p->next)
1832 size_t num_objects;
1833 size_t i;
1835 if (p->context_depth != G.context_depth)
1836 /* Since we don't do any collection for pages in pushed
1837 contexts, there's no need to do any poisoning. And
1838 besides, the IN_USE_P array isn't valid until we pop
1839 contexts. */
1840 continue;
1842 num_objects = OBJECTS_IN_PAGE (p);
1843 for (i = 0; i < num_objects; i++)
1845 size_t word, bit;
1846 word = i / HOST_BITS_PER_LONG;
1847 bit = i % HOST_BITS_PER_LONG;
1848 if (((p->in_use_p[word] >> bit) & 1) == 0)
1850 char *object = p->page + i * size;
1852 /* Keep poison-by-write when we expect to use Valgrind,
1853 so the exact same memory semantics is kept, in case
1854 there are memory errors. We override this request
1855 below. */
1856 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
1857 size));
1858 memset (object, 0xa5, size);
1860 /* Drop the handle to avoid handle leak. */
1861 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
1867 #else
1868 #define poison_pages()
1869 #endif
1871 #ifdef ENABLE_GC_ALWAYS_COLLECT
1872 /* Validate that the reportedly free objects actually are. */
1874 static void
1875 validate_free_objects (void)
1877 struct free_object *f, *next, *still_free = NULL;
1879 for (f = G.free_object_list; f ; f = next)
1881 page_entry *pe = lookup_page_table_entry (f->object);
1882 size_t bit, word;
1884 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
1885 word = bit / HOST_BITS_PER_LONG;
1886 bit = bit % HOST_BITS_PER_LONG;
1887 next = f->next;
1889 /* Make certain it isn't visible from any root. Notice that we
1890 do this check before sweep_pages merges save_in_use_p. */
1891 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
1893 /* If the object comes from an outer context, then retain the
1894 free_object entry, so that we can verify that the address
1895 isn't live on the stack in some outer context. */
1896 if (pe->context_depth != G.context_depth)
1898 f->next = still_free;
1899 still_free = f;
1901 else
1902 free (f);
1905 G.free_object_list = still_free;
1907 #else
1908 #define validate_free_objects()
1909 #endif
1911 /* Top level mark-and-sweep routine. */
1913 void
1914 ggc_collect (void)
1916 /* Avoid frequent unnecessary work by skipping collection if the
1917 total allocations haven't expanded much since the last
1918 collection. */
1919 float allocated_last_gc =
1920 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1922 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1924 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
1925 return;
1927 timevar_push (TV_GC);
1928 if (!quiet_flag)
1929 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1930 if (GGC_DEBUG_LEVEL >= 2)
1931 fprintf (G.debug_file, "BEGIN COLLECTING\n");
1933 /* Zero the total allocated bytes. This will be recalculated in the
1934 sweep phase. */
1935 G.allocated = 0;
1937 /* Release the pages we freed the last time we collected, but didn't
1938 reuse in the interim. */
1939 release_pages ();
1941 /* Indicate that we've seen collections at this context depth. */
1942 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1944 clear_marks ();
1945 ggc_mark_roots ();
1946 #ifdef GATHER_STATISTICS
1947 ggc_prune_overhead_list ();
1948 #endif
1949 poison_pages ();
1950 validate_free_objects ();
1951 sweep_pages ();
1953 G.allocated_last_gc = G.allocated;
1955 timevar_pop (TV_GC);
1957 if (!quiet_flag)
1958 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1959 if (GGC_DEBUG_LEVEL >= 2)
1960 fprintf (G.debug_file, "END COLLECTING\n");
1961 }
1963 /* Print allocation statistics. */
1964 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1965 ? (x) \
1966 : ((x) < 1024*1024*10 \
1967 ? (x) / 1024 \
1968 : (x) / (1024*1024))))
1969 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
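/* Worked examples of the two macros above (inputs are illustrative):
   SCALE (2500) is 2500 with STAT_LABEL ' ', SCALE (300000) is 292 with
   label 'k', and SCALE (50000000) is 47 with label 'M'.  */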
1971 void
1972 ggc_print_statistics (void)
1973 {
1974 struct ggc_statistics stats;
1975 unsigned int i;
1976 size_t total_overhead = 0;
1978 /* Clear the statistics. */
1979 memset (&stats, 0, sizeof (stats));
1981 /* Make sure collection will really occur. */
1982 G.allocated_last_gc = 0;
1984 /* Collect and print the statistics common across collectors. */
1985 ggc_print_common_statistics (stderr, &stats);
1987 /* Release free pages so that we will not count the bytes allocated
1988 there as part of the total allocated memory. */
1989 release_pages ();
1991 /* Collect some information about the various sizes of
1992 allocation. */
1993 fprintf (stderr,
1994 "Memory still allocated at the end of the compilation process\n");
1995 fprintf (stderr, "%-5s %10s %10s %10s\n",
1996 "Size", "Allocated", "Used", "Overhead");
1997 for (i = 0; i < NUM_ORDERS; ++i)
1998 {
1999 page_entry *p;
2000 size_t allocated;
2001 size_t in_use;
2002 size_t overhead;
2004 /* Skip empty entries. */
2005 if (!G.pages[i])
2006 continue;
2008 overhead = allocated = in_use = 0;
2010 /* Figure out the total number of bytes allocated for objects of
2011 this size, and how many of them are actually in use. Also figure
2012 out how much memory the page table is using. */
2013 for (p = G.pages[i]; p; p = p->next)
2014 {
2015 allocated += p->bytes;
2016 in_use +=
2017 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2019 overhead += (sizeof (page_entry) - sizeof (long)
2020 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2021 }
2022 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
2023 (unsigned long) OBJECT_SIZE (i),
2024 SCALE (allocated), STAT_LABEL (allocated),
2025 SCALE (in_use), STAT_LABEL (in_use),
2026 SCALE (overhead), STAT_LABEL (overhead));
2027 total_overhead += overhead;
2028 }
2029 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
2030 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2031 SCALE (G.allocated), STAT_LABEL(G.allocated),
2032 SCALE (total_overhead), STAT_LABEL (total_overhead));
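/* Reading the "Total" row (a gloss, not text from this file):
   G.bytes_mapped is memory currently obtained from the system for
   object pages, G.allocated is the object bytes the collector
   currently accounts as allocated, and total_overhead is only the
   page-entry and bitmap bookkeeping summed above.  */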
2034 #ifdef GATHER_STATISTICS
2035 {
2036 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2038 fprintf (stderr, "Total Overhead: %10lld\n",
2039 G.stats.total_overhead);
2040 fprintf (stderr, "Total Allocated: %10lld\n",
2041 G.stats.total_allocated);
2043 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2044 G.stats.total_overhead_under32);
2045 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2046 G.stats.total_allocated_under32);
2047 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2048 G.stats.total_overhead_under64);
2049 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2050 G.stats.total_allocated_under64);
2051 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2052 G.stats.total_overhead_under128);
2053 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2054 G.stats.total_allocated_under128);
2056 for (i = 0; i < NUM_ORDERS; i++)
2057 if (G.stats.total_allocated_per_order[i])
2058 {
2059 fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
2060 (unsigned long) OBJECT_SIZE (i),
2061 G.stats.total_overhead_per_order[i]);
2062 fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
2063 (unsigned long) OBJECT_SIZE (i),
2064 G.stats.total_allocated_per_order[i]);
2065 }
2066 }
2067 #endif
2068 }
2070 struct ggc_pch_data
2071 {
2072 struct ggc_pch_ondisk
2073 {
2074 unsigned totals[NUM_ORDERS];
2075 } d;
2076 size_t base[NUM_ORDERS];
2077 size_t written[NUM_ORDERS];
2078 };
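/* Sketch of the expected calling sequence when a PCH is written,
   inferred from the functions below rather than stated anywhere in
   this file:

     data = init_ggc_pch ();
     for each object:  ggc_pch_count_object (data, ...);
     size = ggc_pch_total_size (data);
     ggc_pch_this_base (data, base);   (base chosen by the caller)
     for each object:  ggc_pch_alloc_object (data, ...);
     ggc_pch_prepare_write (data, f);
     for each object:  ggc_pch_write_object (data, f, ...);
     ggc_pch_finish (data, f);  */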
2080 struct ggc_pch_data *
2081 init_ggc_pch (void)
2082 {
2083 return XCNEW (struct ggc_pch_data);
2084 }
2086 void
2087 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2088 size_t size, bool is_string ATTRIBUTE_UNUSED,
2089 enum gt_types_enum type ATTRIBUTE_UNUSED)
2090 {
2091 unsigned order;
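/* Small requests take the size_lookup fast path below; anything of
   NUM_SIZE_LOOKUP bytes or more is mapped by scanning upward from
   order 10 until OBJECT_SIZE (order) is large enough. E.g. a
   5000-byte object (an illustrative size) lands in the first order
   whose object size is at least 5000 bytes.  */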
2093 if (size < NUM_SIZE_LOOKUP)
2094 order = size_lookup[size];
2095 else
2096 {
2097 order = 10;
2098 while (size > OBJECT_SIZE (order))
2099 order++;
2100 }
2102 d->d.totals[order]++;
2103 }
2105 size_t
2106 ggc_pch_total_size (struct ggc_pch_data *d)
2107 {
2108 size_t a = 0;
2109 unsigned i;
2111 for (i = 0; i < NUM_ORDERS; i++)
2112 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
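/* Each order's region is padded to a page multiple; e.g. 1000 objects
   of a 32-byte order occupy 32000 bytes, rounded up to 32768 when
   G.pagesize is 4096 (both figures illustrative only).  */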
2113 return a;
2114 }
2116 void
2117 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2118 {
2119 size_t a = (size_t) base;
2120 unsigned i;
2122 for (i = 0; i < NUM_ORDERS; i++)
2123 {
2124 d->base[i] = a;
2125 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2126 }
2127 }
2130 char *
2131 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2132 size_t size, bool is_string ATTRIBUTE_UNUSED,
2133 enum gt_types_enum type ATTRIBUTE_UNUSED)
2134 {
2135 unsigned order;
2136 char *result;
2138 if (size < NUM_SIZE_LOOKUP)
2139 order = size_lookup[size];
2140 else
2141 {
2142 order = 10;
2143 while (size > OBJECT_SIZE (order))
2144 order++;
2145 }
2147 result = (char *) d->base[order];
2148 d->base[order] += OBJECT_SIZE (order);
2149 return result;
2150 }
2152 void
2153 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2154 FILE *f ATTRIBUTE_UNUSED)
2155 {
2156 /* Nothing to do. */
2157 }
2159 void
2160 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2161 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2162 size_t size, bool is_string ATTRIBUTE_UNUSED)
2163 {
2164 unsigned order;
2165 static const char emptyBytes[256];
2167 if (size < NUM_SIZE_LOOKUP)
2168 order = size_lookup[size];
2169 else
2170 {
2171 order = 10;
2172 while (size > OBJECT_SIZE (order))
2173 order++;
2174 }
2176 if (fwrite (x, size, 1, f) != 1)
2177 fatal_error ("can't write PCH file: %m");
2179 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2180 object out to OBJECT_SIZE(order). This happens for strings. */
2182 if (size != OBJECT_SIZE (order))
2183 {
2184 unsigned padding = OBJECT_SIZE(order) - size;
2186 /* To speed small writes, we use a nulled-out array that's larger
2187 than most padding requests as the source for our null bytes. This
2188 permits us to do the padding with fwrite() rather than fseek(), and
2189 limits the chance the OS may try to flush any outstanding writes. */
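/* For instance (sizes illustrative): a 37-byte string stored in a
   64-byte order needs 27 bytes of padding, which fits comfortably in
   emptyBytes and is written with a single fwrite below.  */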
2190 if (padding <= sizeof(emptyBytes))
2191 {
2192 if (fwrite (emptyBytes, 1, padding, f) != padding)
2193 fatal_error ("can't write PCH file");
2194 }
2195 else
2196 {
2197 /* Larger than our buffer? Just default to fseek. */
2198 if (fseek (f, padding, SEEK_CUR) != 0)
2199 fatal_error ("can't write PCH file");
2200 }
2201 }
2203 d->written[order]++;
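/* Once the last object of this order has been written, seek forward by
   the leftover so the next order starts on a page boundary in the
   file, mirroring the ROUND_UP done in ggc_pch_total_size and
   ggc_pch_this_base.  */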
2204 if (d->written[order] == d->d.totals[order]
2205 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2206 G.pagesize),
2207 SEEK_CUR) != 0)
2208 fatal_error ("can't write PCH file: %m");
2209 }
2211 void
2212 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2213 {
2214 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2215 fatal_error ("can't write PCH file: %m");
2216 free (d);
2217 }
2219 /* Move the PCH PTE entries just added to the end of by_depth
2220 to the front. */
2222 static void
2223 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2224 {
2225 unsigned i;
2227 /* First, we swap the new entries to the front of the varrays. */
2228 page_entry **new_by_depth;
2229 unsigned long **new_save_in_use;
2231 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2232 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2234 memcpy (&new_by_depth[0],
2235 &G.by_depth[count_old_page_tables],
2236 count_new_page_tables * sizeof (void *));
2237 memcpy (&new_by_depth[count_new_page_tables],
2238 &G.by_depth[0],
2239 count_old_page_tables * sizeof (void *));
2240 memcpy (&new_save_in_use[0],
2241 &G.save_in_use[count_old_page_tables],
2242 count_new_page_tables * sizeof (void *));
2243 memcpy (&new_save_in_use[count_new_page_tables],
2244 &G.save_in_use[0],
2245 count_old_page_tables * sizeof (void *));
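/* Illustrative layout (counts made up): with 3 old and 2 new page
   tables, new_by_depth becomes { new0, new1, old0, old1, old2 }, so
   the PCH (context 0) entries now come first.  */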
2247 free (G.by_depth);
2248 free (G.save_in_use);
2250 G.by_depth = new_by_depth;
2251 G.save_in_use = new_save_in_use;
2253 /* Now update all the index_by_depth fields. */
2254 for (i = G.by_depth_in_use; i > 0; --i)
2255 {
2256 page_entry *p = G.by_depth[i-1];
2257 p->index_by_depth = i-1;
2258 }
2260 /* And last, we update the depth pointers in G.depth. The first
2261 entry is already 0, and context 0 entries always start at index
2262 0, so there is nothing to update in the first slot. We need a
2263 second slot only if we have old PTEs; if we do, they start
2264 at index count_new_page_tables. */
2265 if (count_old_page_tables)
2266 push_depth (count_new_page_tables);
2267 }
2269 void
2270 ggc_pch_read (FILE *f, void *addr)
2271 {
2272 struct ggc_pch_ondisk d;
2273 unsigned i;
2274 char *offs = (char *) addr;
2275 unsigned long count_old_page_tables;
2276 unsigned long count_new_page_tables;
2278 count_old_page_tables = G.by_depth_in_use;
2280 /* We've just read in a PCH file. So, every object that used to be
2281 allocated is now free. */
2282 clear_marks ();
2283 #ifdef ENABLE_GC_CHECKING
2284 poison_pages ();
2285 #endif
2286 /* Since we free all the allocated objects, the free list becomes
2287 useless. Validate it now, which will also clear it. */
2288 validate_free_objects();
2290 /* No object read from a PCH file should ever be freed. So, set the
2291 context depth to 1, and set the depth of all the currently-allocated
2292 pages to be 1 too. PCH pages will have depth 0. */
2293 gcc_assert (!G.context_depth);
2294 G.context_depth = 1;
2295 for (i = 0; i < NUM_ORDERS; i++)
2296 {
2297 page_entry *p;
2298 for (p = G.pages[i]; p != NULL; p = p->next)
2299 p->context_depth = G.context_depth;
2300 }
2302 /* Allocate the appropriate page-table entries for the pages read from
2303 the PCH file. */
2304 if (fread (&d, sizeof (d), 1, f) != 1)
2305 fatal_error ("can't read PCH file: %m");
2307 for (i = 0; i < NUM_ORDERS; i++)
2308 {
2309 struct page_entry *entry;
2310 char *pte;
2311 size_t bytes;
2312 size_t num_objs;
2313 size_t j;
2315 if (d.totals[i] == 0)
2316 continue;
2318 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2319 num_objs = bytes / OBJECT_SIZE (i);
2320 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2321 - sizeof (long)
2322 + BITMAP_SIZE (num_objs + 1)));
2323 entry->bytes = bytes;
2324 entry->page = offs;
2325 entry->context_depth = 0;
2326 offs += bytes;
2327 entry->num_free_objects = 0;
2328 entry->order = i;
2330 for (j = 0;
2331 j + HOST_BITS_PER_LONG <= num_objs + 1;
2332 j += HOST_BITS_PER_LONG)
2333 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2334 for (; j < num_objs + 1; j++)
2335 entry->in_use_p[j / HOST_BITS_PER_LONG]
2336 |= 1L << (j % HOST_BITS_PER_LONG);
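/* Every object in the PCH image is live, so all num_objs bits are set;
   the loop bounds of num_objs + 1 also set the one extra trailing bit
   that the bitmap is sized for (see BITMAP_SIZE (num_objs + 1)
   above).  */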
2338 for (pte = entry->page;
2339 pte < entry->page + entry->bytes;
2340 pte += G.pagesize)
2341 set_page_table_entry (pte, entry);
2343 if (G.page_tails[i] != NULL)
2344 G.page_tails[i]->next = entry;
2345 else
2346 G.pages[i] = entry;
2347 G.page_tails[i] = entry;
2349 /* We start off by just adding all the new information to the
2350 end of the varrays; later, we will move the new information
2351 to the front of the varrays, as the PCH page tables are at
2352 context 0. */
2353 push_by_depth (entry, 0);
2354 }
2356 /* Now, we update the various data structures that speed page table
2357 handling. */
2358 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2360 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2362 /* Update the statistics. */
2363 G.allocated = G.allocated_last_gc = offs - (char *)addr;
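/* Setting allocated_last_gc to the size of the PCH image means the
   next ggc_collect will not fire until allocations grow well beyond
   the preloaded data (see the expansion test at the top of
   ggc_collect).  */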