1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "tree.h"
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "toplev.h"
30 #include "flags.h"
31 #include "ggc.h"
32 #include "timevar.h"
33 #include "params.h"
34 #include "tree-flow.h"
35 #ifdef ENABLE_VALGRIND_CHECKING
36 # ifdef HAVE_VALGRIND_MEMCHECK_H
37 # include <valgrind/memcheck.h>
38 # elif defined HAVE_MEMCHECK_H
39 # include <memcheck.h>
40 # else
41 # include <valgrind.h>
42 # endif
43 #else
44 /* Avoid #ifdef:s when we can help it. */
45 #define VALGRIND_DISCARD(x)
46 #endif
48 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
49 file open. Prefer either to valloc. */
50 #ifdef HAVE_MMAP_ANON
51 # undef HAVE_MMAP_DEV_ZERO
53 # include <sys/mman.h>
54 # ifndef MAP_FAILED
55 # define MAP_FAILED -1
56 # endif
57 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
58 # define MAP_ANONYMOUS MAP_ANON
59 # endif
60 # define USING_MMAP
62 #endif
64 #ifdef HAVE_MMAP_DEV_ZERO
66 # include <sys/mman.h>
67 # ifndef MAP_FAILED
68 # define MAP_FAILED -1
69 # endif
70 # define USING_MMAP
72 #endif
74 #ifndef USING_MMAP
75 #define USING_MALLOC_PAGE_GROUPS
76 #endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at eight bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

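/* Illustrative sketch (an editorial addition, not part of the
   allocator): how a request size maps to a power-of-two order as
   described above.  This ignores the size_lookup table and the extra
   non-power-of-two orders that the real ggc_alloc_stat consults; the
   helper name is hypothetical.  */
#if 0
static unsigned
example_order_for_size (size_t size)
{
  unsigned order = 3;     /* Smallest objects handed out are 2^3 = 8 bytes.  */
  while (((size_t) 1 << order) < size)
    ++order;              /* Round up to the next power of two.  */
  return order;
}
#endif
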
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif


/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                  HOST_PAGE_SIZE_BITS
                          32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))

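/* Illustrative sketch (an editorial addition): the pointer
   decomposition above, written out for a host with 4 KB pages, so
   G.lg_pagesize == 12 and PAGE_L2_BITS == 12.  The function name and
   the hard-coded shifts/masks are for illustration only, and G and
   page_entry are declared further down the file; the real code is
   lookup_page_table_entry below.  */
#if 0
static page_entry *
example_lookup_4k (const void *p)
{
  size_t L1 = ((size_t) p >> 24) & 0xff;    /* top PAGE_L1_BITS bits */
  size_t L2 = ((size_t) p >> 12) & 0xfff;   /* next PAGE_L2_BITS bits */
  /* 32-bit host case: two direct table indexings.  */
  return G.lookup[L1] ? G.lookup[L1][L2] : NULL;
}
#endif
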
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))

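/* Worked example (an editorial illustration, not from the original
   source): for an object size Z == 24 == 8 * 3, compute_inverse below
   yields DIV_SHIFT == 3 and DIV_MULT == 0xaaaaaaab, the inverse of 3
   modulo 2^32 (3 * 0xaaaaaaab == 0x200000001 == 1 mod 2^32).  For the
   offset O == 48: (48 * 0xaaaaaaab) mod 2^32 == 16, and 16 >> 3 == 2,
   which is indeed 48 / 24.  */
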
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct stmt_ann_d),
  sizeof (struct tree_decl),
  sizeof (struct tree_list),
  TREE_EXP_SIZE (2),
  RTL_SIZE (2),		/* MEM, PLUS, etc.  */
  RTL_SIZE (9),		/* INSN */
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    long double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

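/* For example (editorial note): ROUND_UP_VALUE (10, 8) == 6, and
   ROUND_UP (10, 8) == 16.  */
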
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page entry
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

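/* Layout note (editorial): in_use_p is declared with one element, but
   alloc_page below allocates
   sizeof (page_entry) - sizeof (long) + BITMAP_SIZE (num_objects + 1)
   bytes per entry, so the bitmap actually extends past the declared
   struct.  The "+ 1" covers the one-past-the-end sentinel bit that
   keeps the next_bit_hint scan from running off the bitmap.  */
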
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object
  {
    void *object;
    struct free_object *next;
  } *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))

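/* For example (editorial note): a 4 KB page of 8-byte objects holds
   512 objects, so with its sentinel bit BITMAP_SIZE (512 + 1)
   == CEIL (513, 64) * sizeof (long) == 9 * 8 == 72 bytes on a host
   with 64-bit longs.  */
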
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 256
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
struct alloc_zone *rtl_zone = NULL;
struct alloc_zone *tree_zone = NULL;
struct alloc_zone *garbage_zone = NULL;

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = xrealloc (G.by_depth,
                             G.by_depth_max * sizeof (page_entry *));
      G.save_in_use = xrealloc (G.save_in_use,
                                G.by_depth_max * sizeof (unsigned long *));
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
          (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
              p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#endif

#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
        {
          e = xcalloc (1, page_entry_size);
          e->order = order;
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *)page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          gcc_assert (tail_slop >= sizeof (page_group));
          group = (page_group *)enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = G.free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = xcalloc (1, page_entry_size);
              e->order = order;
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          G.free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%lu, data %p-%p\n",
             (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
             page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
         as new elements are added to by_depth, we note the indices
         of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
        --G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (void *) entry,
             entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
         one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        G.bytes_mapped -= g->alloc_size;
        free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */

static unsigned char size_lookup[257] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};

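/* For example (editorial note): size_lookup[32] == 5, so a 32-byte
   request is served from 2^5 == 32-byte objects, while size_lookup[33]
   == 6 rounds a 33-byte request up to 64 bytes.  init_ggc below then
   rewrites stretches of this table so that the extra
   (non-power-of-two) orders capture the sizes just above a power of
   two.  */
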
/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
                      MEM_STAT_DECL)
{
  return ggc_alloc_stat (size PASS_MEM_STAT);
}

/* Zone allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED
                     MEM_STAT_DECL)
{
  return ggc_alloc_stat (size PASS_MEM_STAT);
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    {
      order = size_lookup[size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 9;
      while (size > (object_size = OBJECT_SIZE (order)))
        order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
         way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
         the only entry, then we must update the PREV pointer of the
         ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;
      else
        entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL PREV pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;

#if GCC_VERSION >= 3004
          bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
#endif

          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
         The new page at the head of the list will have NULL in
         its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
                       result PASS_MEM_STAT);
#endif

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard
     the handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
                                            object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
        G.stats.total_overhead_under32 += overhead;
        G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
        G.stats.total_overhead_under64 += overhead;
        G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
        G.stats.total_overhead_under128 += overhead;
        G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
             (unsigned long) size, (unsigned long) object_size, result,
             (void *) entry);

  return result;
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size=%lu, at %p on %p\n",
             (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = xmalloc (sizeof (struct free_object));
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}

/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}

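/* Worked example (an editorial illustration, not from the original
   source): for OBJECT_SIZE == 24, the first loop above strips the
   factor 2^3, leaving size == 3 and e == 3.  The Newton iteration
   inv = inv * (2 - inv * size) then doubles the number of correct
   low-order bits of the inverse each round; with a 32-bit size_t it
   converges to inv == 0xaaaaaaab, where 3 * 0xaaaaaaab == 1 mod 2^32,
   matching the DIV_MULT/DIV_SHIFT example given near OFFSET_TO_BIT.  */
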
/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize);
        gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = xcalloc (1, sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
         so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
        objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      o = size_lookup[OBJECT_SIZE (order)];
      for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
        size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = xmalloc (G.depth_max * sizeof (unsigned int));

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
  G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
}

/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
{
}

/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  ++G.context_depth;

  /* Die on wrap.  */
  gcc_assert (G.context_depth < HOST_BITS_PER_LONG);
}

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
                 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}

/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context (void)
{
  unsigned long omask;
  unsigned int depth, i, e;
#ifdef ENABLE_CHECKING
  unsigned int order;
#endif

  depth = --G.context_depth;
  omask = (unsigned long)1 << (depth + 1);

  if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
    return;

  G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
  G.context_depth_allocations &= omask - 1;
  G.context_depth_collections &= omask - 1;

  /* The G.depth array is shortened so that the last index is the
     context_depth of the top element of by_depth.  */
  if (depth+1 < G.depth_in_use)
    e = G.depth[depth+1];
  else
    e = G.by_depth_in_use;

  /* We might not have any PTEs of depth depth.  */
  if (depth < G.depth_in_use)
    {
      /* First we go through all the pages at depth depth to
         recalculate the in use bits.  */
      for (i = G.depth[depth]; i < e; ++i)
        {
          page_entry *p = G.by_depth[i];

          /* Check that all of the pages really are at the depth that
             we expect.  */
          gcc_assert (p->context_depth == depth);
          gcc_assert (p->index_by_depth == i);

          prefetch (&save_in_use_p_i (i+8));
          prefetch (&save_in_use_p_i (i+16));
          if (save_in_use_p_i (i))
            {
              p = G.by_depth[i];
              ggc_recalculate_in_use_p (p);
              free (save_in_use_p_i (i));
              save_in_use_p_i (i) = 0;
            }
        }
    }

  /* Then, we reset all page_entries with a depth greater than depth
     to be at depth.  */
  for (i = e; i < G.by_depth_in_use; ++i)
    {
      page_entry *p = G.by_depth[i];

      /* Check that all of the pages really are at the depth we
         expect.  */
      gcc_assert (p->context_depth > depth);
      gcc_assert (p->index_by_depth == i);
      p->context_depth = depth;
    }

  adjust_depth ();

#ifdef ENABLE_CHECKING
  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        gcc_assert (p->context_depth < depth ||
                    (p->context_depth == depth && !save_in_use_p (p)));
    }
#endif
}

/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects = OBJECTS_IN_PAGE (p);
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

          /* The data should be page-aligned.  */
          gcc_assert (!((size_t) p->page & (G.pagesize - 1)));

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! save_in_use_p (p))
                save_in_use_p (p) = xmalloc (bitmap_size);
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}

/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          num_objects = OBJECTS_IN_PAGE (p);

          /* Add all live objects on this page to the count of
             allocated memory.  */
          live_objects = num_objects - p->num_free_objects;

          G.allocated += OBJECT_SIZE (order) * live_objects;

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
              /* If P was the first page in the list, then NEXT
                 becomes the new first page in the list, otherwise
                 splice P out of the forward pointers.  */
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Splice P out of the back pointers too.  */
              if (next)
                next->prev = previous;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  p->prev = G.page_tails[order];
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;

                  /* And update the backpointer in NEXT if necessary.  */
                  if (next)
                    next->prev = previous;

                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;

              /* Update the backchain in the next node if it exists.  */
              if (p->next)
                p->next->prev = previous;

              /* Move P to the head of the list.  */
              p->next = G.pages[order];
              p->prev = NULL;
              G.pages[order]->prev = p;

              /* Update the head pointer.  */
              G.pages[order] = p;

              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}

#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects;
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          num_objects = OBJECTS_IN_PAGE (p);
          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                {
                  char *object = p->page + i * size;

                  /* Keep poison-by-write when we expect to use Valgrind,
                     so the exact same memory semantics is kept, in case
                     there are memory errors.  We override this request
                     below.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
                  memset (object, 0xa5, size);

                  /* Drop the handle to avoid handle leak.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
                }
            }
        }
    }
}
#else
#define poison_pages()
#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
         do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
         free_object entry, so that we can verify that the address
         isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
        {
          f->next = still_free;
          still_free = f;
        }
      else
        free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
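  /* For example (editorial note, with hypothetical parameter values):
     if --param ggc-min-heapsize=4096 and --param ggc-min-expand=30,
     collection is skipped until G.allocated exceeds
     max (G.allocated_last_gc, 4 MB) * 1.30.  */
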
1960 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
1961 return;
  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
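/* The phase order above matters: marks are cleared and the roots
   marked first, poison_pages and validate_free_objects then both
   inspect the in-use bitmaps while they still reflect the mark
   phase, and only afterwards does sweep_pages reclaim storage and
   recompute G.allocated.  */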
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
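/* SCALE and STAT_LABEL cooperate to print a byte count in the most
   readable unit: values under 10K print as bytes with a blank label,
   values under 10M in kilobytes with 'k', and anything larger in
   megabytes with 'M'.  For example, SCALE (3145728) is 3072 and
   STAT_LABEL (3145728) is 'k', so 3 MB prints as "3072k".  */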
void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s %10s %10s\n",
           "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;
      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
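          /* page_entry ends with a one-element in_use_p array (the
             C89 "struct hack"), so its real footprint is the struct
             size minus that placeholder long, plus the bitmap sized
             for the page's object count and one extra trailing bit.  */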
          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
        }
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
               (unsigned long) OBJECT_SIZE (i),
               SCALE (allocated), STAT_LABEL (allocated),
               SCALE (in_use), STAT_LABEL (in_use),
               SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
           SCALE (G.allocated), STAT_LABEL (G.allocated),
           SCALE (total_overhead), STAT_LABEL (total_overhead));
#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead:                        %10lld\n",
             G.stats.total_overhead);
    fprintf (stderr, "Total Allocated:                       %10lld\n",
             G.stats.total_allocated);

    fprintf (stderr, "Total Overhead  under  32B:            %10lld\n",
             G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B:            %10lld\n",
             G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B:            %10lld\n",
             G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B:            %10lld\n",
             G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B:            %10lld\n",
             G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:            %10lld\n",
             G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
        {
          /* OBJECT_SIZE yields a size_t, so cast for the %lu
             conversion rather than passing it to %d.  */
          fprintf (stderr, "Total Overhead  page size %7lu:    %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_overhead_per_order[i]);
          fprintf (stderr, "Total Allocated page size %7lu:    %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_allocated_per_order[i]);
        }
  }
#endif
}
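/* The remainder of this file implements the precompiled-header (PCH)
   hooks for the page allocator.  The PCH writer drives them in
   phases: ggc_pch_count_object tallies every live object per order,
   ggc_pch_total_size then reports how much address space the dump
   needs, ggc_pch_this_base assigns each order a bump-pointer region
   relative to the address at which the file will be mapped back in,
   and ggc_pch_alloc_object / ggc_pch_write_object relocate and emit
   the objects themselves.  */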
struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned totals[NUM_ORDERS];
  } d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  d->d.totals[order]++;
}
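/* The size-to-order mapping above mirrors the allocator proper:
   requests of up to 256 bytes are resolved through the precomputed
   size_lookup table, while larger requests fall back to a linear
   scan upward from order 9 until a large enough order is found.
   For example, a 300-byte object is bumped to the first order whose
   OBJECT_SIZE is at least 300 (512 bytes on a typical host).  */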
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}
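/* Both routines walk the orders with the same page-rounded
   arithmetic, so the base addresses handed out here line up exactly
   with the total size reported above.  For illustration: three
   64-byte objects in one order occupy 192 bytes, which ROUND_UP pads
   to a full 4096-byte page (assuming a 4K page size), and the next
   order's region starts on the following page boundary.  */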
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256];

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE (order), then we need to pad the
     object out to OBJECT_SIZE (order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance that the OS may try to flush any outstanding
         writes.  */
      if (padding <= sizeof (emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error ("can't write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error ("can't write PCH file");
        }
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
                                   G.pagesize),
                SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}
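/* The closing fseek above mirrors the ROUND_UP in ggc_pch_total_size:
   once the last object of an order has been written, the file
   position is advanced past the padding that rounds the order's
   region up to a page boundary, keeping the on-disk layout in step
   with the address layout computed by ggc_pch_this_base.  */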
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}
/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
  new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));

  memcpy (&new_by_depth[0],
          &G.by_depth[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
          &G.by_depth[0],
          count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
          &G.save_in_use[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
          &G.save_in_use[0],
          count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
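/* For illustration: if by_depth held [O0 O1 O2 N0 N1], with three
   old entries and two freshly pushed PCH entries, the copies above
   produce [N0 N1 O0 O1 O2].  The PCH entries now sit first, as
   required for context depth 0, and push_depth (2) records that the
   old depth-1 entries begin at index 2.  */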
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
        p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
        continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = xcalloc (1, (sizeof (struct page_entry)
                           - sizeof (long)
                           + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;
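      /* Mark every object on the page as live, a word at a time where
         possible.  The bitmap covers num_objs + 1 bits; the extra,
         always-set bit past the last object appears to serve as a
         sentinel for the free-slot scan elsewhere in this file.  */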
      for (j = 0;
           j + HOST_BITS_PER_LONG <= num_objs + 1;
           j += HOST_BITS_PER_LONG)
        entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
        entry->in_use_p[j / HOST_BITS_PER_LONG]
          |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
           pte < entry->page + entry->bytes;
           pte += G.pagesize)
        set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
        G.page_tails[i]->next = entry;
      else
        G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
         end of the varrays; later, we will move the new information
         to the front of the varrays, as the PCH page tables are at
         context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}