1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "toplev.h"
29 #include "flags.h"
30 #include "ggc.h"
31 #include "timevar.h"
32 #include "params.h"
33 #include "tree-flow.h"
34 #ifdef ENABLE_VALGRIND_CHECKING
35 # ifdef HAVE_VALGRIND_MEMCHECK_H
36 # include <valgrind/memcheck.h>
37 # elif defined HAVE_MEMCHECK_H
38 # include <memcheck.h>
39 # else
40 # include <valgrind.h>
41 # endif
42 #else
43 /* Avoid #ifdefs when we can help it. */
44 #define VALGRIND_DISCARD(x)
45 #endif
47 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
48 file open. Prefer either to valloc. */
49 #ifdef HAVE_MMAP_ANON
50 # undef HAVE_MMAP_DEV_ZERO
52 # include <sys/mman.h>
53 # ifndef MAP_FAILED
54 # define MAP_FAILED -1
55 # endif
56 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
57 # define MAP_ANONYMOUS MAP_ANON
58 # endif
59 # define USING_MMAP
61 #endif
63 #ifdef HAVE_MMAP_DEV_ZERO
65 # include <sys/mman.h>
66 # ifndef MAP_FAILED
67 # define MAP_FAILED -1
68 # endif
69 # define USING_MMAP
71 #endif
73 #ifndef USING_MMAP
74 #define USING_MALLOC_PAGE_GROUPS
75 #endif
77 /* Strategy:
79 This garbage-collecting allocator allocates objects on one of a set
80 of pages. Each page can allocate objects of a single size only;
81 available sizes are powers of two starting at four bytes. The size
82 of an allocation request is rounded up to the next power of two
83 (`order'), and satisfied from the appropriate page.
85 Each page is recorded in a page-entry, which also maintains an
86 in-use bitmap of object positions on the page. This allows the
87 allocation state of a particular object to be flipped without
88 touching the page itself.
90 Each page-entry also has a context depth, which is used to track
91 pushing and popping of allocation contexts. Only objects allocated
92 in the current (highest-numbered) context may be collected.
94 Page entries are arranged in an array of singly-linked lists. The
95 array is indexed by the allocation size, in bits, of the pages on
96 it; i.e. all pages on a list allocate objects of the same size.
97 Pages are ordered on the list such that all non-full pages precede
98 all full pages, with non-full pages arranged in order of decreasing
99 context depth.
101 Empty pages (of all orders) are kept on a single page cache list,
102 and are considered first when new pages are required; they are
103 deallocated at the start of the next collection if they haven't
104 been recycled by then. */
106 /* Define GGC_DEBUG_LEVEL to print debugging information.
107 0: No debugging output.
108 1: GC statistics only.
109 2: Page-entry allocations/deallocations as well.
110 3: Object allocations as well.
111 4: Object marks as well. */
112 #define GGC_DEBUG_LEVEL (0)
114 #ifndef HOST_BITS_PER_PTR
115 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
116 #endif
119 /* A two-level tree is used to look up the page-entry for a given
120 pointer. Two chunks of the pointer's bits are extracted to index
121 the first and second levels of the tree, as follows:
123                            HOST_PAGE_SIZE_BITS
124                      32              |      |
125     msb +----------------+----+------+------+ lsb
126                          |     |
127                   PAGE_L1_BITS
128                              |
129                       PAGE_L2_BITS
131 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
132 pages are aligned on system page boundaries. The next most
133 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
134 index values in the lookup table, respectively.
136 For 32-bit architectures and the settings below, there are no
137 leftover bits. For architectures with wider pointers, the lookup
138 tree points to a list of pages, which must be scanned to find the
139 correct one. */
141 #define PAGE_L1_BITS (8)
142 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
143 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
144 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
146 #define LOOKUP_L1(p) \
147 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
149 #define LOOKUP_L2(p) \
150 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
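/* For example, assuming a 32-bit host with 4096-byte pages:
   G.lg_pagesize == 12 and PAGE_L2_BITS == 12, so for p == 0x12345678
   LOOKUP_L1 (p) == 0x12 and LOOKUP_L2 (p) == 0x345, and the page entry
   is found at G.lookup[0x12][0x345].  */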
152 /* The number of objects per allocation page, for objects on a page of
153 the indicated ORDER. */
154 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
156 /* The number of objects in P. */
157 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
159 /* The size of an object on a page of the indicated ORDER. */
160 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
162 /* For speed, we avoid doing a general integer divide to locate the
163 offset in the allocation bitmap, by precalculating numbers M, S
164 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
165 within the page which is evenly divisible by the object size Z. */
166 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
167 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
168 #define OFFSET_TO_BIT(OFFSET, ORDER) \
169 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
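/* For example, if some order has OBJECT_SIZE == 24 and size_t is 32 bits,
   compute_inverse stores mult == 0xaaaaaaab and shift == 3 (24 == 3 << 3,
   and 0xaaaaaaab * 3 == 1 modulo 2^32).  Then for OFFSET == 48,
   (48 * 0xaaaaaaab) mod 2^32 == 16 and 16 >> 3 == 2, which is 48 / 24.  */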
171 /* The number of extra orders, not corresponding to power-of-two sized
172 objects. */
174 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
176 #define RTL_SIZE(NSLOTS) \
177 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
179 #define TREE_EXP_SIZE(OPS) \
180 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
182 /* The Ith entry is the maximum size of an object to be stored in the
183 Ith extra order. Adding a new entry to this array is the *only*
184 thing you need to do to add a new special allocation size. */
186 static const size_t extra_order_size_table[] = {
187 sizeof (struct stmt_ann_d),
188 sizeof (struct var_ann_d),
189 sizeof (struct tree_decl_non_common),
190 sizeof (struct tree_field_decl),
191 sizeof (struct tree_parm_decl),
192 sizeof (struct tree_var_decl),
193 sizeof (struct tree_list),
194 sizeof (struct tree_ssa_name),
195 sizeof (struct function),
196 sizeof (struct basic_block_def),
197 sizeof (bitmap_element),
198 /* PHI nodes with one to three arguments are already covered by the
199 above sizes. */
200 sizeof (struct tree_phi_node) + sizeof (struct phi_arg_d) * 3,
201 TREE_EXP_SIZE (2),
202 RTL_SIZE (2), /* MEM, PLUS, etc. */
203 RTL_SIZE (9), /* INSN */
204 };
206 /* The total number of orders. */
208 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
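/* With the 15 entries in extra_order_size_table above, this comes to
   32 + 15 == 47 orders on a 32-bit host and 64 + 15 == 79 on a 64-bit
   host.  */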
210 /* We use this structure to determine the alignment required for
211 allocations. For power-of-two sized allocations, that's not a
212 problem, but it does matter for odd-sized allocations. */
214 struct max_alignment {
215 char c;
216 union {
217 HOST_WIDEST_INT i;
218 long double d;
219 } u;
220 };
222 /* The biggest alignment required. */
224 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
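/* The offsetof trick works because the compiler must pad the single char
   so that the union starts at an offset satisfying the alignment of its
   most strictly aligned member; that offset is therefore the largest
   alignment we ever need (typically 8 or 16 bytes, depending on the
   host's HOST_WIDEST_INT and long double).  */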
226 /* Compute the smallest nonnegative number which when added to X gives
227 a multiple of F. */
229 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
231 /* Compute the smallest multiple of F that is >= X. */
233 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
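/* For example, ROUND_UP_VALUE (20, 8) == 4 and ROUND_UP (20, 8) == 24;
   for an already-aligned value, ROUND_UP_VALUE (24, 8) == 0 and
   ROUND_UP (24, 8) == 24.  */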
235 /* The Ith entry is the number of objects on a page of order I. */
237 static unsigned objects_per_page_table[NUM_ORDERS];
239 /* The Ith entry is the size of an object on a page of order I. */
241 static size_t object_size_table[NUM_ORDERS];
243 /* The Ith entry is a pair of numbers (mult, shift) such that
244 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
245 for all k evenly divisible by OBJECT_SIZE(I). */
247 static struct
249 size_t mult;
250 unsigned int shift;
252 inverse_table[NUM_ORDERS];
254 /* A page_entry records the status of an allocation page. This
255 structure is dynamically sized to fit the bitmap in_use_p. */
256 typedef struct page_entry
258 /* The next page-entry with objects of the same size, or NULL if
259 this is the last page-entry. */
260 struct page_entry *next;
262 /* The previous page-entry with objects of the same size, or NULL if
263 this is the first page-entry. The PREV pointer exists solely to
264 keep the cost of ggc_free manageable. */
265 struct page_entry *prev;
267 /* The number of bytes allocated. (This will always be a multiple
268 of the host system page size.) */
269 size_t bytes;
271 /* The address at which the memory is allocated. */
272 char *page;
274 #ifdef USING_MALLOC_PAGE_GROUPS
275 /* Back pointer to the page group this page came from. */
276 struct page_group *group;
277 #endif
279 /* This is the index in the by_depth varray where this page table
280 can be found. */
281 unsigned long index_by_depth;
283 /* Context depth of this page. */
284 unsigned short context_depth;
286 /* The number of free objects remaining on this page. */
287 unsigned short num_free_objects;
289 /* A likely candidate for the bit position of a free object for the
290 next allocation from this page. */
291 unsigned short next_bit_hint;
293 /* The lg of size of objects allocated from this page. */
294 unsigned char order;
296 /* A bit vector indicating whether or not objects are in use. The
297 Nth bit is one if the Nth object on this page is allocated. This
298 array is dynamically sized. */
299 unsigned long in_use_p[1];
300 } page_entry;
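/* Note that in_use_p is declared with a single element but the structure
   is over-allocated: alloc_page requests
   sizeof (page_entry) - sizeof (long) + BITMAP_SIZE (num_objects + 1)
   bytes, so the bitmap extends past the declared array.  */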
302 #ifdef USING_MALLOC_PAGE_GROUPS
303 /* A page_group describes a large allocation from malloc, from which
304 we parcel out aligned pages. */
305 typedef struct page_group
307 /* A linked list of all extant page groups. */
308 struct page_group *next;
310 /* The address we received from malloc. */
311 char *allocation;
313 /* The size of the block. */
314 size_t alloc_size;
316 /* A bitmask of pages in use. */
317 unsigned int in_use;
318 } page_group;
319 #endif
321 #if HOST_BITS_PER_PTR <= 32
323 /* On 32-bit hosts, we use a two level page table, as pictured above. */
324 typedef page_entry **page_table[PAGE_L1_SIZE];
326 #else
328 /* On 64-bit hosts, we use the same two level page tables plus a linked
329 list that disambiguates the top 32-bits. There will almost always be
330 exactly one entry in the list. */
331 typedef struct page_table_chain
333 struct page_table_chain *next;
334 size_t high_bits;
335 page_entry **table[PAGE_L1_SIZE];
336 } *page_table;
338 #endif
340 /* The rest of the global variables. */
341 static struct globals
343 /* The Nth element in this array is a page with objects of size 2^N.
344 If there are any pages with free objects, they will be at the
345 head of the list. NULL if there are no page-entries for this
346 object size. */
347 page_entry *pages[NUM_ORDERS];
349 /* The Nth element in this array is the last page with objects of
350 size 2^N. NULL if there are no page-entries for this object
351 size. */
352 page_entry *page_tails[NUM_ORDERS];
354 /* Lookup table for associating allocation pages with object addresses. */
355 page_table lookup;
357 /* The system's page size. */
358 size_t pagesize;
359 size_t lg_pagesize;
361 /* Bytes currently allocated. */
362 size_t allocated;
364 /* Bytes currently allocated at the end of the last collection. */
365 size_t allocated_last_gc;
367 /* Total amount of memory mapped. */
368 size_t bytes_mapped;
370 /* Bit N set if any allocations have been done at context depth N. */
371 unsigned long context_depth_allocations;
373 /* Bit N set if any collections have been done at context depth N. */
374 unsigned long context_depth_collections;
376 /* The current depth in the context stack. */
377 unsigned short context_depth;
379 /* A file descriptor open to /dev/zero for reading. */
380 #if defined (HAVE_MMAP_DEV_ZERO)
381 int dev_zero_fd;
382 #endif
384 /* A cache of free system pages. */
385 page_entry *free_pages;
387 #ifdef USING_MALLOC_PAGE_GROUPS
388 page_group *page_groups;
389 #endif
391 /* The file descriptor for debugging output. */
392 FILE *debug_file;
394 /* Current number of elements in use in depth below. */
395 unsigned int depth_in_use;
397 /* Maximum number of elements that can be used before resizing. */
398 unsigned int depth_max;
400 /* Each element of this array is an index in by_depth where the given
401 depth starts. This structure is indexed by the depth we are
402 interested in. */
403 unsigned int *depth;
405 /* Current number of elements in use in by_depth below. */
406 unsigned int by_depth_in_use;
408 /* Maximum number of elements that can be used before resizing. */
409 unsigned int by_depth_max;
411 /* Each element of this array is a pointer to a page_entry, all
412 page_entries can be found in here by increasing depth.
413 index_by_depth in the page_entry is the index into this data
414 structure where that page_entry can be found. This is used to
415 speed up finding all page_entries at a particular depth. */
416 page_entry **by_depth;
418 /* Each element is a pointer to the saved in_use_p bits, if any,
419 zero otherwise. We allocate them all together, to enable a
420 better runtime data access pattern. */
421 unsigned long **save_in_use;
423 #ifdef ENABLE_GC_ALWAYS_COLLECT
424 /* List of free objects to be verified as actually free on the
425 next collection. */
426 struct free_object
428 void *object;
429 struct free_object *next;
430 } *free_object_list;
431 #endif
433 #ifdef GATHER_STATISTICS
434 struct
436 /* Total memory allocated with ggc_alloc. */
437 unsigned long long total_allocated;
438 /* Total overhead for memory to be allocated with ggc_alloc. */
439 unsigned long long total_overhead;
441 /* Total allocations and overhead for sizes less than 32, 64 and 128.
442 These sizes are interesting because they are typical cache line
443 sizes. */
445 unsigned long long total_allocated_under32;
446 unsigned long long total_overhead_under32;
448 unsigned long long total_allocated_under64;
449 unsigned long long total_overhead_under64;
451 unsigned long long total_allocated_under128;
452 unsigned long long total_overhead_under128;
454 /* The allocations for each of the allocation orders. */
455 unsigned long long total_allocated_per_order[NUM_ORDERS];
457 /* The overhead for each of the allocation orders. */
458 unsigned long long total_overhead_per_order[NUM_ORDERS];
459 } stats;
460 #endif
461 } G;
463 /* The size in bytes required to maintain a bitmap for the objects
464 on a page-entry. */
465 #define BITMAP_SIZE(Num_objects) \
466 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
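/* For example, assuming 4096-byte pages and a 64-bit host: a page of
   512-byte objects holds 8 objects, the one-past-the-end sentinel makes
   that 9 bits, and BITMAP_SIZE (9) is a single unsigned long (8 bytes).  */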
468 /* Allocate pages in chunks of this size, to throttle calls to memory
469 allocation routines. The first page is used, the rest go onto the
470 free list. This cannot be larger than HOST_BITS_PER_INT for the
471 in_use bitmask for page_group. Hosts that need a different value
472 can override this by defining GGC_QUIRE_SIZE explicitly. */
473 #ifndef GGC_QUIRE_SIZE
474 # ifdef USING_MMAP
475 # define GGC_QUIRE_SIZE 256
476 # else
477 # define GGC_QUIRE_SIZE 16
478 # endif
479 #endif
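/* With mmap and 4096-byte pages, a request for a single new page thus maps
   256 pages (1 MB) at once; alloc_page keeps one and strings the other 255
   onto G.free_pages.  (Sizes are illustrative; see alloc_page below.)  */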
481 /* Initial guess as to how many page table entries we might need. */
482 #define INITIAL_PTE_COUNT 128
484 static int ggc_allocated_p (const void *);
485 static page_entry *lookup_page_table_entry (const void *);
486 static void set_page_table_entry (void *, page_entry *);
487 #ifdef USING_MMAP
488 static char *alloc_anon (char *, size_t);
489 #endif
490 #ifdef USING_MALLOC_PAGE_GROUPS
491 static size_t page_group_index (char *, char *);
492 static void set_page_group_in_use (page_group *, char *);
493 static void clear_page_group_in_use (page_group *, char *);
494 #endif
495 static struct page_entry * alloc_page (unsigned);
496 static void free_page (struct page_entry *);
497 static void release_pages (void);
498 static void clear_marks (void);
499 static void sweep_pages (void);
500 static void ggc_recalculate_in_use_p (page_entry *);
501 static void compute_inverse (unsigned);
502 static inline void adjust_depth (void);
503 static void move_ptes_to_front (int, int);
505 void debug_print_page_list (int);
506 static void push_depth (unsigned int);
507 static void push_by_depth (page_entry *, unsigned long *);
509 /* Push an entry onto G.depth. */
511 inline static void
512 push_depth (unsigned int i)
514 if (G.depth_in_use >= G.depth_max)
516 G.depth_max *= 2;
517 G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
519 G.depth[G.depth_in_use++] = i;
522 /* Push an entry onto G.by_depth and G.save_in_use. */
524 inline static void
525 push_by_depth (page_entry *p, unsigned long *s)
527 if (G.by_depth_in_use >= G.by_depth_max)
529 G.by_depth_max *= 2;
530 G.by_depth = xrealloc (G.by_depth,
531 G.by_depth_max * sizeof (page_entry *));
532 G.save_in_use = xrealloc (G.save_in_use,
533 G.by_depth_max * sizeof (unsigned long *));
535 G.by_depth[G.by_depth_in_use] = p;
536 G.save_in_use[G.by_depth_in_use++] = s;
539 #if (GCC_VERSION < 3001)
540 #define prefetch(X) ((void) X)
541 #else
542 #define prefetch(X) __builtin_prefetch (X)
543 #endif
545 #define save_in_use_p_i(__i) \
546 (G.save_in_use[__i])
547 #define save_in_use_p(__p) \
548 (save_in_use_p_i (__p->index_by_depth))
550 /* Returns nonzero if P was allocated in GC'able memory. */
552 static inline int
553 ggc_allocated_p (const void *p)
555 page_entry ***base;
556 size_t L1, L2;
558 #if HOST_BITS_PER_PTR <= 32
559 base = &G.lookup[0];
560 #else
561 page_table table = G.lookup;
562 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
563 while (1)
565 if (table == NULL)
566 return 0;
567 if (table->high_bits == high_bits)
568 break;
569 table = table->next;
571 base = &table->table[0];
572 #endif
574 /* Extract the level 1 and 2 indices. */
575 L1 = LOOKUP_L1 (p);
576 L2 = LOOKUP_L2 (p);
578 return base[L1] && base[L1][L2];
581 /* Traverse the page table and find the entry for a page.
582 Die (probably) if the object wasn't allocated via GC. */
584 static inline page_entry *
585 lookup_page_table_entry (const void *p)
587 page_entry ***base;
588 size_t L1, L2;
590 #if HOST_BITS_PER_PTR <= 32
591 base = &G.lookup[0];
592 #else
593 page_table table = G.lookup;
594 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
595 while (table->high_bits != high_bits)
596 table = table->next;
597 base = &table->table[0];
598 #endif
600 /* Extract the level 1 and 2 indices. */
601 L1 = LOOKUP_L1 (p);
602 L2 = LOOKUP_L2 (p);
604 return base[L1][L2];
607 /* Set the page table entry for a page. */
609 static void
610 set_page_table_entry (void *p, page_entry *entry)
612 page_entry ***base;
613 size_t L1, L2;
615 #if HOST_BITS_PER_PTR <= 32
616 base = &G.lookup[0];
617 #else
618 page_table table;
619 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
620 for (table = G.lookup; table; table = table->next)
621 if (table->high_bits == high_bits)
622 goto found;
624 /* Not found -- allocate a new table. */
625 table = xcalloc (1, sizeof(*table));
626 table->next = G.lookup;
627 table->high_bits = high_bits;
628 G.lookup = table;
629 found:
630 base = &table->table[0];
631 #endif
633 /* Extract the level 1 and 2 indices. */
634 L1 = LOOKUP_L1 (p);
635 L2 = LOOKUP_L2 (p);
637 if (base[L1] == NULL)
638 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
640 base[L1][L2] = entry;
643 /* Prints the page-entry for object size ORDER, for debugging. */
645 void
646 debug_print_page_list (int order)
648 page_entry *p;
649 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
650 (void *) G.page_tails[order]);
651 p = G.pages[order];
652 while (p != NULL)
654 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
655 p->num_free_objects);
656 p = p->next;
658 printf ("NULL\n");
659 fflush (stdout);
662 #ifdef USING_MMAP
663 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
664 (if non-null). The ifdef structure here is intended to cause a
665 compile error unless exactly one of the HAVE_* is defined. */
667 static inline char *
668 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
670 #ifdef HAVE_MMAP_ANON
671 char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
672 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
673 #endif
674 #ifdef HAVE_MMAP_DEV_ZERO
675 char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
676 MAP_PRIVATE, G.dev_zero_fd, 0);
677 #endif
679 if (page == (char *) MAP_FAILED)
681 perror ("virtual memory exhausted");
682 exit (FATAL_EXIT_CODE);
685 /* Remember that we allocated this memory. */
686 G.bytes_mapped += size;
688 /* Pretend we don't have access to the allocated pages. We'll enable
689 access to smaller pieces of the area in ggc_alloc. Discard the
690 handle to avoid handle leak. */
691 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
693 return page;
695 #endif
696 #ifdef USING_MALLOC_PAGE_GROUPS
697 /* Compute the index for this page into the page group. */
699 static inline size_t
700 page_group_index (char *allocation, char *page)
702 return (size_t) (page - allocation) >> G.lg_pagesize;
705 /* Set and clear the in_use bit for this page in the page group. */
707 static inline void
708 set_page_group_in_use (page_group *group, char *page)
710 group->in_use |= 1 << page_group_index (group->allocation, page);
713 static inline void
714 clear_page_group_in_use (page_group *group, char *page)
716 group->in_use &= ~(1 << page_group_index (group->allocation, page));
718 #endif
720 /* Allocate a new page for allocating objects of size 2^ORDER,
721 and return an entry for it. The entry is not added to the
722 appropriate page_table list. */
724 static inline struct page_entry *
725 alloc_page (unsigned order)
727 struct page_entry *entry, *p, **pp;
728 char *page;
729 size_t num_objects;
730 size_t bitmap_size;
731 size_t page_entry_size;
732 size_t entry_size;
733 #ifdef USING_MALLOC_PAGE_GROUPS
734 page_group *group;
735 #endif
737 num_objects = OBJECTS_PER_PAGE (order);
738 bitmap_size = BITMAP_SIZE (num_objects + 1);
739 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
740 entry_size = num_objects * OBJECT_SIZE (order);
741 if (entry_size < G.pagesize)
742 entry_size = G.pagesize;
744 entry = NULL;
745 page = NULL;
747 /* Check the list of free pages for one we can use. */
748 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
749 if (p->bytes == entry_size)
750 break;
752 if (p != NULL)
754 /* Recycle the allocated memory from this page ... */
755 *pp = p->next;
756 page = p->page;
758 #ifdef USING_MALLOC_PAGE_GROUPS
759 group = p->group;
760 #endif
762 /* ... and, if possible, the page entry itself. */
763 if (p->order == order)
765 entry = p;
766 memset (entry, 0, page_entry_size);
768 else
769 free (p);
771 #ifdef USING_MMAP
772 else if (entry_size == G.pagesize)
774 /* We want just one page. Allocate a bunch of them and put the
775 extras on the freelist. (Can only do this optimization with
776 mmap for backing store.) */
777 struct page_entry *e, *f = G.free_pages;
778 int i;
780 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
782 /* This loop counts down so that the chain will be in ascending
783 memory order. */
784 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
786 e = xcalloc (1, page_entry_size);
787 e->order = order;
788 e->bytes = G.pagesize;
789 e->page = page + (i << G.lg_pagesize);
790 e->next = f;
791 f = e;
794 G.free_pages = f;
796 else
797 page = alloc_anon (NULL, entry_size);
798 #endif
799 #ifdef USING_MALLOC_PAGE_GROUPS
800 else
802 /* Allocate a large block of memory and serve out the aligned
803 pages therein. This results in much less memory wastage
804 than the traditional implementation of valloc. */
806 char *allocation, *a, *enda;
807 size_t alloc_size, head_slop, tail_slop;
808 int multiple_pages = (entry_size == G.pagesize);
810 if (multiple_pages)
811 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
812 else
813 alloc_size = entry_size + G.pagesize - 1;
814 allocation = xmalloc (alloc_size);
816 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
817 head_slop = page - allocation;
818 if (multiple_pages)
819 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
820 else
821 tail_slop = alloc_size - entry_size - head_slop;
822 enda = allocation + alloc_size - tail_slop;
824 /* We allocated N pages, which are likely not aligned, leaving
825 us with N-1 usable pages. We plan to place the page_group
826 structure somewhere in the slop. */
827 if (head_slop >= sizeof (page_group))
828 group = (page_group *)page - 1;
829 else
831 /* We magically got an aligned allocation. Too bad, we have
832 to waste a page anyway. */
833 if (tail_slop == 0)
835 enda -= G.pagesize;
836 tail_slop += G.pagesize;
838 gcc_assert (tail_slop >= sizeof (page_group));
839 group = (page_group *)enda;
840 tail_slop -= sizeof (page_group);
843 /* Remember that we allocated this memory. */
844 group->next = G.page_groups;
845 group->allocation = allocation;
846 group->alloc_size = alloc_size;
847 group->in_use = 0;
848 G.page_groups = group;
849 G.bytes_mapped += alloc_size;
851 /* If we allocated multiple pages, put the rest on the free list. */
852 if (multiple_pages)
854 struct page_entry *e, *f = G.free_pages;
855 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
857 e = xcalloc (1, page_entry_size);
858 e->order = order;
859 e->bytes = G.pagesize;
860 e->page = a;
861 e->group = group;
862 e->next = f;
863 f = e;
865 G.free_pages = f;
868 #endif
870 if (entry == NULL)
871 entry = xcalloc (1, page_entry_size);
873 entry->bytes = entry_size;
874 entry->page = page;
875 entry->context_depth = G.context_depth;
876 entry->order = order;
877 entry->num_free_objects = num_objects;
878 entry->next_bit_hint = 1;
880 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
882 #ifdef USING_MALLOC_PAGE_GROUPS
883 entry->group = group;
884 set_page_group_in_use (group, page);
885 #endif
887 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
888 increment the hint. */
889 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
890 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
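/* For instance, with 8 objects per page the sentinel is bit 8; once
   next_bit_hint walks past the last real object it lands on this
   always-set bit, so the hint test in ggc_alloc fails and we rescan the
   bitmap from the start.  */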
892 set_page_table_entry (page, entry);
894 if (GGC_DEBUG_LEVEL >= 2)
895 fprintf (G.debug_file,
896 "Allocating page at %p, object size=%lu, data %p-%p\n",
897 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
898 page + entry_size - 1);
900 return entry;
903 /* Adjust the size of G.depth so that no index greater than the one
904 used by the top of the G.by_depth is used. */
906 static inline void
907 adjust_depth (void)
909 page_entry *top;
911 if (G.by_depth_in_use)
913 top = G.by_depth[G.by_depth_in_use-1];
915 /* Peel back indices in depth that index into by_depth, so that
916 as new elements are added to by_depth, we note the indices
917 of those elements, if they are for new context depths. */
918 while (G.depth_in_use > (size_t)top->context_depth+1)
919 --G.depth_in_use;
923 /* For a page that is no longer needed, put it on the free page list. */
925 static void
926 free_page (page_entry *entry)
928 if (GGC_DEBUG_LEVEL >= 2)
929 fprintf (G.debug_file,
930 "Deallocating page at %p, data %p-%p\n", (void *) entry,
931 entry->page, entry->page + entry->bytes - 1);
933 /* Mark the page as inaccessible. Discard the handle to avoid handle
934 leak. */
935 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
937 set_page_table_entry (entry->page, NULL);
939 #ifdef USING_MALLOC_PAGE_GROUPS
940 clear_page_group_in_use (entry->group, entry->page);
941 #endif
943 if (G.by_depth_in_use > 1)
945 page_entry *top = G.by_depth[G.by_depth_in_use-1];
946 int i = entry->index_by_depth;
948 /* We cannot free a page from a context deeper than the current
949 one. */
950 gcc_assert (entry->context_depth == top->context_depth);
952 /* Put top element into freed slot. */
953 G.by_depth[i] = top;
954 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
955 top->index_by_depth = i;
957 --G.by_depth_in_use;
959 adjust_depth ();
961 entry->next = G.free_pages;
962 G.free_pages = entry;
965 /* Release the free page cache to the system. */
967 static void
968 release_pages (void)
970 #ifdef USING_MMAP
971 page_entry *p, *next;
972 char *start;
973 size_t len;
975 /* Gather up adjacent pages so they are unmapped together. */
976 p = G.free_pages;
978 while (p)
980 start = p->page;
981 next = p->next;
982 len = p->bytes;
983 free (p);
984 p = next;
986 while (p && p->page == start + len)
988 next = p->next;
989 len += p->bytes;
990 free (p);
991 p = next;
994 munmap (start, len);
995 G.bytes_mapped -= len;
998 G.free_pages = NULL;
999 #endif
1000 #ifdef USING_MALLOC_PAGE_GROUPS
1001 page_entry **pp, *p;
1002 page_group **gp, *g;
1004 /* Remove all pages from free page groups from the list. */
1005 pp = &G.free_pages;
1006 while ((p = *pp) != NULL)
1007 if (p->group->in_use == 0)
1009 *pp = p->next;
1010 free (p);
1012 else
1013 pp = &p->next;
1015 /* Remove all free page groups, and release the storage. */
1016 gp = &G.page_groups;
1017 while ((g = *gp) != NULL)
1018 if (g->in_use == 0)
1020 *gp = g->next;
1021 G.bytes_mapped -= g->alloc_size;
1022 free (g->allocation);
1024 else
1025 gp = &g->next;
1026 #endif
1029 /* This table provides a fast way to determine ceil(log_2(size)) for
1030 allocation requests. The minimum allocation size is eight bytes. */
1031 #define NUM_SIZE_LOOKUP 512
1032 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1034 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1035 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1036 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1037 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1038 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1039 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1040 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1041 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1042 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1043 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1044 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1045 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1046 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1047 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1048 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1049 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1050 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1051 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1052 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1053 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1054 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1055 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1056 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1057 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1058 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1059 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1060 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1061 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1062 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1063 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1064 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1065 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1066 };
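/* For example, size_lookup[24] == 5, so a 24-byte request is served from
   order 5 (32-byte objects), unless init_ggc later retargets that entry
   to one of the extra orders that fits 24 bytes more tightly.  */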
1068 /* Typed allocation function. Does nothing special in this collector. */
1070 void *
1071 ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
1072 MEM_STAT_DECL)
1074 return ggc_alloc_stat (size PASS_MEM_STAT);
1077 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1079 void *
1080 ggc_alloc_stat (size_t size MEM_STAT_DECL)
1082 size_t order, word, bit, object_offset, object_size;
1083 struct page_entry *entry;
1084 void *result;
1086 if (size < NUM_SIZE_LOOKUP)
1088 order = size_lookup[size];
1089 object_size = OBJECT_SIZE (order);
1091 else
1093 order = 10;
1094 while (size > (object_size = OBJECT_SIZE (order)))
1095 order++;
1098 /* If there are non-full pages for this size allocation, they are at
1099 the head of the list. */
1100 entry = G.pages[order];
1102 /* If there is no page for this object size, or all pages in this
1103 context are full, allocate a new page. */
1104 if (entry == NULL || entry->num_free_objects == 0)
1106 struct page_entry *new_entry;
1107 new_entry = alloc_page (order);
1109 new_entry->index_by_depth = G.by_depth_in_use;
1110 push_by_depth (new_entry, 0);
1112 /* We can skip context depths, if we do, make sure we go all the
1113 way to the new depth. */
1114 while (new_entry->context_depth >= G.depth_in_use)
1115 push_depth (G.by_depth_in_use-1);
1117 /* If this is the only entry, it's also the tail. If it is not
1118 the only entry, then we must update the PREV pointer of the
1119 ENTRY (G.pages[order]) to point to our new page entry. */
1120 if (entry == NULL)
1121 G.page_tails[order] = new_entry;
1122 else
1123 entry->prev = new_entry;
1125 /* Put new pages at the head of the page list. By definition the
1126 entry at the head of the list always has a NULL PREV pointer. */
1127 new_entry->next = entry;
1128 new_entry->prev = NULL;
1129 entry = new_entry;
1130 G.pages[order] = new_entry;
1132 /* For a new page, we know the word and bit positions (in the
1133 in_use bitmap) of the first available object -- they're zero. */
1134 new_entry->next_bit_hint = 1;
1135 word = 0;
1136 bit = 0;
1137 object_offset = 0;
1139 else
1141 /* First try to use the hint left from the previous allocation
1142 to locate a clear bit in the in-use bitmap. We've made sure
1143 that the one-past-the-end bit is always set, so if the hint
1144 has run over, this test will fail. */
1145 unsigned hint = entry->next_bit_hint;
1146 word = hint / HOST_BITS_PER_LONG;
1147 bit = hint % HOST_BITS_PER_LONG;
1149 /* If the hint didn't work, scan the bitmap from the beginning. */
1150 if ((entry->in_use_p[word] >> bit) & 1)
1152 word = bit = 0;
1153 while (~entry->in_use_p[word] == 0)
1154 ++word;
1156 #if GCC_VERSION >= 3004
1157 bit = __builtin_ctzl (~entry->in_use_p[word]);
1158 #else
1159 while ((entry->in_use_p[word] >> bit) & 1)
1160 ++bit;
1161 #endif
1163 hint = word * HOST_BITS_PER_LONG + bit;
1166 /* Next time, try the next bit. */
1167 entry->next_bit_hint = hint + 1;
1169 object_offset = hint * object_size;
1172 /* Set the in-use bit. */
1173 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1175 /* Keep a running total of the number of free objects. If this page
1176 fills up, we may have to move it to the end of the list if the
1177 next page isn't full. If the next page is full, all subsequent
1178 pages are full, so there's no need to move it. */
1179 if (--entry->num_free_objects == 0
1180 && entry->next != NULL
1181 && entry->next->num_free_objects > 0)
1183 /* We have a new head for the list. */
1184 G.pages[order] = entry->next;
1186 /* We are moving ENTRY to the end of the page table list.
1187 The new page at the head of the list will have NULL in
1188 its PREV field and ENTRY will have NULL in its NEXT field. */
1189 entry->next->prev = NULL;
1190 entry->next = NULL;
1192 /* Append ENTRY to the tail of the list. */
1193 entry->prev = G.page_tails[order];
1194 G.page_tails[order]->next = entry;
1195 G.page_tails[order] = entry;
1198 /* Calculate the object's address. */
1199 result = entry->page + object_offset;
1200 #ifdef GATHER_STATISTICS
1201 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1202 result PASS_MEM_STAT);
1203 #endif
1205 #ifdef ENABLE_GC_CHECKING
1206 /* Keep poisoning the object by writing 0xaf, so that we keep exactly
1207 the same semantics in the presence of memory bugs, regardless of
1208 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1209 handle to avoid a handle leak. */
1210 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
1212 /* `Poison' the entire allocated object, including any padding at
1213 the end. */
1214 memset (result, 0xaf, object_size);
1216 /* Make the bytes after the end of the object inaccessible. Discard the
1217 handle to avoid handle leak. */
1218 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1219 object_size - size));
1220 #endif
1222 /* Tell Valgrind that the memory is there, but its content isn't
1223 defined. The bytes at the end of the object are still marked
1224 inaccessible. */
1225 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1227 /* Keep track of how many bytes are being allocated. This
1228 information is used in deciding when to collect. */
1229 G.allocated += object_size;
1231 /* For timevar statistics. */
1232 timevar_ggc_mem_total += object_size;
1234 #ifdef GATHER_STATISTICS
1236 size_t overhead = object_size - size;
1238 G.stats.total_overhead += overhead;
1239 G.stats.total_allocated += object_size;
1240 G.stats.total_overhead_per_order[order] += overhead;
1241 G.stats.total_allocated_per_order[order] += object_size;
1243 if (size <= 32)
1245 G.stats.total_overhead_under32 += overhead;
1246 G.stats.total_allocated_under32 += object_size;
1248 if (size <= 64)
1250 G.stats.total_overhead_under64 += overhead;
1251 G.stats.total_allocated_under64 += object_size;
1253 if (size <= 128)
1255 G.stats.total_overhead_under128 += overhead;
1256 G.stats.total_allocated_under128 += object_size;
1259 #endif
1261 if (GGC_DEBUG_LEVEL >= 3)
1262 fprintf (G.debug_file,
1263 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1264 (unsigned long) size, (unsigned long) object_size, result,
1265 (void *) entry);
1267 return result;
1270 /* If P is not marked, mark it and return false. Otherwise return true.
1271 P must have been allocated by the GC allocator; it mustn't point to
1272 static objects, stack variables, or memory allocated with malloc. */
1275 ggc_set_mark (const void *p)
1277 page_entry *entry;
1278 unsigned bit, word;
1279 unsigned long mask;
1281 /* Look up the page on which the object is alloced. If the object
1282 wasn't allocated by the collector, we'll probably die. */
1283 entry = lookup_page_table_entry (p);
1284 gcc_assert (entry);
1286 /* Calculate the index of the object on the page; this is its bit
1287 position in the in_use_p bitmap. */
1288 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1289 word = bit / HOST_BITS_PER_LONG;
1290 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1292 /* If the bit was previously set, skip it. */
1293 if (entry->in_use_p[word] & mask)
1294 return 1;
1296 /* Otherwise set it, and decrement the free object count. */
1297 entry->in_use_p[word] |= mask;
1298 entry->num_free_objects -= 1;
1300 if (GGC_DEBUG_LEVEL >= 4)
1301 fprintf (G.debug_file, "Marking %p\n", p);
1303 return 0;
1306 /* Return 1 if P has been marked, zero otherwise.
1307 P must have been allocated by the GC allocator; it mustn't point to
1308 static objects, stack variables, or memory allocated with malloc. */
1311 ggc_marked_p (const void *p)
1313 page_entry *entry;
1314 unsigned bit, word;
1315 unsigned long mask;
1317 /* Look up the page on which the object is alloced. If the object
1318 wasn't allocated by the collector, we'll probably die. */
1319 entry = lookup_page_table_entry (p);
1320 gcc_assert (entry);
1322 /* Calculate the index of the object on the page; this is its bit
1323 position in the in_use_p bitmap. */
1324 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1325 word = bit / HOST_BITS_PER_LONG;
1326 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1328 return (entry->in_use_p[word] & mask) != 0;
1331 /* Return the size of the gc-able object P. */
1333 size_t
1334 ggc_get_size (const void *p)
1336 page_entry *pe = lookup_page_table_entry (p);
1337 return OBJECT_SIZE (pe->order);
1340 /* Release the memory for object P. */
1342 void
1343 ggc_free (void *p)
1345 page_entry *pe = lookup_page_table_entry (p);
1346 size_t order = pe->order;
1347 size_t size = OBJECT_SIZE (order);
1349 #ifdef GATHER_STATISTICS
1350 ggc_free_overhead (p);
1351 #endif
1353 if (GGC_DEBUG_LEVEL >= 3)
1354 fprintf (G.debug_file,
1355 "Freeing object, actual size=%lu, at %p on %p\n",
1356 (unsigned long) size, p, (void *) pe);
1358 #ifdef ENABLE_GC_CHECKING
1359 /* Poison the data, to indicate the data is garbage. */
1360 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
1361 memset (p, 0xa5, size);
1362 #endif
1363 /* Let valgrind know the object is free. */
1364 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
1366 #ifdef ENABLE_GC_ALWAYS_COLLECT
1367 /* In the completely-anal-checking mode, we do *not* immediately free
1368 the data, but instead verify that the data is *actually* not
1369 reachable the next time we collect. */
1371 struct free_object *fo = XNEW (struct free_object);
1372 fo->object = p;
1373 fo->next = G.free_object_list;
1374 G.free_object_list = fo;
1376 #else
1378 unsigned int bit_offset, word, bit;
1380 G.allocated -= size;
1382 /* Mark the object not-in-use. */
1383 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1384 word = bit_offset / HOST_BITS_PER_LONG;
1385 bit = bit_offset % HOST_BITS_PER_LONG;
1386 pe->in_use_p[word] &= ~(1UL << bit);
1388 if (pe->num_free_objects++ == 0)
1390 page_entry *p, *q;
1392 /* If the page is completely full, then it's supposed to
1393 be after all pages that aren't. Since we've freed one
1394 object from a page that was full, we need to move the
1395 page to the head of the list.
1397 PE is the node we want to move. Q is the previous node
1398 and P is the next node in the list. */
1399 q = pe->prev;
1400 if (q && q->num_free_objects == 0)
1402 p = pe->next;
1404 q->next = p;
1406 /* If PE was at the end of the list, then Q becomes the
1407 new end of the list. If PE was not the end of the
1408 list, then we need to update the PREV field for P. */
1409 if (!p)
1410 G.page_tails[order] = q;
1411 else
1412 p->prev = q;
1414 /* Move PE to the head of the list. */
1415 pe->next = G.pages[order];
1416 pe->prev = NULL;
1417 G.pages[order]->prev = pe;
1418 G.pages[order] = pe;
1421 /* Reset the hint bit to point to the only free object. */
1422 pe->next_bit_hint = bit_offset;
1425 #endif
1428 /* Subroutine of init_ggc which computes the pair of numbers used to
1429 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1431 This algorithm is taken from Granlund and Montgomery's paper
1432 "Division by Invariant Integers using Multiplication"
1433 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1434 constants). */
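/* The loop below is Newton's iteration for a modular inverse: each
   inv = inv * (2 - inv * size) step doubles the number of correct
   low-order bits, so it terminates quickly.  For example, with
   OBJECT_SIZE == 24 and a 32-bit size_t, e becomes 3, the odd part is 3,
   and inv converges to 0xaaaaaaab.  */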
1436 static void
1437 compute_inverse (unsigned order)
1439 size_t size, inv;
1440 unsigned int e;
1442 size = OBJECT_SIZE (order);
1443 e = 0;
1444 while (size % 2 == 0)
1446 e++;
1447 size >>= 1;
1450 inv = size;
1451 while (inv * size != 1)
1452 inv = inv * (2 - inv*size);
1454 DIV_MULT (order) = inv;
1455 DIV_SHIFT (order) = e;
1458 /* Initialize the ggc-mmap allocator. */
1459 void
1460 init_ggc (void)
1462 unsigned order;
1464 G.pagesize = getpagesize();
1465 G.lg_pagesize = exact_log2 (G.pagesize);
1467 #ifdef HAVE_MMAP_DEV_ZERO
1468 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1469 if (G.dev_zero_fd == -1)
1470 internal_error ("open /dev/zero: %m");
1471 #endif
1473 #if 0
1474 G.debug_file = fopen ("ggc-mmap.debug", "w");
1475 #else
1476 G.debug_file = stdout;
1477 #endif
1479 #ifdef USING_MMAP
1480 /* StunOS has an amazing off-by-one error for the first mmap allocation
1481 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1482 believe, is an unaligned page allocation, which would cause us to
1483 hork badly if we tried to use it. */
1485 char *p = alloc_anon (NULL, G.pagesize);
1486 struct page_entry *e;
1487 if ((size_t)p & (G.pagesize - 1))
1489 /* How losing. Discard this one and try another. If we still
1490 can't get something useful, give up. */
1492 p = alloc_anon (NULL, G.pagesize);
1493 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1496 /* We have a good page, might as well hold onto it... */
1497 e = XCNEW (struct page_entry);
1498 e->bytes = G.pagesize;
1499 e->page = p;
1500 e->next = G.free_pages;
1501 G.free_pages = e;
1503 #endif
1505 /* Initialize the object size table. */
1506 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1507 object_size_table[order] = (size_t) 1 << order;
1508 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1510 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1512 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1513 so that we're sure of getting aligned memory. */
1514 s = ROUND_UP (s, MAX_ALIGNMENT);
1515 object_size_table[order] = s;
1518 /* Initialize the objects-per-page and inverse tables. */
1519 for (order = 0; order < NUM_ORDERS; ++order)
1521 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1522 if (objects_per_page_table[order] == 0)
1523 objects_per_page_table[order] = 1;
1524 compute_inverse (order);
1527 /* Reset the size_lookup array to put appropriately sized objects in
1528 the special orders. All objects bigger than the previous power
1529 of two, but no greater than the special size, should go in the
1530 new order. */
1531 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1533 int o;
1534 int i;
1536 i = OBJECT_SIZE (order);
1537 if (i >= NUM_SIZE_LOOKUP)
1538 continue;
1540 for (o = size_lookup[i]; o == size_lookup [i]; --i)
1541 size_lookup[i] = order;
1544 G.depth_in_use = 0;
1545 G.depth_max = 10;
1546 G.depth = XNEWVEC (unsigned int, G.depth_max);
1548 G.by_depth_in_use = 0;
1549 G.by_depth_max = INITIAL_PTE_COUNT;
1550 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1551 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1554 /* Start a new GGC zone. */
1556 struct alloc_zone *
1557 new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
1559 return NULL;
1562 /* Destroy a GGC zone. */
1563 void
1564 destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
1568 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1569 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1571 static void
1572 ggc_recalculate_in_use_p (page_entry *p)
1574 unsigned int i;
1575 size_t num_objects;
1577 /* Because the past-the-end bit in in_use_p is always set, we
1578 pretend there is one additional object. */
1579 num_objects = OBJECTS_IN_PAGE (p) + 1;
1581 /* Reset the free object count. */
1582 p->num_free_objects = num_objects;
1584 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1585 for (i = 0;
1586 i < CEIL (BITMAP_SIZE (num_objects),
1587 sizeof (*p->in_use_p));
1588 ++i)
1590 unsigned long j;
1592 /* Something is in use if it is marked, or if it was in use in a
1593 context further down the context stack. */
1594 p->in_use_p[i] |= save_in_use_p (p)[i];
1596 /* Decrement the free object count for every object allocated. */
1597 for (j = p->in_use_p[i]; j; j >>= 1)
1598 p->num_free_objects -= (j & 1);
1601 gcc_assert (p->num_free_objects < num_objects);
1604 /* Unmark all objects. */
1606 static void
1607 clear_marks (void)
1609 unsigned order;
1611 for (order = 2; order < NUM_ORDERS; order++)
1613 page_entry *p;
1615 for (p = G.pages[order]; p != NULL; p = p->next)
1617 size_t num_objects = OBJECTS_IN_PAGE (p);
1618 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1620 /* The data should be page-aligned. */
1621 gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
1623 /* Pages that aren't in the topmost context are not collected;
1624 nevertheless, we need their in-use bit vectors to store GC
1625 marks. So, back them up first. */
1626 if (p->context_depth < G.context_depth)
1628 if (! save_in_use_p (p))
1629 save_in_use_p (p) = xmalloc (bitmap_size);
1630 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1633 /* Reset the number of free objects and clear the
1634 in-use bits. These will be adjusted by mark_obj. */
1635 p->num_free_objects = num_objects;
1636 memset (p->in_use_p, 0, bitmap_size);
1638 /* Make sure the one-past-the-end bit is always set. */
1639 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1640 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1645 /* Free all empty pages. Partially empty pages need no attention
1646 because the `mark' bit doubles as an `unused' bit. */
1648 static void
1649 sweep_pages (void)
1651 unsigned order;
1653 for (order = 2; order < NUM_ORDERS; order++)
1655 /* The last page-entry to consider, regardless of entries
1656 placed at the end of the list. */
1657 page_entry * const last = G.page_tails[order];
1659 size_t num_objects;
1660 size_t live_objects;
1661 page_entry *p, *previous;
1662 int done;
1664 p = G.pages[order];
1665 if (p == NULL)
1666 continue;
1668 previous = NULL;
1671 page_entry *next = p->next;
1673 /* Loop until all entries have been examined. */
1674 done = (p == last);
1676 num_objects = OBJECTS_IN_PAGE (p);
1678 /* Add all live objects on this page to the count of
1679 allocated memory. */
1680 live_objects = num_objects - p->num_free_objects;
1682 G.allocated += OBJECT_SIZE (order) * live_objects;
1684 /* Only objects on pages in the topmost context should get
1685 collected. */
1686 if (p->context_depth < G.context_depth)
1689 /* Remove the page if it's empty. */
1690 else if (live_objects == 0)
1692 /* If P was the first page in the list, then NEXT
1693 becomes the new first page in the list, otherwise
1694 splice P out of the forward pointers. */
1695 if (! previous)
1696 G.pages[order] = next;
1697 else
1698 previous->next = next;
1700 /* Splice P out of the back pointers too. */
1701 if (next)
1702 next->prev = previous;
1704 /* Are we removing the last element? */
1705 if (p == G.page_tails[order])
1706 G.page_tails[order] = previous;
1707 free_page (p);
1708 p = previous;
1711 /* If the page is full, move it to the end. */
1712 else if (p->num_free_objects == 0)
1714 /* Don't move it if it's already at the end. */
1715 if (p != G.page_tails[order])
1717 /* Move p to the end of the list. */
1718 p->next = NULL;
1719 p->prev = G.page_tails[order];
1720 G.page_tails[order]->next = p;
1722 /* Update the tail pointer... */
1723 G.page_tails[order] = p;
1725 /* ... and the head pointer, if necessary. */
1726 if (! previous)
1727 G.pages[order] = next;
1728 else
1729 previous->next = next;
1731 /* And update the backpointer in NEXT if necessary. */
1732 if (next)
1733 next->prev = previous;
1735 p = previous;
1739 /* If we've fallen through to here, it's a page in the
1740 topmost context that is neither full nor empty. Such a
1741 page must precede pages at lesser context depth in the
1742 list, so move it to the head. */
1743 else if (p != G.pages[order])
1745 previous->next = p->next;
1747 /* Update the backchain in the next node if it exists. */
1748 if (p->next)
1749 p->next->prev = previous;
1751 /* Move P to the head of the list. */
1752 p->next = G.pages[order];
1753 p->prev = NULL;
1754 G.pages[order]->prev = p;
1756 /* Update the head pointer. */
1757 G.pages[order] = p;
1759 /* Are we moving the last element? */
1760 if (G.page_tails[order] == p)
1761 G.page_tails[order] = previous;
1762 p = previous;
1765 previous = p;
1766 p = next;
1768 while (! done);
1770 /* Now, restore the in_use_p vectors for any pages from contexts
1771 other than the current one. */
1772 for (p = G.pages[order]; p; p = p->next)
1773 if (p->context_depth != G.context_depth)
1774 ggc_recalculate_in_use_p (p);
1778 #ifdef ENABLE_GC_CHECKING
1779 /* Clobber all free objects. */
1781 static void
1782 poison_pages (void)
1784 unsigned order;
1786 for (order = 2; order < NUM_ORDERS; order++)
1788 size_t size = OBJECT_SIZE (order);
1789 page_entry *p;
1791 for (p = G.pages[order]; p != NULL; p = p->next)
1793 size_t num_objects;
1794 size_t i;
1796 if (p->context_depth != G.context_depth)
1797 /* Since we don't do any collection for pages in pushed
1798 contexts, there's no need to do any poisoning. And
1799 besides, the IN_USE_P array isn't valid until we pop
1800 contexts. */
1801 continue;
1803 num_objects = OBJECTS_IN_PAGE (p);
1804 for (i = 0; i < num_objects; i++)
1806 size_t word, bit;
1807 word = i / HOST_BITS_PER_LONG;
1808 bit = i % HOST_BITS_PER_LONG;
1809 if (((p->in_use_p[word] >> bit) & 1) == 0)
1811 char *object = p->page + i * size;
1813 /* Keep poison-by-write when we expect to use Valgrind,
1814 so that exactly the same memory semantics are kept, in case
1815 there are memory errors. We override this request
1816 below. */
1817 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
1818 memset (object, 0xa5, size);
1820 /* Drop the handle to avoid handle leak. */
1821 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
1827 #else
1828 #define poison_pages()
1829 #endif
1831 #ifdef ENABLE_GC_ALWAYS_COLLECT
1832 /* Validate that the reportedly free objects actually are. */
1834 static void
1835 validate_free_objects (void)
1837 struct free_object *f, *next, *still_free = NULL;
1839 for (f = G.free_object_list; f ; f = next)
1841 page_entry *pe = lookup_page_table_entry (f->object);
1842 size_t bit, word;
1844 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
1845 word = bit / HOST_BITS_PER_LONG;
1846 bit = bit % HOST_BITS_PER_LONG;
1847 next = f->next;
1849 /* Make certain it isn't visible from any root. Notice that we
1850 do this check before sweep_pages merges save_in_use_p. */
1851 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
1853 /* If the object comes from an outer context, then retain the
1854 free_object entry, so that we can verify that the address
1855 isn't live on the stack in some outer context. */
1856 if (pe->context_depth != G.context_depth)
1858 f->next = still_free;
1859 still_free = f;
1861 else
1862 free (f);
1865 G.free_object_list = still_free;
1867 #else
1868 #define validate_free_objects()
1869 #endif
1871 /* Top level mark-and-sweep routine. */
1873 void
1874 ggc_collect (void)
1876 /* Avoid frequent unnecessary work by skipping collection if the
1877 total allocations haven't expanded much since the last
1878 collection. */
1879 float allocated_last_gc =
1880 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1882 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1884 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
1885 return;
1887 timevar_push (TV_GC);
1888 if (!quiet_flag)
1889 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1890 if (GGC_DEBUG_LEVEL >= 2)
1891 fprintf (G.debug_file, "BEGIN COLLECTING\n");
1893 /* Zero the total allocated bytes. This will be recalculated in the
1894 sweep phase. */
1895 G.allocated = 0;
1897 /* Release the pages we freed the last time we collected, but didn't
1898 reuse in the interim. */
1899 release_pages ();
1901 /* Indicate that we've seen collections at this context depth. */
1902 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1904 clear_marks ();
1905 ggc_mark_roots ();
1906 #ifdef GATHER_STATISTICS
1907 ggc_prune_overhead_list ();
1908 #endif
1909 poison_pages ();
1910 validate_free_objects ();
1911 sweep_pages ();
1913 G.allocated_last_gc = G.allocated;
1915 timevar_pop (TV_GC);
1917 if (!quiet_flag)
1918 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1919 if (GGC_DEBUG_LEVEL >= 2)
1920 fprintf (G.debug_file, "END COLLECTING\n");
1923 /* Print allocation statistics. */
1924 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1925 ? (x) \
1926 : ((x) < 1024*1024*10 \
1927 ? (x) / 1024 \
1928 : (x) / (1024*1024))))
1929 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
1931 void
1932 ggc_print_statistics (void)
1934 struct ggc_statistics stats;
1935 unsigned int i;
1936 size_t total_overhead = 0;
1938 /* Clear the statistics. */
1939 memset (&stats, 0, sizeof (stats));
1941 /* Make sure collection will really occur. */
1942 G.allocated_last_gc = 0;
1944 /* Collect and print the statistics common across collectors. */
1945 ggc_print_common_statistics (stderr, &stats);
1947 /* Release free pages so that we will not count the bytes allocated
1948 there as part of the total allocated memory. */
1949 release_pages ();
1951 /* Collect some information about the various sizes of
1952 allocation. */
1953 fprintf (stderr,
1954 "Memory still allocated at the end of the compilation process\n");
1955 fprintf (stderr, "%-5s %10s %10s %10s\n",
1956 "Size", "Allocated", "Used", "Overhead");
1957 for (i = 0; i < NUM_ORDERS; ++i)
1959 page_entry *p;
1960 size_t allocated;
1961 size_t in_use;
1962 size_t overhead;
1964 /* Skip empty entries. */
1965 if (!G.pages[i])
1966 continue;
1968 overhead = allocated = in_use = 0;
1970 /* Figure out the total number of bytes allocated for objects of
1971 this size, and how many of them are actually in use. Also figure
1972 out how much memory the page table is using. */
1973 for (p = G.pages[i]; p; p = p->next)
1974 {
1975 allocated += p->bytes;
1976 in_use +=
1977 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
1979 overhead += (sizeof (page_entry) - sizeof (long)
1980 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
1981 }
1982 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
1983 (unsigned long) OBJECT_SIZE (i),
1984 SCALE (allocated), STAT_LABEL (allocated),
1985 SCALE (in_use), STAT_LABEL (in_use),
1986 SCALE (overhead), STAT_LABEL (overhead));
1987 total_overhead += overhead;
1988 }
1989 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
1990 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
1991 SCALE (G.allocated), STAT_LABEL(G.allocated),
1992 SCALE (total_overhead), STAT_LABEL (total_overhead));
1994 #ifdef GATHER_STATISTICS
1996 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
1998 fprintf (stderr, "Total Overhead: %10lld\n",
1999 G.stats.total_overhead);
2000 fprintf (stderr, "Total Allocated: %10lld\n",
2001 G.stats.total_allocated);
2003 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2004 G.stats.total_overhead_under32);
2005 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2006 G.stats.total_allocated_under32);
2007 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2008 G.stats.total_overhead_under64);
2009 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2010 G.stats.total_allocated_under64);
2011 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2012 G.stats.total_overhead_under128);
2013 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2014 G.stats.total_allocated_under128);
2016 for (i = 0; i < NUM_ORDERS; i++)
2017 if (G.stats.total_allocated_per_order[i])
2018 {
2019 fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
2020 (unsigned long) OBJECT_SIZE (i),
2021 G.stats.total_overhead_per_order[i]);
2022 fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
2023 (unsigned long) OBJECT_SIZE (i),
2024 G.stats.total_allocated_per_order[i]);
2025 }
2027 #endif
2028 }
2030 struct ggc_pch_data
2031 {
2032 struct ggc_pch_ondisk
2033 {
2034 unsigned totals[NUM_ORDERS];
2035 } d;
2036 size_t base[NUM_ORDERS];
2037 size_t written[NUM_ORDERS];
2038 };
2040 struct ggc_pch_data *
2041 init_ggc_pch (void)
2042 {
2043 return XCNEW (struct ggc_pch_data);
2044 }
2046 void
2047 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2048 size_t size, bool is_string ATTRIBUTE_UNUSED,
2049 enum gt_types_enum type ATTRIBUTE_UNUSED)
2050 {
2051 unsigned order;
2053 if (size < NUM_SIZE_LOOKUP)
2054 order = size_lookup[size];
2055 else
2056 {
2057 order = 10;
2058 while (size > OBJECT_SIZE (order))
2059 order++;
2060 }
2062 d->d.totals[order]++;
2063 }
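A minimal sketch of the size-to-order search used in the else-branch above, and again in ggc_pch_alloc_object and ggc_pch_write_object below. It assumes object sizes that are plain powers of two; the real OBJECT_SIZE table also contains a few extra hand-tuned sizes.

#include <stdio.h>

/* Hypothetical stand-in for OBJECT_SIZE: order N holds (1 << N) bytes.  */
static unsigned long
object_size (unsigned order)
{
  return 1UL << order;
}

/* Requests too large for the size_lookup table search upward from
   order 10 until they fit.  */
static unsigned
order_for_large_size (unsigned long size)
{
  unsigned order = 10;
  while (size > object_size (order))
    order++;
  return order;
}

int
main (void)
{
  printf ("%u\n", order_for_large_size (3000));    /* 12: 3000 fits in 4096 */
  printf ("%u\n", order_for_large_size (100000));  /* 17: fits in 131072    */
  return 0;
}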
2065 size_t
2066 ggc_pch_total_size (struct ggc_pch_data *d)
2067 {
2068 size_t a = 0;
2069 unsigned i;
2071 for (i = 0; i < NUM_ORDERS; i++)
2072 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2073 return a;
2074 }
2076 void
2077 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2078 {
2079 size_t a = (size_t) base;
2080 unsigned i;
2082 for (i = 0; i < NUM_ORDERS; i++)
2083 {
2084 d->base[i] = a;
2085 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2086 }
2087 }
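A toy illustration of the partitioning done above: each order receives one page-aligned slice of the single contiguous PCH region, and base[i] records where that slice begins. The page size, object sizes, counts, and base address below are made up.

#include <stdio.h>

static unsigned long
round_up (unsigned long x, unsigned long f)
{
  return ((x + f - 1) / f) * f;
}

int
main (void)
{
  unsigned long object_size[3] = { 16, 64, 256 };
  unsigned long totals[3]      = { 1000, 50, 3 };
  unsigned long base = 0x10000000UL;   /* pretend address of the mapped PCH */
  unsigned i;

  for (i = 0; i < 3; i++)
    {
      printf ("order %u objects start at 0x%lx\n", i, base);
      base += round_up (totals[i] * object_size[i], 4096UL);
    }
  return 0;
}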
2090 char *
2091 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2092 size_t size, bool is_string ATTRIBUTE_UNUSED,
2093 enum gt_types_enum type ATTRIBUTE_UNUSED)
2094 {
2095 unsigned order;
2096 char *result;
2098 if (size < NUM_SIZE_LOOKUP)
2099 order = size_lookup[size];
2100 else
2101 {
2102 order = 10;
2103 while (size > OBJECT_SIZE (order))
2104 order++;
2105 }
2107 result = (char *) d->base[order];
2108 d->base[order] += OBJECT_SIZE (order);
2109 return result;
2110 }
2112 void
2113 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2114 FILE *f ATTRIBUTE_UNUSED)
2115 {
2116 /* Nothing to do. */
2117 }
2119 void
2120 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2121 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2122 size_t size, bool is_string ATTRIBUTE_UNUSED)
2123 {
2124 unsigned order;
2125 static const char emptyBytes[256];
2127 if (size < NUM_SIZE_LOOKUP)
2128 order = size_lookup[size];
2129 else
2130 {
2131 order = 10;
2132 while (size > OBJECT_SIZE (order))
2133 order++;
2134 }
2136 if (fwrite (x, size, 1, f) != 1)
2137 fatal_error ("can't write PCH file: %m");
2139 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2140 object out to OBJECT_SIZE(order). This happens for strings. */
2142 if (size != OBJECT_SIZE (order))
2143 {
2144 unsigned padding = OBJECT_SIZE(order) - size;
2146 /* To speed small writes, we use a nulled-out array that's larger
2147 than most padding requests as the source for our null bytes. This
2148 permits us to do the padding with fwrite() rather than fseek(), and
2149 limits the chance the OS may try to flush any outstanding writes. */
2150 if (padding <= sizeof(emptyBytes))
2151 {
2152 if (fwrite (emptyBytes, 1, padding, f) != padding)
2153 fatal_error ("can't write PCH file");
2154 }
2155 else
2156 {
2157 /* Larger than our buffer? Just default to fseek. */
2158 if (fseek (f, padding, SEEK_CUR) != 0)
2159 fatal_error ("can't write PCH file");
2160 }
2161 }
2163 d->written[order]++;
2164 if (d->written[order] == d->d.totals[order]
2165 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2166 G.pagesize),
2167 SEEK_CUR) != 0)
2168 fatal_error ("can't write PCH file: %m");
2169 }
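A standalone sketch of the two kinds of on-disk padding this function produces, using made-up numbers: a 64-byte order holding 100 objects, written to 4096-byte pages.

#include <stdio.h>

int
main (void)
{
  unsigned long object_size = 64, totals = 100, pagesize = 4096;
  unsigned long size = 37;   /* e.g. a string shorter than its bucket */
  unsigned long used = totals * object_size;

  /* Per-object padding: zero-fill so consecutive objects stay exactly
     object_size bytes apart in the file.  */
  printf ("zero-fill after object: %lu bytes\n", object_size - size);  /* 27 */

  /* End-of-order padding: once the last object of an order is written,
     seek forward to the next page boundary, as the ROUND_UP_VALUE seek
     above does.  */
  printf ("seek forward: %lu bytes\n",
          (pagesize - used % pagesize) % pagesize);                    /* 1792 */
  return 0;
}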
2171 void
2172 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2173 {
2174 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2175 fatal_error ("can't write PCH file: %m");
2176 free (d);
2177 }
2179 /* Move the PCH PTE entries, which were just added to the end of
2180 by_depth, to the front. */
2182 static void
2183 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2184 {
2185 unsigned i;
2187 /* First, we swap the new entries to the front of the varrays. */
2188 page_entry **new_by_depth;
2189 unsigned long **new_save_in_use;
2191 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2192 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2194 memcpy (&new_by_depth[0],
2195 &G.by_depth[count_old_page_tables],
2196 count_new_page_tables * sizeof (void *));
2197 memcpy (&new_by_depth[count_new_page_tables],
2198 &G.by_depth[0],
2199 count_old_page_tables * sizeof (void *));
2200 memcpy (&new_save_in_use[0],
2201 &G.save_in_use[count_old_page_tables],
2202 count_new_page_tables * sizeof (void *));
2203 memcpy (&new_save_in_use[count_new_page_tables],
2204 &G.save_in_use[0],
2205 count_old_page_tables * sizeof (void *));
2207 free (G.by_depth);
2208 free (G.save_in_use);
2210 G.by_depth = new_by_depth;
2211 G.save_in_use = new_save_in_use;
2213 /* Now update all the index_by_depth fields. */
2214 for (i = G.by_depth_in_use; i > 0; --i)
2215 {
2216 page_entry *p = G.by_depth[i-1];
2217 p->index_by_depth = i-1;
2218 }
2220 /* And last, we update the depth pointers in G.depth. The first
2221 entry is already 0, and context 0 entries always start at index
2222 0, so there is nothing to update in the first slot. We need a
2223 second slot only if we have old ptes, and if we do, they start
2224 at index count_new_page_tables. */
2225 if (count_old_page_tables)
2226 push_depth (count_new_page_tables);
2227 }
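As a concrete example of the reordering above: with 2 old page-table entries and 3 new PCH entries, by_depth goes from [old0 old1 new0 new1 new2] to [new0 new1 new2 old0 old1], every entry's index_by_depth is refreshed to its new position, and push_depth (3) records that the surviving depth-1 entries now begin at index 3.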
2229 void
2230 ggc_pch_read (FILE *f, void *addr)
2231 {
2232 struct ggc_pch_ondisk d;
2233 unsigned i;
2234 char *offs = addr;
2235 unsigned long count_old_page_tables;
2236 unsigned long count_new_page_tables;
2238 count_old_page_tables = G.by_depth_in_use;
2240 /* We've just read in a PCH file. So, every object that used to be
2241 allocated is now free. */
2242 clear_marks ();
2243 #ifdef ENABLE_GC_CHECKING
2244 poison_pages ();
2245 #endif
2246 /* Since we free all the allocated objects, the free list becomes
2247 useless. Validate it now, which will also clear it. */
2248 validate_free_objects();
2250 /* No object read from a PCH file should ever be freed. So, set the
2251 context depth to 1, and set the depth of all the currently-allocated
2252 pages to be 1 too. PCH pages will have depth 0. */
2253 gcc_assert (!G.context_depth);
2254 G.context_depth = 1;
2255 for (i = 0; i < NUM_ORDERS; i++)
2256 {
2257 page_entry *p;
2258 for (p = G.pages[i]; p != NULL; p = p->next)
2259 p->context_depth = G.context_depth;
2260 }
2262 /* Allocate the appropriate page-table entries for the pages read from
2263 the PCH file. */
2264 if (fread (&d, sizeof (d), 1, f) != 1)
2265 fatal_error ("can't read PCH file: %m");
2267 for (i = 0; i < NUM_ORDERS; i++)
2268 {
2269 struct page_entry *entry;
2270 char *pte;
2271 size_t bytes;
2272 size_t num_objs;
2273 size_t j;
2275 if (d.totals[i] == 0)
2276 continue;
2278 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2279 num_objs = bytes / OBJECT_SIZE (i);
2280 entry = xcalloc (1, (sizeof (struct page_entry)
2281 - sizeof (long)
2282 + BITMAP_SIZE (num_objs + 1)));
2283 entry->bytes = bytes;
2284 entry->page = offs;
2285 entry->context_depth = 0;
2286 offs += bytes;
2287 entry->num_free_objects = 0;
2288 entry->order = i;
2290 for (j = 0;
2291 j + HOST_BITS_PER_LONG <= num_objs + 1;
2292 j += HOST_BITS_PER_LONG)
2293 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2294 for (; j < num_objs + 1; j++)
2295 entry->in_use_p[j / HOST_BITS_PER_LONG]
2296 |= 1L << (j % HOST_BITS_PER_LONG);
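/* Taken together, the two loops above set bits 0 .. num_objs inclusive:
   whole HOST_BITS_PER_LONG-bit words first, then the remaining bits one
   at a time. Every object restored from the PCH therefore starts out
   marked in use, consistent with num_free_objects being 0.  */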
2298 for (pte = entry->page;
2299 pte < entry->page + entry->bytes;
2300 pte += G.pagesize)
2301 set_page_table_entry (pte, entry);
2303 if (G.page_tails[i] != NULL)
2304 G.page_tails[i]->next = entry;
2305 else
2306 G.pages[i] = entry;
2307 G.page_tails[i] = entry;
2309 /* We start off by just adding all the new information to the
2310 end of the varrays; later, we will move the new information
2311 to the front of the varrays, as the PCH page tables are at
2312 context 0. */
2313 push_by_depth (entry, 0);
2314 }
2316 /* Now, we update the various data structures that speed page table
2317 handling. */
2318 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2320 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2322 /* Update the statistics. */
2323 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2324 }