1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
#include "plugin.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                               HOST_PAGE_SIZE_BITS
                  32           |      |
      msb +----------------+----+------+------+ lsb
                           |      |
                PAGE_L1_BITS      |
                                  |
                       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
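
/* As a concrete illustration (assuming a 32-bit host with 4K pages,
   so G.lg_pagesize == 12 and PAGE_L2_BITS == 32 - 8 - 12 == 12): a
   pointer such as 0xaabbc123 decomposes into L1 == 0xaa (top 8 bits)
   and L2 == 0xbbc (next 12 bits); the low 12 bits, 0x123, are the
   offset within the page and are ignored by the lookup.  */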
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
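
/* For power-of-two object sizes the inverse degenerates: with
   OBJECT_SIZE 8, compute_inverse below produces DIV_MULT == 1 and
   DIV_SHIFT == 3, so OFFSET_TO_BIT reduces to a plain OFFSET >> 3.
   The multiply only matters for the odd factors of the
   non-power-of-two sizes in extra_order_size_table.  */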
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct var_ann_d),
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_list),
  sizeof (struct tree_ssa_name),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (bitmap_element),
  sizeof (bitmap_head),
  TREE_EXP_SIZE (2),
  RTL_SIZE (2),			/* MEM, PLUS, etc.  */
  RTL_SIZE (9),			/* INSN */
};
/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    long double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
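
/* For example, ROUND_UP_VALUE (10, 8) == 6 and ROUND_UP (10, 8) == 16.  */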
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object
  {
    void *object;
    struct free_object *next;
  } *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
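
/* For instance, a page of 512 objects needs 513 bits, counting the
   one-past-the-end sentinel bit; on a host with 64-bit longs that is
   CEIL (513, 64) * sizeof (long) == 9 * 8 == 72 bytes.  */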
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 256
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
                                  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
          (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
              p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct page_entry, page_entry_size);
          e->order = order;
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *)page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          gcc_assert (tail_slop >= sizeof (page_group));
          group = (page_group *)enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = G.free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = XCNEWVAR (struct page_entry, page_entry_size);
              e->order = order;
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          G.free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%lu, data %p-%p\n",
             (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
             page + entry_size - 1);

  return entry;
}
/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
         as new elements are added to by_depth, we note the indices
         of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
        --G.depth_in_use;
    }
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (void *) entry,
             entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
         one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;

      --G.by_depth_in_use;
    }

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        G.bytes_mapped -= g->alloc_size;
        free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
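
/* For example, size_lookup[24] is 5 above, so before init_ggc runs, a
   24-byte request would be served from the 32-byte order.  init_ggc
   rewrites entries of this table so that such a request instead maps
   to an extra order of exactly 24 bytes, if some entry of
   extra_order_size_table rounds to that size.  */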
/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
                      MEM_STAT_DECL)
{
  return ggc_alloc_stat (size PASS_MEM_STAT);
}
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  if (size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (size > (object_size = OBJECT_SIZE (order)))
        order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
         way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
         the only entry, then we must update the PREV pointer of the
         ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;
      else
        entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;

#if GCC_VERSION >= 3004
          bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
#endif

          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
         The new page at the head of the list will have NULL in
         its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
                       result PASS_MEM_STAT);
#endif

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
                                                object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
        G.stats.total_overhead_under32 += overhead;
        G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
        G.stats.total_overhead_under64 += overhead;
        G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
        G.stats.total_overhead_under128 += overhead;
        G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
             (unsigned long) size, (unsigned long) object_size, result,
             (void *) entry);

  return result;
}
/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p || !ggc_allocated_p (p))
    return;

  /* Look up the page on which the object is alloced.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size=%lu, at %p on %p\n",
             (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
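
/* Worked example (assuming a 32-bit size_t): for OBJECT_SIZE 24
   == 2^3 * 3, the first loop strips the even factors, leaving e == 3,
   and the Newton iteration converges on inv == 0xAAAAAAAB, since
   3 * 0xAAAAAAAB == 0x200000001 == 1 (mod 2^32).  OFFSET_TO_BIT then
   divides exactly: for offset 48, (48 * 0xAAAAAAAB) mod 2^32 == 16,
   and 16 >> 3 == 2 == 48 / 24.  */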
/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize);
        gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
         so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
        objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
        continue;

      for (o = size_lookup[i]; o == size_lookup [i]; --i)
        size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}
/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
{
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
                 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects = OBJECTS_IN_PAGE (p);
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

          /* The data should be page-aligned.  */
          gcc_assert (!((size_t) p->page & (G.pagesize - 1)));

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! save_in_use_p (p))
                save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          num_objects = OBJECTS_IN_PAGE (p);

          /* Add all live objects on this page to the count of
             allocated memory.  */
          live_objects = num_objects - p->num_free_objects;

          G.allocated += OBJECT_SIZE (order) * live_objects;

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
              /* If P was the first page in the list, then NEXT
                 becomes the new first page in the list, otherwise
                 splice P out of the forward pointers.  */
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Splice P out of the back pointers too.  */
              if (next)
                next->prev = previous;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  p->prev = G.page_tails[order];
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;

                  /* And update the backpointer in NEXT if necessary.  */
                  if (next)
                    next->prev = previous;

                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;

              /* Update the backchain in the next node if it exists.  */
              if (p->next)
                p->next->prev = previous;

              /* Move P to the head of the list.  */
              p->next = G.pages[order];
              p->prev = NULL;
              G.pages[order]->prev = p;

              /* Update the head pointer.  */
              G.pages[order] = p;

              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}
#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects;
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          num_objects = OBJECTS_IN_PAGE (p);
          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                {
                  char *object = p->page + i * size;

                  /* Keep poison-by-write when we expect to use Valgrind,
                     so the exact same memory semantics is kept, in case
                     there are memory errors.  We override this request
                     below.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
                                                                 size));
                  memset (object, 0xa5, size);

                  /* Drop the handle to avoid handle leak.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
                }
            }
        }
    }
}
#else
#define poison_pages()
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
         do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
         free_object entry, so that we can verify that the address
         isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
        {
          f->next = still_free;
          still_free = f;
        }
      else
        free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
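/* For example: SCALE (5000) == 5000 with STAT_LABEL ' ' (small values
   print as-is), SCALE (500000) == 488 with label 'k', and
   SCALE (20000000) == 19 with label 'M'.  */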
void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
	   "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s %10s %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL (G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));
#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
	     G.stats.total_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
	     G.stats.total_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
	     G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
	     G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
	     G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
	     G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
	     G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
	     G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
	{
	  fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
		   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_overhead_per_order[i]);
	  fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
		   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_allocated_per_order[i]);
	}
  }
#endif
}
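/* Illustrative sample of the per-order table printed above (invented
   numbers; real figures and alignment vary by translation unit):

   Size   Allocated       Used   Overhead
   32          4096       2048         72
   64         1652k        981k        14k
   Total      8192k       4711k        29k  */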
/* On-disk bookkeeping for a PCH: how many objects of each order the
   file contains.  */
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

/* In-memory state used while writing a PCH file.  */
struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}
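/* Illustration of the fallback path above, assuming the usual
   power-of-two object sizes at these orders: a 5000-byte object lies
   outside the size_lookup table, so the loop starts at order 10 and
   walks object sizes 1024, 2048, 4096, 8192, stopping at the first
   order whose object size (8192) is at least 5000; that order's total
   is then incremented.  */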
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}
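/* Worked example for the two routines above (illustrative counts, with
   G.pagesize == 4096): if order 5 (32-byte objects) holds 100 objects
   (3200 bytes, rounded up to 4096) and order 6 (64-byte objects) holds
   200 (12800 bytes, rounded up to 16384), then ggc_pch_total_size
   returns 4096 + 16384 == 20480, and ggc_pch_this_base sets
   base[5] == base and base[6] == base + 4096.  */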
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}
void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE (order), then we need to pad
     the object out to OBJECT_SIZE (order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
	 than most padding requests as the source for our null bytes.  This
	 permits us to do the padding with fwrite () rather than fseek (),
	 and limits the chance the OS may try to flush any outstanding
	 writes.  */
      if (padding <= sizeof (emptyBytes))
	{
	  if (fwrite (emptyBytes, 1, padding, f) != padding)
	    fatal_error ("can't write PCH file");
	}
      else
	{
	  /* Larger than our buffer?  Just fall back to fseek.  */
	  if (fseek (f, padding, SEEK_CUR) != 0)
	    fatal_error ("can't write PCH file");
	}
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}
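/* For instance, a 37-byte string assigned to a 64-byte order is written
   as 37 data bytes followed by 27 zero bytes taken from emptyBytes;
   only paddings larger than the 256-byte buffer fall back to fseek.
   After the last object of an order, the file position is additionally
   advanced past the page-alignment gap computed by ROUND_UP_VALUE.  */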
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}
/* Move the PCH page-table entries just added at the end of by_depth
   to the front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot only if we have old PTEs, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
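/* Schematic of the swap above: with three old entries and two freshly
   pushed PCH entries, by_depth goes from [O1 O2 O3 N1 N2] to
   [N1 N2 O1 O2 O3]; every entry's index_by_depth is refreshed, and the
   new depth slot records that old entries now begin at index 2
   (== count_new_page_tables).  */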
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays; later, we will move it to the front, as
	 the PCH page tables belong to context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}
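/* Bitmap illustration for the in_use_p loops above: with
   HOST_BITS_PER_LONG == 64 and num_objs == 70, the first loop sets
   in_use_p[0] to all ones (objects 0-63) and the second sets bits 0-6
   of in_use_p[1] (objects 64-69 plus the guard bit one past the end),
   so every PCH object stays permanently marked in use.  */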