1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "plugin.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |          |
                         PAGE_L1_BITS  |
                                       |
                                    PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
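
/* A worked example of the decomposition (an illustration, not part of
   the algorithm): assuming a 4096-byte system page, G.lg_pagesize is
   12 and PAGE_L2_BITS is 32 - 8 - 12 = 12.  For the pointer
   0x12345678, LOOKUP_L1 extracts the top 8 bits, 0x12, and LOOKUP_L2
   extracts the next 12 bits, (0x12345678 >> 12) & 0xfff == 0x345.
   The low 12 bits, 0x678, are the offset within the page and are
   ignored by the lookup.  */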
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
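
/* A worked example of the exact-division trick (illustrative numbers
   only; the real values are filled in by compute_inverse below).  For
   an object size Z == 24 == 2^3 * 3, the table would hold S == 3 and
   M == 0xAAAAAAAB, the inverse of 3 modulo 2^32 (since 3 * 0xAAAAAAAB
   == 0x200000001 == 1 mod 2^32).  Then for the offset O == 48:
   (48 * 0xAAAAAAAB) mod 2^32 == 0x10, and 0x10 >> 3 == 2 == 48 / 24.  */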
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};
/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
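
/* For example (purely illustrative): ROUND_UP_VALUE (10, 8) is
   7 - ((7 + 10) % 8) == 7 - 1 == 6, and 10 + 6 == 16 is the next
   multiple of 8; correspondingly ROUND_UP (10, 8) == CEIL (10, 8) * 8
   == 2 * 8 == 16.  */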
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
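
/* For instance (assuming a 4096-byte page and 32-bit longs): a page
   of 8-byte objects holds 512 of them, and alloc_page below asks for
   BITMAP_SIZE (512 + 1) to cover the one-past-the-end sentry bit,
   i.e. CEIL (513, 32) * sizeof (long) == 17 * 4 == 68 bytes.  */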
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 256
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}
/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
                                  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
          (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
              p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct page_entry, page_entry_size);
          e->order = order;
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *)page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          gcc_assert (tail_slop >= sizeof (page_group));
          group = (page_group *)enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = G.free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = XCNEWVAR (struct page_entry, page_entry_size);
              e->order = order;
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          G.free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%lu, data %p-%p\n",
             (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
             page + entry_size - 1);

  return entry;
}
/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
         as new elements are added to by_depth, we note the indices
         of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
        --G.depth_in_use;
    }
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (void *) entry,
             entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
         one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        G.bytes_mapped -= g->alloc_size;
        free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
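
/* Illustration of how this table is used: a 24-byte request initially
   maps to size_lookup[24] == 5, i.e. a 32-byte object.  init_ggc below
   then retargets slices of this table at the extra orders, so that
   (for example, assuming MAX_ALIGNMENT is 8) sizes 17 through 24 map
   to the 24-byte extra order instead, wasting no space.  */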
/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
                      MEM_STAT_DECL)
{
  return ggc_alloc_stat (size PASS_MEM_STAT);
}
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  if (size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (size > (object_size = OBJECT_SIZE (order)))
        order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
         way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
         the only entry, then we must update the PREV pointer of the
         ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;
      else
        entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;

#if GCC_VERSION >= 3004
          bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
#endif

          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
         The new page at the head of the list will have NULL in
         its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
                       result PASS_MEM_STAT);
#endif

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
                                                object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
        G.stats.total_overhead_under32 += overhead;
        G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
        G.stats.total_overhead_under64 += overhead;
        G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
        G.stats.total_overhead_under128 += overhead;
        G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
             (unsigned long) size, (unsigned long) object_size, result,
             (void *) entry);

  return result;
}
/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p || !ggc_allocated_p (p))
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size=%lu, at %p on %p\n",
             (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
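
/* A worked example (illustrative only): for OBJECT_SIZE == 40 ==
   2^3 * 5, the loop above finds e == 3 and the odd part 5.  The
   Newton iteration inv = inv * (2 - inv * size) doubles the number
   of correct low-order bits on each pass and converges, on a 32-bit
   size_t, to 0xCCCCCCCD, since 5 * 0xCCCCCCCD == 0x400000001 == 1
   mod 2^32.  OFFSET_TO_BIT then computes, e.g. for offset 80:
   (80 * 0xCCCCCCCD) mod 2^32 == 0x10, and 0x10 >> 3 == 2 == 80 / 40.  */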
/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize);
        gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
         so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
        objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
        continue;

      for (o = size_lookup[i]; o == size_lookup [i]; --i)
        size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}
/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
{
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
                 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects = OBJECTS_IN_PAGE (p);
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

          /* The data should be page-aligned.  */
          gcc_assert (!((size_t) p->page & (G.pagesize - 1)));

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! save_in_use_p (p))
                save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          num_objects = OBJECTS_IN_PAGE (p);

          /* Add all live objects on this page to the count of
             allocated memory.  */
          live_objects = num_objects - p->num_free_objects;

          G.allocated += OBJECT_SIZE (order) * live_objects;

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
              /* If P was the first page in the list, then NEXT
                 becomes the new first page in the list, otherwise
                 splice P out of the forward pointers.  */
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Splice P out of the back pointers too.  */
              if (next)
                next->prev = previous;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  p->prev = G.page_tails[order];
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;

                  /* And update the backpointer in NEXT if necessary.  */
                  if (next)
                    next->prev = previous;

                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;

              /* Update the backchain in the next node if it exists.  */
              if (p->next)
                p->next->prev = previous;

              /* Move P to the head of the list.  */
              p->next = G.pages[order];
              p->prev = NULL;
              G.pages[order]->prev = p;

              /* Update the head pointer.  */
              G.pages[order] = p;

              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}
#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects;
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          num_objects = OBJECTS_IN_PAGE (p);
          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                {
                  char *object = p->page + i * size;

                  /* Keep poison-by-write when we expect to use Valgrind,
                     so the exact same memory semantics is kept, in case
                     there are memory errors.  We override this request
                     below.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
                                                                 size));
                  memset (object, 0xa5, size);

                  /* Drop the handle to avoid handle leak.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
                }
            }
        }
    }
}
#else
#define poison_pages()
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
         do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
         free_object entry, so that we can verify that the address
         isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
        {
          f->next = still_free;
          still_free = f;
        }
      else
        free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif
1926 /* Top level mark-and-sweep routine. */
1928 void
1929 ggc_collect (void)
1931 /* Avoid frequent unnecessary work by skipping collection if the
1932 total allocations haven't expanded much since the last
1933 collection. */
1934 float allocated_last_gc =
1935 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1937 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
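  /* This sets one bit per context level up to and including the
     current one; e.g. at depth 2 the mask becomes 0b111, recording
     collections at depths 0, 1 and 2.  */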
  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
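/* Together these print a byte count in the most readable unit:
   e.g. SCALE (15000) is 14 and STAT_LABEL (15000) is 'k', so 15000
   bytes print as "14k"; values below 10k print as plain bytes.  */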
void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
	   "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s %10s %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL (G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));
#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead:                    %10lld\n",
	     G.stats.total_overhead);
    fprintf (stderr, "Total Allocated:                   %10lld\n",
	     G.stats.total_allocated);

    fprintf (stderr, "Total Overhead under 32B:          %10lld\n",
	     G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B:         %10lld\n",
	     G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B:          %10lld\n",
	     G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B:         %10lld\n",
	     G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B:         %10lld\n",
	     G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:        %10lld\n",
	     G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
	{
	  fprintf (stderr, "Total Overhead page size %7lu:  %10lld\n",
		   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_overhead_per_order[i]);
	  fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
		   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_allocated_per_order[i]);
	}
  }
#endif
}
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};
struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
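  /* Map the request size to an allocation order the same way the
     allocator does: sizes below NUM_SIZE_LOOKUP use the precomputed
     size_lookup table; larger sizes fall back to a linear scan of
     the order table.  */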
  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}
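/* The total PCH image size is the sum of the per-order object totals,
   with each order's region rounded up to a page boundary.  */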
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}
void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}
void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;
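      /* For example, if OBJECT_SIZE (order) is 512 and SIZE is 300,
	 212 bytes of padding are needed; since that fits in
	 emptyBytes, the single fwrite below suffices.  */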
      /* To speed small writes, we use a nulled-out array that's larger
	 than most padding requests as the source for our null bytes.  This
	 permits us to do the padding with fwrite () rather than fseek (),
	 and limits the chance that the OS may try to flush any outstanding
	 writes.  */
      if (padding <= sizeof (emptyBytes))
	{
	  if (fwrite (emptyBytes, 1, padding, f) != padding)
	    fatal_error ("can't write PCH file");
	}
      else
	{
	  /* Larger than our buffer?  Just use fseek.  */
	  if (fseek (f, padding, SEEK_CUR) != 0)
	    fatal_error ("can't write PCH file");
	}
    }
  d->written[order]++;
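  /* After the last object of this order is written, skip over the
     slack that rounds the order's region up to a page boundary, so
     the next order starts at the offset ggc_pch_this_base assigned
     to it.  */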
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}
/* Move the PCH PTE entries just added to the end of by_depth to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));
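  /* The new arrays now read [new0 .. newM-1, old0 .. oldN-1] instead
     of [old0 .. oldN-1, new0 .. newM-1], where M and N are the new
     and old page-table counts.  */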
  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }
  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;
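      /* Mark every object on the page as in use; PCH objects are
	 never freed, so the whole bitmap (which covers one extra bit
	 past the last object) is simply set.  */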
      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays; later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *) addr;
}