1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "plugin.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
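
/* A concrete illustration of the rounding described above (a hedged
   example that ignores the extra non-power-of-two orders defined
   further below): a request for 20 bytes is rounded up to 32 bytes
   and served from a page of 32-byte objects, while a request for
   exactly 32 bytes fits that order with no padding overhead.  */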
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                         PAGE_L1_BITS
                                 |
                                 PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
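
/* A worked decomposition (hedged example, assuming 4096-byte system
   pages, i.e. G.lg_pagesize == 12 and hence PAGE_L2_BITS == 12):
   for the 32-bit address 0xdeadb000,
     LOOKUP_L1 extracts the top 8 bits,   0xde;
     LOOKUP_L2 extracts the next 12 bits, 0xadb;
   and the low 12 bits (0x000) are ignored, since the page is
   page-aligned.  */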
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
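
/* A hedged sanity check of the (M, S) scheme for the simplest case, a
   power-of-two object size: for 8-byte objects compute_inverse below
   yields mult == 1 and shift == 3, so OFFSET_TO_BIT reduces to
   OFFSET >> 3, which is exactly OFFSET / 8 for any multiple of 8.  */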
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
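
/* A hedged note on the offsetof trick above: the offset of `u' after
   the single `char' member is exactly the alignment the compiler
   requires for the union, so on a typical LP64 host MAX_ALIGNMENT
   evaluates to 8.  */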
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */
static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};
/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
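
/* Worked arithmetic for the two rounding macros (hedged illustration):
   ROUND_UP_VALUE (10, 8) == 7 - ((7 + 10) % 8) == 7 - 1 == 6, the
   padding needed to reach the next multiple of 8; and
   ROUND_UP (10, 8) == CEIL (10, 8) * 8 == 2 * 8 == 16.  */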
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by the depth we
     are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object
  {
    void *object;
    struct free_object *next;
  } *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
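
/* Hedged example, assuming HOST_BITS_PER_LONG is 64:
   BITMAP_SIZE (512) == CEIL (512, 64) * 8 == 64 bytes.  Callers
   actually pass num_objects + 1 to leave room for the
   one-past-the-end sentinel bit, so a page holding 512 objects gets
   BITMAP_SIZE (513) == 72 bytes of bitmap.  */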
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 256
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif
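
/* Scale check (hedged, assuming 4096-byte system pages): with mmap
   the allocator grabs 256 * 4096 == 1 MB at a time; the malloc
   fallback takes only 16 * 4096 == 64 KB, since the page_group
   in_use bitmask must fit in an unsigned int.  */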
/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}
/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
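
/* Reading the table (a hedged example): size_lookup[8] == 3 and
   size_lookup[9] == 4, so an 8-byte request maps to order 3 (2^3 ==
   8 bytes) while a 9-byte request maps to order 4 (16 bytes).  Note
   that init_ggc below rewrites some entries so that sizes covered by
   the extra non-power-of-two orders use those tighter orders
   instead.  */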
/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
		      MEM_STAT_DECL)
{
  return ggc_alloc_stat (size PASS_MEM_STAT);
}
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  if (size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;

#if GCC_VERSION >= 3004
	  bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
#endif

	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
		       result PASS_MEM_STAT);
#endif

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object unaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
						object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     unaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
	G.stats.total_overhead_under32 += overhead;
	G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
	G.stats.total_overhead_under64 += overhead;
	G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
	G.stats.total_overhead_under128 += overhead;
	G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}
/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p || !ggc_allocated_p (p))
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
	 of an allocated object.  We assume it points to the middle of
	 a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
	page_entry *p, *q;

	/* If the page is completely full, then it's supposed to
	   be after all pages that aren't.  Since we've freed one
	   object from a page that was full, we need to move the
	   page to the head of the list.

	   PE is the node we want to move.  Q is the previous node
	   and P is the next node in the list.  */
	q = pe->prev;
	if (q && q->num_free_objects == 0)
	  {
	    p = pe->next;

	    q->next = p;

	    /* If PE was at the end of the list, then Q becomes the
	       new end of the list.  If PE was not the end of the
	       list, then we need to update the PREV field for P.  */
	    if (!p)
	      G.page_tails[order] = q;
	    else
	      p->prev = q;

	    /* Move PE to the head of the list.  */
	    pe->next = G.pages[order];
	    pe->prev = NULL;
	    G.pages[order]->prev = pe;
	    G.pages[order] = pe;
	  }

	/* Reset the hint bit to point to the only free object.  */
	pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
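
/* A worked trace of compute_inverse (hedged example, for a 32-bit
   size_t): for OBJECT_SIZE == 24 the loop strips three factors of two
   (e == 3), leaving the odd part 3.  The Newton iteration then
   converges on 3's multiplicative inverse modulo 2^32, 0xaaaaaaab,
   since 3 * 0xaaaaaaab == 0x200000001 == 1 (mod 2^32).  The sketch
   below checks this on offset 48; it is illustrative only and kept
   under #if 0 so it is never compiled.  */
#if 0
static void
check_inverse_example (void)
{
  unsigned long mult = 0xaaaaaaabUL;	/* inverse of 3 mod 2^32 */
  unsigned int shift = 3;		/* three factors of two in 24 */
  unsigned long offset = 48;		/* any multiple of 24 works */
  /* (48 * 0xaaaaaaab) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24.  */
  gcc_assert ((((offset * mult) & 0xffffffffUL) >> shift) == offset / 24);
}
#endif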
/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize);
	gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
	continue;

      for (o = size_lookup[i]; o == size_lookup [i]; --i)
	size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}
/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
{
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects = OBJECTS_IN_PAGE (p);
	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

	  /* The data should be page-aligned.  */
	  gcc_assert (!((size_t) p->page & (G.pagesize - 1)));

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! save_in_use_p (p))
		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  num_objects = OBJECTS_IN_PAGE (p);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      /* If P was the first page in the list, then NEXT
		 becomes the new first page in the list, otherwise
		 splice P out of the forward pointers.  */
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Splice P out of the back pointers too.  */
	      if (next)
		next->prev = previous;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  p->prev = G.page_tails[order];
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;

		  /* And update the backpointer in NEXT if necessary.  */
		  if (next)
		    next->prev = previous;

		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;

	      /* Update the backchain in the next node if it exists.  */
	      if (p->next)
		p->next->prev = previous;

	      /* Move P to the head of the list.  */
	      p->next = G.pages[order];
	      p->prev = NULL;
	      G.pages[order]->prev = p;

	      /* Update the head pointer.  */
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects;
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  num_objects = OBJECTS_IN_PAGE (p);
	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		{
		  char *object = p->page + i * size;

		  /* Keep poison-by-write when we expect to use Valgrind,
		     so the exact same memory semantics is kept, in case
		     there are memory errors.  We override this request
		     below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
								 size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
		}
	    }
	}
    }
}
#else
#define poison_pages()
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
	 do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
	 free_object entry, so that we can verify that the address
	 isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
	{
	  f->next = still_free;
	  still_free = f;
	}
      else
	free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
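
  /* A hedged numeric illustration of the threshold above: assuming
     the parameters work out to a 4 MB minimum heap size and a 30%
     expansion factor, collection is skipped until more than
     4 MB + 1.2 MB of data has accumulated since the last GC.  */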
1933 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
1934 return;
1936 timevar_push (TV_GC);
1937 if (!quiet_flag)
1938 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1939 if (GGC_DEBUG_LEVEL >= 2)
1940 fprintf (G.debug_file, "BEGIN COLLECTING\n");
1942 /* Zero the total allocated bytes. This will be recalculated in the
1943 sweep phase. */
1944 G.allocated = 0;
1946 /* Release the pages we freed the last time we collected, but didn't
1947 reuse in the interim. */
1948 release_pages ();
1950 /* Indicate that we've seen collections at this context depth. */
1951 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1953 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
1955 clear_marks ();
1956 ggc_mark_roots ();
1957 #ifdef GATHER_STATISTICS
1958 ggc_prune_overhead_list ();
1959 #endif
1960 poison_pages ();
1961 validate_free_objects ();
1962 sweep_pages ();
1964 G.allocated_last_gc = G.allocated;
1966 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
1968 timevar_pop (TV_GC);
1970 if (!quiet_flag)
1971 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1972 if (GGC_DEBUG_LEVEL >= 2)
1973 fprintf (G.debug_file, "END COLLECTING\n");
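
/* A worked example of the trigger heuristic above, assuming
   --param ggc-min-heapsize=4096 (kB) and --param ggc-min-expand=30
   (%), the usual defaults:

     allocated_last_gc = MAX (G.allocated_last_gc, 4096 * 1024)
                       = at least 4194304 bytes
     min_expand        = 4194304 * 30 / 100 == 1258291 bytes

   so a collection is skipped until G.allocated passes roughly 5.2 MB,
   unless ggc_force_collect overrides the heuristic.  */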

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
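
/* Worked examples of the pair of macros above:

     SCALE (5000)     == 5000,  STAT_LABEL -> ' '  (below 10 kB: as-is)
     SCALE (2000000)  == 1953,  STAT_LABEL -> 'k'  (2000000 / 1024)
     SCALE (50000000) == 47,    STAT_LABEL -> 'M'  (50000000 / 1048576)

   so each statistic is printed in the largest unit that keeps the
   number readable.  */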

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s  %10s  %10s\n",
           "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
        }
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
               (unsigned long) OBJECT_SIZE (i),
               SCALE (allocated), STAT_LABEL (allocated),
               SCALE (in_use), STAT_LABEL (in_use),
               SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
           SCALE (G.allocated), STAT_LABEL (G.allocated),
           SCALE (total_overhead), STAT_LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead:                           %10lld\n",
             G.stats.total_overhead);
    fprintf (stderr, "Total Allocated:                          %10lld\n",
             G.stats.total_allocated);

    fprintf (stderr, "Total Overhead  under  32B:               %10lld\n",
             G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B:               %10lld\n",
             G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B:               %10lld\n",
             G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B:               %10lld\n",
             G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B:               %10lld\n",
             G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:               %10lld\n",
             G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
        {
          fprintf (stderr, "Total Overhead  page size %7lu:        %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_overhead_per_order[i]);
          fprintf (stderr, "Total Allocated page size %7lu:        %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_allocated_per_order[i]);
        }
  }
#endif
}
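
/* The per-page overhead summed above is the page_entry header plus
   its trailing in-use bitmap; the "- sizeof (long)" compensates for
   the one-element in_use_p array already counted inside the struct.
   A worked example, assuming a 64-bit host (sizeof (long) == 8,
   HOST_BITS_PER_LONG == 64) and a page holding 512 objects:

     BITMAP_SIZE (512 + 1) == CEIL (513, 64) * 8 == 9 * 8 == 72

   so that page contributes sizeof (page_entry) - 8 + 72 bytes of
   bookkeeping overhead.  */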

/* State kept while writing a precompiled header: the per-order object
   counts that end up on disk, plus the allocation cursors used while
   laying objects out.  */

struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

/* Return a fresh, zero-initialized PCH bookkeeping structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}
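
/* A sketch of how the PCH writer (gt_pch_save in ggc-common.c)
   drives the hooks below; the exact call sites live there, but the
   order is roughly:

     d = init_ggc_pch ();
     for each object X:   ggc_pch_count_object (d, X, size, ...);
     total = ggc_pch_total_size (d);     reserve this much space
     ggc_pch_this_base (d, base);        the chosen load address
     for each object X:   newx = ggc_pch_alloc_object (d, X, ...);
     ggc_pch_prepare_write (d, f);
     for each object X:   ggc_pch_write_object (d, f, X, newx, ...);
     ggc_pch_finish (d, f);              append totals, free d
 */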

/* Record that an object of SIZE bytes will go into the PCH file.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED,
                      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  d->d.totals[order]++;
}
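
/* The size-to-order mapping above (repeated in ggc_pch_alloc_object
   and ggc_pch_write_object) is the allocator's usual idiom: small
   sizes go through the precomputed size_lookup table, larger ones
   are found by scanning upward from order 10 until an order with a
   large enough OBJECT_SIZE appears.  */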

/* Return the total size of the PCH data: one page-aligned run of
   objects per order.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}

/* Given that the PCH data will be mapped at BASE, record the starting
   address of each order's run of objects.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}
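
/* A worked example of the carving above, assuming G.pagesize == 4096
   and two non-empty orders, 100 objects of 32 bytes and 10 objects
   of 256 bytes:

     100 * 32  == 3200 -> ROUND_UP -> 4096 bytes starting at base
     10  * 256 == 2560 -> ROUND_UP -> 4096 bytes starting at base + 4096

   ggc_pch_total_size applies the same rounding, so the runs carved
   out here always fit in the space the caller reserved.  */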

/* Assign object X of SIZE bytes its slot in the PCH address space
   and return that address.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED,
                      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

/* Prepare to write the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

/* Write object X of SIZE bytes to file F, padded to its order's
   object size.  */

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error ("can't write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error ("can't write PCH file");
        }
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
                                   G.pagesize),
                SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}
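
/* A worked example of the padding above: an object of 37 bytes whose
   order's OBJECT_SIZE is 64 needs 27 bytes of padding, and since
   27 <= sizeof (emptyBytes) the zeros come from a single fwrite
   rather than an fseek.  Once the last object of an order has been
   written, the final fseek skips to the next page boundary so the
   file layout matches the page-aligned runs handed out by
   ggc_pch_this_base.  */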

/* Write out the trailing per-order totals and release D.  */

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}
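
/* As implied by the two writers above, the on-disk layout of this
   allocator's PCH data is the padded object runs (one page-aligned
   run per order) followed by the ggc_pch_ondisk totals, which is
   what ggc_pch_read below picks up with a matching fread.  */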

/* Move the PCH PTE entries just added to the end of by_depth to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
          &G.by_depth[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
          &G.by_depth[0],
          count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
          &G.save_in_use[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
          &G.save_in_use[0],
          count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
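
/* A small worked example of the rotation above: with two preexisting
   entries O1 O2 and three PCH entries N1 N2 N3 appended by
   ggc_pch_read, the four memcpy calls turn

     by_depth: [O1, O2, N1, N2, N3]  into  [N1, N2, N3, O1, O2]

   after which every index_by_depth is refreshed and push_depth (3)
   records that the depth-1 (pre-PCH) entries now begin at index 3.  */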

/* Read back the PCH data written by the routines above; the file
   contents have been mapped (or read) at ADDR.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
        p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
        continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
                                            - sizeof (long)
                                            + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
           j + HOST_BITS_PER_LONG <= num_objs + 1;
           j += HOST_BITS_PER_LONG)
        entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
        entry->in_use_p[j / HOST_BITS_PER_LONG]
          |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
           pte < entry->page + entry->bytes;
           pte += G.pagesize)
        set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
        G.page_tails[i]->next = entry;
      else
        G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
         end of the varrays; later, we will move it to the front of
         the varrays, as the PCH page tables are at context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}
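
/* A worked example of the bitmap fill in ggc_pch_read, assuming
   HOST_BITS_PER_LONG == 64 and num_objs == 100.  The bitmap covers
   num_objs + 1 == 101 bits, so the first loop runs once, storing -1
   into in_use_p[0] (bits 0..63), and the second loop sets bits
   64..100 of in_use_p[1] one at a time.  With every bit set and
   num_free_objects == 0, no PCH object can ever be handed out again
   or swept.  */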