1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
3 2010, 2011 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "diagnostic-core.h"
29 #include "flags.h"
30 #include "ggc.h"
31 #include "ggc-internal.h"
32 #include "timevar.h"
33 #include "params.h"
34 #include "tree-flow.h"
35 #include "cfgloop.h"
36 #include "plugin.h"
38 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
39 file open. Prefer either to valloc. */
40 #ifdef HAVE_MMAP_ANON
41 # undef HAVE_MMAP_DEV_ZERO
42 # define USING_MMAP
43 #endif
45 #ifdef HAVE_MMAP_DEV_ZERO
46 # define USING_MMAP
47 #endif
49 #ifndef USING_MMAP
50 #define USING_MALLOC_PAGE_GROUPS
51 #endif
53 #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
54 && defined(USING_MMAP)
55 # define USING_MADVISE
56 #endif
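/* The separate HAVE_DECL_MADVISE test above matters on hosts such as
   Solaris 10 (PR bootstrap/50777), where madvise may be present in
   libc without being declared by the system headers in the mode GCC
   is built in; calling it without a declaration would break
   bootstrap.  */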
58 /* Strategy:
60 This garbage-collecting allocator allocates objects on one of a set
61 of pages. Each page can allocate objects of a single size only;
62 available sizes are powers of two starting at four bytes. The size
63 of an allocation request is rounded up to the next power of two
64 (`order'), and satisfied from the appropriate page.
66 Each page is recorded in a page-entry, which also maintains an
67 in-use bitmap of object positions on the page. This allows the
68 allocation state of a particular object to be flipped without
69 touching the page itself.
71 Each page-entry also has a context depth, which is used to track
72 pushing and popping of allocation contexts. Only objects allocated
73 in the current (highest-numbered) context may be collected.
75 Page entries are arranged in an array of doubly-linked lists. The
76 array is indexed by the allocation size, in bits, of the pages on
77 it; i.e. all pages on a list allocate objects of the same size.
78 Pages are ordered on the list such that all non-full pages precede
79 all full pages, with non-full pages arranged in order of decreasing
80 context depth.
82 Empty pages (of all orders) are kept on a single page cache list,
83 and are considered first when new pages are required; they are
84 deallocated at the start of the next collection if they haven't
85 been recycled by then. */
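/* Illustrative sketch, not part of the collector: how a request size
   maps to an order under the scheme above.  A 20-byte request, for
   example, rounds up to 32 bytes and is served from order-5 pages.
   (The real allocator also defines extra non-power-of-two orders;
   see extra_order_size_table below.)  */
#if 0
static unsigned
example_request_to_order (size_t size)
{
  unsigned order = 2;		/* Smallest object size: 2^2 == 4 bytes.  */
  while (((size_t) 1 << order) < size)
    ++order;
  return order;
}
#endif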
87 /* Define GGC_DEBUG_LEVEL to print debugging information.
88 0: No debugging output.
89 1: GC statistics only.
90 2: Page-entry allocations/deallocations as well.
91 3: Object allocations as well.
92 4: Object marks as well. */
93 #define GGC_DEBUG_LEVEL (0)
95 #ifndef HOST_BITS_PER_PTR
96 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
97 #endif
100 /* A two-level tree is used to look up the page-entry for a given
101 pointer. Two chunks of the pointer's bits are extracted to index
102 the first and second levels of the tree, as follows:
104                                    HOST_PAGE_SIZE_BITS
105                            32           |      |
106        msb +----------------+----+------+------+ lsb
107                             |    |
108                          PAGE_L1_BITS
109                                  |
110                             PAGE_L2_BITS
112 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
113 pages are aligned on system page boundaries. The next most
114 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
115 index values in the lookup table, respectively.
117 For 32-bit architectures and the settings below, there are no
118 leftover bits. For architectures with wider pointers, the lookup
119 tree points to a list of pages, which must be scanned to find the
120 correct one. */
122 #define PAGE_L1_BITS (8)
123 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
124 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
125 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
127 #define LOOKUP_L1(p) \
128 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
130 #define LOOKUP_L2(p) \
131 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
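/* Illustrative sketch, not part of the collector: splitting an
   address with the macros above.  With 4K pages (G.lg_pagesize ==
   12), LOOKUP_L1 extracts the top 8 bits of the low 32 and LOOKUP_L2
   the 12 bits between those and the page offset.  page_entry is the
   structure defined further down.  */
#if 0
static page_entry *
example_two_level_lookup (page_entry ***base, const void *p)
{
  size_t L1 = LOOKUP_L1 (p);	/* First-level index.  */
  size_t L2 = LOOKUP_L2 (p);	/* Second-level index.  */
  return base[L1] ? base[L1][L2] : NULL;
}
#endif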
133 /* The number of objects per allocation page, for objects on a page of
134 the indicated ORDER. */
135 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
137 /* The number of objects in P. */
138 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
140 /* The size of an object on a page of the indicated ORDER. */
141 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
143 /* For speed, we avoid doing a general integer divide to locate the
144 offset in the allocation bitmap, by precalculating numbers M, S
145 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
146 within the page which is evenly divisible by the object size Z. */
147 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
148 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
149 #define OFFSET_TO_BIT(OFFSET, ORDER) \
150 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
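/* Illustrative check, not part of the collector: the (mult, shift)
   pair stands in for a division, so for every offset O that is a
   multiple of the object size, OFFSET_TO_BIT must agree with the
   plain quotient.  */
#if 0
static void
example_check_inverse (unsigned order)
{
  size_t o;
  for (o = 0; o < 4096; o += OBJECT_SIZE (order))
    gcc_assert (OFFSET_TO_BIT (o, order) == o / OBJECT_SIZE (order));
}
#endif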
152 /* We use this structure to determine the alignment required for
153 allocations. For power-of-two sized allocations, that's not a
154 problem, but it does matter for odd-sized allocations.
155 We do not care about alignment for floating-point types. */
157 struct max_alignment {
158 char c;
159 union {
160 HOST_WIDEST_INT i;
161 void *p;
162 } u;
163 };
165 /* The biggest alignment required. */
167 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
170 /* The number of extra orders, not corresponding to power-of-two sized
171 objects. */
173 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
175 #define RTL_SIZE(NSLOTS) \
176 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
178 #define TREE_EXP_SIZE(OPS) \
179 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
181 /* The Ith entry is the maximum size of an object to be stored in the
182 Ith extra order. Adding a new entry to this array is the *only*
183 thing you need to do to add a new special allocation size. */
185 static const size_t extra_order_size_table[] = {
186 /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
187 There are a lot of structures with these sizes and explicitly
188 listing them risks orders being dropped because they changed size. */
189 MAX_ALIGNMENT * 3,
190 MAX_ALIGNMENT * 5,
191 MAX_ALIGNMENT * 6,
192 MAX_ALIGNMENT * 7,
193 MAX_ALIGNMENT * 9,
194 MAX_ALIGNMENT * 10,
195 MAX_ALIGNMENT * 11,
196 MAX_ALIGNMENT * 12,
197 MAX_ALIGNMENT * 13,
198 MAX_ALIGNMENT * 14,
199 MAX_ALIGNMENT * 15,
200 sizeof (struct tree_decl_non_common),
201 sizeof (struct tree_field_decl),
202 sizeof (struct tree_parm_decl),
203 sizeof (struct tree_var_decl),
204 sizeof (struct tree_type_non_common),
205 sizeof (struct function),
206 sizeof (struct basic_block_def),
207 sizeof (struct cgraph_node),
208 sizeof (struct loop),
209 };
211 /* The total number of orders. */
213 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
215 /* Compute the smallest nonnegative number which when added to X gives
216 a multiple of F. */
218 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
220 /* Compute the smallest multiple of F that is >= X. */
222 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
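/* For example, with f == 8: ROUND_UP_VALUE (20, 8) == 4 and
   ROUND_UP (20, 8) == 24, while an already-aligned value is left
   alone: ROUND_UP_VALUE (24, 8) == 0 and ROUND_UP (24, 8) == 24.  */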
224 /* The Ith entry is the number of objects on a page of order I. */
226 static unsigned objects_per_page_table[NUM_ORDERS];
228 /* The Ith entry is the size of an object on a page of order I. */
230 static size_t object_size_table[NUM_ORDERS];
232 /* The Ith entry is a pair of numbers (mult, shift) such that
233 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
234 for all k evenly divisible by OBJECT_SIZE(I). */
236 static struct
237 {
238 size_t mult;
239 unsigned int shift;
240 }
241 inverse_table[NUM_ORDERS];
243 /* A page_entry records the status of an allocation page. This
244 structure is dynamically sized to fit the bitmap in_use_p. */
245 typedef struct page_entry
247 /* The next page-entry with objects of the same size, or NULL if
248 this is the last page-entry. */
249 struct page_entry *next;
251 /* The previous page-entry with objects of the same size, or NULL if
252 this is the first page-entry. The PREV pointer exists solely to
253 keep the cost of ggc_free manageable. */
254 struct page_entry *prev;
256 /* The number of bytes allocated. (This will always be a multiple
257 of the host system page size.) */
258 size_t bytes;
260 /* The address at which the memory is allocated. */
261 char *page;
263 #ifdef USING_MALLOC_PAGE_GROUPS
264 /* Back pointer to the page group this page came from. */
265 struct page_group *group;
266 #endif
268 /* This is the index in the by_depth varray where this page table
269 can be found. */
270 unsigned long index_by_depth;
272 /* Context depth of this page. */
273 unsigned short context_depth;
275 /* The number of free objects remaining on this page. */
276 unsigned short num_free_objects;
278 /* A likely candidate for the bit position of a free object for the
279 next allocation from this page. */
280 unsigned short next_bit_hint;
282 /* The lg of size of objects allocated from this page. */
283 unsigned char order;
285 /* Discarded page? */
286 bool discarded;
288 /* A bit vector indicating whether or not objects are in use. The
289 Nth bit is one if the Nth object on this page is allocated. This
290 array is dynamically sized. */
291 unsigned long in_use_p[1];
292 } page_entry;
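/* Note that a page_entry is never allocated at just
   sizeof (page_entry); alloc_page below oversizes it so the trailing
   in_use_p array can hold the whole bitmap, allocating
   sizeof (page_entry) - sizeof (long) + BITMAP_SIZE (num_objects + 1)
   bytes.  */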
294 #ifdef USING_MALLOC_PAGE_GROUPS
295 /* A page_group describes a large allocation from malloc, from which
296 we parcel out aligned pages. */
297 typedef struct page_group
299 /* A linked list of all extant page groups. */
300 struct page_group *next;
302 /* The address we received from malloc. */
303 char *allocation;
305 /* The size of the block. */
306 size_t alloc_size;
308 /* A bitmask of pages in use. */
309 unsigned int in_use;
310 } page_group;
311 #endif
313 #if HOST_BITS_PER_PTR <= 32
315 /* On 32-bit hosts, we use a two level page table, as pictured above. */
316 typedef page_entry **page_table[PAGE_L1_SIZE];
318 #else
320 /* On 64-bit hosts, we use the same two level page tables plus a linked
321 list that disambiguates the top 32-bits. There will almost always be
322 exactly one entry in the list. */
323 typedef struct page_table_chain
325 struct page_table_chain *next;
326 size_t high_bits;
327 page_entry **table[PAGE_L1_SIZE];
328 } *page_table;
330 #endif
332 #ifdef ENABLE_GC_ALWAYS_COLLECT
333 /* List of free objects to be verified as actually free on the
334 next collection. */
335 struct free_object
337 void *object;
338 struct free_object *next;
340 #endif
342 /* The rest of the global variables. */
343 static struct globals
345 /* The Nth element in this array is a page with objects of size 2^N.
346 If there are any pages with free objects, they will be at the
347 head of the list. NULL if there are no page-entries for this
348 object size. */
349 page_entry *pages[NUM_ORDERS];
351 /* The Nth element in this array is the last page with objects of
352 size 2^N. NULL if there are no page-entries for this object
353 size. */
354 page_entry *page_tails[NUM_ORDERS];
356 /* Lookup table for associating allocation pages with object addresses. */
357 page_table lookup;
359 /* The system's page size. */
360 size_t pagesize;
361 size_t lg_pagesize;
363 /* Bytes currently allocated. */
364 size_t allocated;
366 /* Bytes currently allocated at the end of the last collection. */
367 size_t allocated_last_gc;
369 /* Total amount of memory mapped. */
370 size_t bytes_mapped;
372 /* Bit N set if any allocations have been done at context depth N. */
373 unsigned long context_depth_allocations;
375 /* Bit N set if any collections have been done at context depth N. */
376 unsigned long context_depth_collections;
378 /* The current depth in the context stack. */
379 unsigned short context_depth;
381 /* A file descriptor open to /dev/zero for reading. */
382 #if defined (HAVE_MMAP_DEV_ZERO)
383 int dev_zero_fd;
384 #endif
386 /* A cache of free system pages. */
387 page_entry *free_pages;
389 #ifdef USING_MALLOC_PAGE_GROUPS
390 page_group *page_groups;
391 #endif
393 /* The file descriptor for debugging output. */
394 FILE *debug_file;
396 /* Current number of elements in use in depth below. */
397 unsigned int depth_in_use;
399 /* Maximum number of elements that can be used before resizing. */
400 unsigned int depth_max;
402 /* Each element of this array is an index in by_depth where the given
403 depth starts. This array is indexed by the depth we
404 are interested in. */
405 unsigned int *depth;
407 /* Current number of elements in use in by_depth below. */
408 unsigned int by_depth_in_use;
410 /* Maximum number of elements that can be used before resizing. */
411 unsigned int by_depth_max;
413 /* Each element of this array is a pointer to a page_entry, all
414 page_entries can be found in here by increasing depth.
415 index_by_depth in the page_entry is the index into this data
416 structure where that page_entry can be found. This is used to
417 speed up finding all page_entries at a particular depth. */
418 page_entry **by_depth;
420 /* Each element is a pointer to the saved in_use_p bits, if any,
421 zero otherwise. We allocate them all together, to enable a
422 better runtime data access pattern. */
423 unsigned long **save_in_use;
425 #ifdef ENABLE_GC_ALWAYS_COLLECT
426 /* List of free objects to be verified as actually free on the
427 next collection. */
428 struct free_object *free_object_list;
429 #endif
431 #ifdef GATHER_STATISTICS
432 struct
434 /* Total GC-allocated memory. */
435 unsigned long long total_allocated;
436 /* Total overhead for GC-allocated memory. */
437 unsigned long long total_overhead;
439 /* Total allocations and overhead for sizes less than 32, 64 and 128.
440 These sizes are interesting because they are typical cache line
441 sizes. */
443 unsigned long long total_allocated_under32;
444 unsigned long long total_overhead_under32;
446 unsigned long long total_allocated_under64;
447 unsigned long long total_overhead_under64;
449 unsigned long long total_allocated_under128;
450 unsigned long long total_overhead_under128;
452 /* The allocations for each of the allocation orders. */
453 unsigned long long total_allocated_per_order[NUM_ORDERS];
455 /* The overhead for each of the allocation orders. */
456 unsigned long long total_overhead_per_order[NUM_ORDERS];
457 } stats;
458 #endif
459 } G;
461 /* The size in bytes required to maintain a bitmap for the objects
462 on a page-entry. */
463 #define BITMAP_SIZE(Num_objects) \
464 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
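/* For example, on a host with 64-bit longs, a 4K page of order-5
   (32-byte) objects holds 128 objects; with the one-past-the-end
   sentinel bit that is BITMAP_SIZE (129) == CEIL (129, 64) * 8 == 24
   bytes of bitmap.  */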
466 /* Allocate pages in chunks of this size, to throttle calls to memory
467 allocation routines. The first page is used, the rest go onto the
468 free list. This cannot be larger than HOST_BITS_PER_INT for the
469 in_use bitmask for page_group. Hosts that need a different value
470 can override this by defining GGC_QUIRE_SIZE explicitly. */
471 #ifndef GGC_QUIRE_SIZE
472 # ifdef USING_MMAP
473 # define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */
474 # else
475 # define GGC_QUIRE_SIZE 16
476 # endif
477 #endif
479 /* Initial guess as to how many page table entries we might need. */
480 #define INITIAL_PTE_COUNT 128
482 static int ggc_allocated_p (const void *);
483 static page_entry *lookup_page_table_entry (const void *);
484 static void set_page_table_entry (void *, page_entry *);
485 #ifdef USING_MMAP
486 static char *alloc_anon (char *, size_t);
487 #endif
488 #ifdef USING_MALLOC_PAGE_GROUPS
489 static size_t page_group_index (char *, char *);
490 static void set_page_group_in_use (page_group *, char *);
491 static void clear_page_group_in_use (page_group *, char *);
492 #endif
493 static struct page_entry * alloc_page (unsigned);
494 static void free_page (struct page_entry *);
495 static void release_pages (void);
496 static void clear_marks (void);
497 static void sweep_pages (void);
498 static void ggc_recalculate_in_use_p (page_entry *);
499 static void compute_inverse (unsigned);
500 static inline void adjust_depth (void);
501 static void move_ptes_to_front (int, int);
503 void debug_print_page_list (int);
504 static void push_depth (unsigned int);
505 static void push_by_depth (page_entry *, unsigned long *);
507 /* Push an entry onto G.depth. */
509 inline static void
510 push_depth (unsigned int i)
512 if (G.depth_in_use >= G.depth_max)
514 G.depth_max *= 2;
515 G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
517 G.depth[G.depth_in_use++] = i;
520 /* Push an entry onto G.by_depth and G.save_in_use. */
522 inline static void
523 push_by_depth (page_entry *p, unsigned long *s)
525 if (G.by_depth_in_use >= G.by_depth_max)
527 G.by_depth_max *= 2;
528 G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
529 G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
530 G.by_depth_max);
532 G.by_depth[G.by_depth_in_use] = p;
533 G.save_in_use[G.by_depth_in_use++] = s;
536 #if (GCC_VERSION < 3001)
537 #define prefetch(X) ((void) X)
538 #else
539 #define prefetch(X) __builtin_prefetch (X)
540 #endif
542 #define save_in_use_p_i(__i) \
543 (G.save_in_use[__i])
544 #define save_in_use_p(__p) \
545 (save_in_use_p_i (__p->index_by_depth))
547 /* Returns nonzero if P was allocated in GC'able memory. */
549 static inline int
550 ggc_allocated_p (const void *p)
552 page_entry ***base;
553 size_t L1, L2;
555 #if HOST_BITS_PER_PTR <= 32
556 base = &G.lookup[0];
557 #else
558 page_table table = G.lookup;
559 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
560 while (1)
562 if (table == NULL)
563 return 0;
564 if (table->high_bits == high_bits)
565 break;
566 table = table->next;
568 base = &table->table[0];
569 #endif
571 /* Extract the level 1 and 2 indices. */
572 L1 = LOOKUP_L1 (p);
573 L2 = LOOKUP_L2 (p);
575 return base[L1] && base[L1][L2];
578 /* Traverse the page table and find the entry for a page.
579 Die (probably) if the object wasn't allocated via GC. */
581 static inline page_entry *
582 lookup_page_table_entry (const void *p)
584 page_entry ***base;
585 size_t L1, L2;
587 #if HOST_BITS_PER_PTR <= 32
588 base = &G.lookup[0];
589 #else
590 page_table table = G.lookup;
591 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
592 while (table->high_bits != high_bits)
593 table = table->next;
594 base = &table->table[0];
595 #endif
597 /* Extract the level 1 and 2 indices. */
598 L1 = LOOKUP_L1 (p);
599 L2 = LOOKUP_L2 (p);
601 return base[L1][L2];
604 /* Set the page table entry for a page. */
606 static void
607 set_page_table_entry (void *p, page_entry *entry)
609 page_entry ***base;
610 size_t L1, L2;
612 #if HOST_BITS_PER_PTR <= 32
613 base = &G.lookup[0];
614 #else
615 page_table table;
616 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
617 for (table = G.lookup; table; table = table->next)
618 if (table->high_bits == high_bits)
619 goto found;
621 /* Not found -- allocate a new table. */
622 table = XCNEW (struct page_table_chain);
623 table->next = G.lookup;
624 table->high_bits = high_bits;
625 G.lookup = table;
626 found:
627 base = &table->table[0];
628 #endif
630 /* Extract the level 1 and 2 indices. */
631 L1 = LOOKUP_L1 (p);
632 L2 = LOOKUP_L2 (p);
634 if (base[L1] == NULL)
635 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
637 base[L1][L2] = entry;
640 /* Prints the page-entry for object size ORDER, for debugging. */
642 DEBUG_FUNCTION void
643 debug_print_page_list (int order)
645 page_entry *p;
646 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
647 (void *) G.page_tails[order]);
648 p = G.pages[order];
649 while (p != NULL)
651 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
652 p->num_free_objects);
653 p = p->next;
655 printf ("NULL\n");
656 fflush (stdout);
659 #ifdef USING_MMAP
660 /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
661 (if non-null). The ifdef structure here is intended to cause a
662 compile error unless exactly one of the HAVE_* is defined. */
664 static inline char *
665 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
667 #ifdef HAVE_MMAP_ANON
668 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
669 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
670 #endif
671 #ifdef HAVE_MMAP_DEV_ZERO
672 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
673 MAP_PRIVATE, G.dev_zero_fd, 0);
674 #endif
676 if (page == (char *) MAP_FAILED)
678 perror ("virtual memory exhausted");
679 exit (FATAL_EXIT_CODE);
682 /* Remember that we allocated this memory. */
683 G.bytes_mapped += size;
685 /* Pretend we don't have access to the allocated pages. We'll enable
686 access to smaller pieces of the area in ggc_internal_alloc. Discard the
687 handle to avoid handle leak. */
688 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
690 return page;
692 #endif
693 #ifdef USING_MALLOC_PAGE_GROUPS
694 /* Compute the index for this page into the page group. */
696 static inline size_t
697 page_group_index (char *allocation, char *page)
699 return (size_t) (page - allocation) >> G.lg_pagesize;
702 /* Set and clear the in_use bit for this page in the page group. */
704 static inline void
705 set_page_group_in_use (page_group *group, char *page)
707 group->in_use |= 1 << page_group_index (group->allocation, page);
710 static inline void
711 clear_page_group_in_use (page_group *group, char *page)
713 group->in_use &= ~(1 << page_group_index (group->allocation, page));
715 #endif
717 /* Allocate a new page for allocating objects of size 2^ORDER,
718 and return an entry for it. The entry is not added to the
719 appropriate page_table list. */
721 static inline struct page_entry *
722 alloc_page (unsigned order)
724 struct page_entry *entry, *p, **pp;
725 char *page;
726 size_t num_objects;
727 size_t bitmap_size;
728 size_t page_entry_size;
729 size_t entry_size;
730 #ifdef USING_MALLOC_PAGE_GROUPS
731 page_group *group;
732 #endif
734 num_objects = OBJECTS_PER_PAGE (order);
735 bitmap_size = BITMAP_SIZE (num_objects + 1);
736 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
737 entry_size = num_objects * OBJECT_SIZE (order);
738 if (entry_size < G.pagesize)
739 entry_size = G.pagesize;
741 entry = NULL;
742 page = NULL;
744 /* Check the list of free pages for one we can use. */
745 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
746 if (p->bytes == entry_size)
747 break;
749 if (p != NULL)
751 if (p->discarded)
752 G.bytes_mapped += p->bytes;
753 p->discarded = false;
755 /* Recycle the allocated memory from this page ... */
756 *pp = p->next;
757 page = p->page;
759 #ifdef USING_MALLOC_PAGE_GROUPS
760 group = p->group;
761 #endif
763 /* ... and, if possible, the page entry itself. */
764 if (p->order == order)
766 entry = p;
767 memset (entry, 0, page_entry_size);
769 else
770 free (p);
772 #ifdef USING_MMAP
773 else if (entry_size == G.pagesize)
775 /* We want just one page. Allocate a bunch of them and put the
776 extras on the freelist. (Can only do this optimization with
777 mmap for backing store.) */
778 struct page_entry *e, *f = G.free_pages;
779 int i;
781 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
783 /* This loop counts down so that the chain will be in ascending
784 memory order. */
785 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
787 e = XCNEWVAR (struct page_entry, page_entry_size);
788 e->order = order;
789 e->bytes = G.pagesize;
790 e->page = page + (i << G.lg_pagesize);
791 e->next = f;
792 f = e;
795 G.free_pages = f;
797 else
798 page = alloc_anon (NULL, entry_size);
799 #endif
800 #ifdef USING_MALLOC_PAGE_GROUPS
801 else
803 /* Allocate a large block of memory and serve out the aligned
804 pages therein. This results in much less memory wastage
805 than the traditional implementation of valloc. */
807 char *allocation, *a, *enda;
808 size_t alloc_size, head_slop, tail_slop;
809 int multiple_pages = (entry_size == G.pagesize);
811 if (multiple_pages)
812 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
813 else
814 alloc_size = entry_size + G.pagesize - 1;
815 allocation = XNEWVEC (char, alloc_size);
817 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
818 head_slop = page - allocation;
819 if (multiple_pages)
820 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
821 else
822 tail_slop = alloc_size - entry_size - head_slop;
823 enda = allocation + alloc_size - tail_slop;
825 /* We allocated N pages, which are likely not aligned, leaving
826 us with N-1 usable pages. We plan to place the page_group
827 structure somewhere in the slop. */
828 if (head_slop >= sizeof (page_group))
829 group = (page_group *)page - 1;
830 else
832 /* We magically got an aligned allocation. Too bad, we have
833 to waste a page anyway. */
834 if (tail_slop == 0)
836 enda -= G.pagesize;
837 tail_slop += G.pagesize;
839 gcc_assert (tail_slop >= sizeof (page_group));
840 group = (page_group *)enda;
841 tail_slop -= sizeof (page_group);
844 /* Remember that we allocated this memory. */
845 group->next = G.page_groups;
846 group->allocation = allocation;
847 group->alloc_size = alloc_size;
848 group->in_use = 0;
849 G.page_groups = group;
850 G.bytes_mapped += alloc_size;
852 /* If we allocated multiple pages, put the rest on the free list. */
853 if (multiple_pages)
855 struct page_entry *e, *f = G.free_pages;
856 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
858 e = XCNEWVAR (struct page_entry, page_entry_size);
859 e->order = order;
860 e->bytes = G.pagesize;
861 e->page = a;
862 e->group = group;
863 e->next = f;
864 f = e;
866 G.free_pages = f;
869 #endif
871 if (entry == NULL)
872 entry = XCNEWVAR (struct page_entry, page_entry_size);
874 entry->bytes = entry_size;
875 entry->page = page;
876 entry->context_depth = G.context_depth;
877 entry->order = order;
878 entry->num_free_objects = num_objects;
879 entry->next_bit_hint = 1;
881 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
883 #ifdef USING_MALLOC_PAGE_GROUPS
884 entry->group = group;
885 set_page_group_in_use (group, page);
886 #endif
888 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
889 increment the hint. */
890 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
891 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
893 set_page_table_entry (page, entry);
895 if (GGC_DEBUG_LEVEL >= 2)
896 fprintf (G.debug_file,
897 "Allocating page at %p, object size=%lu, data %p-%p\n",
898 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
899 page + entry_size - 1);
901 return entry;
904 /* Adjust the size of G.depth so that no index greater than the one
905 used by the top of the G.by_depth is used. */
907 static inline void
908 adjust_depth (void)
910 page_entry *top;
912 if (G.by_depth_in_use)
914 top = G.by_depth[G.by_depth_in_use-1];
916 /* Peel back indices in depth that index into by_depth, so that
917 as new elements are added to by_depth, we note the indices
918 of those elements, if they are for new context depths. */
919 while (G.depth_in_use > (size_t)top->context_depth+1)
920 --G.depth_in_use;
924 /* For a page that is no longer needed, put it on the free page list. */
926 static void
927 free_page (page_entry *entry)
929 if (GGC_DEBUG_LEVEL >= 2)
930 fprintf (G.debug_file,
931 "Deallocating page at %p, data %p-%p\n", (void *) entry,
932 entry->page, entry->page + entry->bytes - 1);
934 /* Mark the page as inaccessible. Discard the handle to avoid handle
935 leak. */
936 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
938 set_page_table_entry (entry->page, NULL);
940 #ifdef USING_MALLOC_PAGE_GROUPS
941 clear_page_group_in_use (entry->group, entry->page);
942 #endif
944 if (G.by_depth_in_use > 1)
946 page_entry *top = G.by_depth[G.by_depth_in_use-1];
947 int i = entry->index_by_depth;
949 /* We cannot free a page from a context deeper than the current
950 one. */
951 gcc_assert (entry->context_depth == top->context_depth);
953 /* Put top element into freed slot. */
954 G.by_depth[i] = top;
955 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
956 top->index_by_depth = i;
958 --G.by_depth_in_use;
960 adjust_depth ();
962 entry->next = G.free_pages;
963 G.free_pages = entry;
966 /* Release the free page cache to the system. */
968 static void
969 release_pages (void)
971 #ifdef USING_MADVISE
972 page_entry *p, *start_p;
973 char *start;
974 size_t len;
976 for (p = G.free_pages; p; )
978 if (p->discarded)
980 p = p->next;
981 continue;
983 start = p->page;
984 len = p->bytes;
985 start_p = p;
986 p = p->next;
987 while (p && p->page == start + len)
989 len += p->bytes;
990 p = p->next;
992 /* Give the page back to the kernel, but don't free the mapping.
993 This avoids fragmentation in the virtual memory map of the
994 process. Next time we can reuse it by just touching it. */
995 madvise (start, len, MADV_DONTNEED);
996 /* Don't count those pages as mapped, so that the garbage collector
997 is not triggered unnecessarily. */
998 G.bytes_mapped -= len;
999 while (start_p != p)
1001 start_p->discarded = true;
1002 start_p = start_p->next;
1005 #endif
1006 #if defined(USING_MMAP) && !defined(USING_MADVISE)
1007 page_entry *p, *next;
1008 char *start;
1009 size_t len;
1011 /* Gather up adjacent pages so they are unmapped together. */
1012 p = G.free_pages;
1014 while (p)
1016 start = p->page;
1017 next = p->next;
1018 len = p->bytes;
1019 free (p);
1020 p = next;
1022 while (p && p->page == start + len)
1024 next = p->next;
1025 len += p->bytes;
1026 free (p);
1027 p = next;
1030 munmap (start, len);
1031 G.bytes_mapped -= len;
1034 G.free_pages = NULL;
1035 #endif
1036 #ifdef USING_MALLOC_PAGE_GROUPS
1037 page_entry **pp, *p;
1038 page_group **gp, *g;
1040 /* Remove all pages from free page groups from the list. */
1041 pp = &G.free_pages;
1042 while ((p = *pp) != NULL)
1043 if (p->group->in_use == 0)
1045 *pp = p->next;
1046 free (p);
1048 else
1049 pp = &p->next;
1051 /* Remove all free page groups, and release the storage. */
1052 gp = &G.page_groups;
1053 while ((g = *gp) != NULL)
1054 if (g->in_use == 0)
1056 *gp = g->next;
1057 G.bytes_mapped -= g->alloc_size;
1058 free (g->allocation);
1060 else
1061 gp = &g->next;
1062 #endif
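/* Illustrative sketch, not part of the collector: the MADV_DONTNEED
   idiom used above, in isolation.  The mapping itself stays valid,
   the kernel is free to reclaim the backing store, and for anonymous
   private memory the next touch typically faults in a fresh
   zero-filled page.  */
#if 0
#include <sys/mman.h>	/* In this file it arrives via system.h.  */

static void
example_discard_and_reuse (char *page, size_t pagesize)
{
  madvise (page, pagesize, MADV_DONTNEED);  /* Give pages back.  */
  page[0] = 1;		/* Reusing them later faults in fresh memory.  */
}
#endif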
1065 /* This table provides a fast way to determine ceil(log_2(size)) for
1066 allocation requests. The minimum allocation size is eight bytes. */
1067 #define NUM_SIZE_LOOKUP 512
1068 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1070 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1071 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1072 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1073 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1074 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1075 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1076 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1077 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1078 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1079 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1080 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1081 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1082 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1083 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1084 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1085 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1086 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1087 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1088 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1089 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1090 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1091 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1092 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1093 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1094 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1095 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1096 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1097 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1098 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1099 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1100 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1101 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1104 /* For a given size of memory requested for allocation, return the
1105 actual size that is going to be allocated, as well as the size
1106 order. */
1108 static void
1109 ggc_round_alloc_size_1 (size_t requested_size,
1110 size_t *size_order,
1111 size_t *alloced_size)
1113 size_t order, object_size;
1115 if (requested_size < NUM_SIZE_LOOKUP)
1117 order = size_lookup[requested_size];
1118 object_size = OBJECT_SIZE (order);
1120 else
1122 order = 10;
1123 while (requested_size > (object_size = OBJECT_SIZE (order)))
1124 order++;
1127 if (size_order)
1128 *size_order = order;
1129 if (alloced_size)
1130 *alloced_size = object_size;
1133 /* For a given size of memory requested for allocation, return the
1134 actual size that is going to be allocated. */
1136 size_t
1137 ggc_round_alloc_size (size_t requested_size)
1139 size_t size = 0;
1141 ggc_round_alloc_size_1 (requested_size, NULL, &size);
1142 return size;
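/* For example, assuming MAX_ALIGNMENT is 8, ggc_round_alloc_size (20)
   returns 24: size 20 falls into the MAX_ALIGNMENT * 3 extra order
   rather than being rounded all the way up to 32.  */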
1145 /* Typed allocation function. Does nothing special in this collector. */
1147 void *
1148 ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
1149 MEM_STAT_DECL)
1151 return ggc_internal_alloc_stat (size PASS_MEM_STAT);
1154 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1156 void *
1157 ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
1159 size_t order, word, bit, object_offset, object_size;
1160 struct page_entry *entry;
1161 void *result;
1163 ggc_round_alloc_size_1 (size, &order, &object_size);
1165 /* If there are non-full pages for this size allocation, they are at
1166 the head of the list. */
1167 entry = G.pages[order];
1169 /* If there is no page for this object size, or all pages in this
1170 context are full, allocate a new page. */
1171 if (entry == NULL || entry->num_free_objects == 0)
1173 struct page_entry *new_entry;
1174 new_entry = alloc_page (order);
1176 new_entry->index_by_depth = G.by_depth_in_use;
1177 push_by_depth (new_entry, 0);
1179 /* We can skip context depths, if we do, make sure we go all the
1180 way to the new depth. */
1181 while (new_entry->context_depth >= G.depth_in_use)
1182 push_depth (G.by_depth_in_use-1);
1184 /* If this is the only entry, it's also the tail. If it is not
1185 the only entry, then we must update the PREV pointer of the
1186 ENTRY (G.pages[order]) to point to our new page entry. */
1187 if (entry == NULL)
1188 G.page_tails[order] = new_entry;
1189 else
1190 entry->prev = new_entry;
1192 /* Put new pages at the head of the page list. By definition the
1193 entry at the head of the list always has a NULL pointer. */
1194 new_entry->next = entry;
1195 new_entry->prev = NULL;
1196 entry = new_entry;
1197 G.pages[order] = new_entry;
1199 /* For a new page, we know the word and bit positions (in the
1200 in_use bitmap) of the first available object -- they're zero. */
1201 new_entry->next_bit_hint = 1;
1202 word = 0;
1203 bit = 0;
1204 object_offset = 0;
1206 else
1208 /* First try to use the hint left from the previous allocation
1209 to locate a clear bit in the in-use bitmap. We've made sure
1210 that the one-past-the-end bit is always set, so if the hint
1211 has run over, this test will fail. */
1212 unsigned hint = entry->next_bit_hint;
1213 word = hint / HOST_BITS_PER_LONG;
1214 bit = hint % HOST_BITS_PER_LONG;
1216 /* If the hint didn't work, scan the bitmap from the beginning. */
1217 if ((entry->in_use_p[word] >> bit) & 1)
1219 word = bit = 0;
1220 while (~entry->in_use_p[word] == 0)
1221 ++word;
1223 #if GCC_VERSION >= 3004
1224 bit = __builtin_ctzl (~entry->in_use_p[word]);
1225 #else
1226 while ((entry->in_use_p[word] >> bit) & 1)
1227 ++bit;
1228 #endif
1230 hint = word * HOST_BITS_PER_LONG + bit;
1233 /* Next time, try the next bit. */
1234 entry->next_bit_hint = hint + 1;
1236 object_offset = hint * object_size;
1239 /* Set the in-use bit. */
1240 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1242 /* Keep a running total of the number of free objects. If this page
1243 fills up, we may have to move it to the end of the list if the
1244 next page isn't full. If the next page is full, all subsequent
1245 pages are full, so there's no need to move it. */
1246 if (--entry->num_free_objects == 0
1247 && entry->next != NULL
1248 && entry->next->num_free_objects > 0)
1250 /* We have a new head for the list. */
1251 G.pages[order] = entry->next;
1253 /* We are moving ENTRY to the end of the page table list.
1254 The new page at the head of the list will have NULL in
1255 its PREV field and ENTRY will have NULL in its NEXT field. */
1256 entry->next->prev = NULL;
1257 entry->next = NULL;
1259 /* Append ENTRY to the tail of the list. */
1260 entry->prev = G.page_tails[order];
1261 G.page_tails[order]->next = entry;
1262 G.page_tails[order] = entry;
1265 /* Calculate the object's address. */
1266 result = entry->page + object_offset;
1267 #ifdef GATHER_STATISTICS
1268 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1269 result PASS_MEM_STAT);
1270 #endif
1272 #ifdef ENABLE_GC_CHECKING
1273 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1274 exact same semantics in presence of memory bugs, regardless of
1275 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1276 handle to avoid handle leak. */
1277 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1279 /* `Poison' the entire allocated object, including any padding at
1280 the end. */
1281 memset (result, 0xaf, object_size);
1283 /* Make the bytes after the end of the object inaccessible. Discard the
1284 handle to avoid handle leak. */
1285 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1286 object_size - size));
1287 #endif
1289 /* Tell Valgrind that the memory is there, but its content isn't
1290 defined. The bytes at the end of the object are still marked
1291 inaccessible. */
1292 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1294 /* Keep track of how many bytes are being allocated. This
1295 information is used in deciding when to collect. */
1296 G.allocated += object_size;
1298 /* For timevar statistics. */
1299 timevar_ggc_mem_total += object_size;
1301 #ifdef GATHER_STATISTICS
1303 size_t overhead = object_size - size;
1305 G.stats.total_overhead += overhead;
1306 G.stats.total_allocated += object_size;
1307 G.stats.total_overhead_per_order[order] += overhead;
1308 G.stats.total_allocated_per_order[order] += object_size;
1310 if (size <= 32)
1312 G.stats.total_overhead_under32 += overhead;
1313 G.stats.total_allocated_under32 += object_size;
1315 if (size <= 64)
1317 G.stats.total_overhead_under64 += overhead;
1318 G.stats.total_allocated_under64 += object_size;
1320 if (size <= 128)
1322 G.stats.total_overhead_under128 += overhead;
1323 G.stats.total_allocated_under128 += object_size;
1326 #endif
1328 if (GGC_DEBUG_LEVEL >= 3)
1329 fprintf (G.debug_file,
1330 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1331 (unsigned long) size, (unsigned long) object_size, result,
1332 (void *) entry);
1334 return result;
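/* Illustrative usage, not part of this file: clients normally reach
   this function through the wrappers in ggc.h, for instance
   ggc_internal_alloc, which supplies MEM_STAT_INFO when statistics
   are gathered.  */
#if 0
void *p = ggc_internal_alloc (32);	/* Served as an order-5 object.  */
#endif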
1337 /* Mark function for strings. */
1339 void
1340 gt_ggc_m_S (const void *p)
1342 page_entry *entry;
1343 unsigned bit, word;
1344 unsigned long mask;
1345 unsigned long offset;
1347 if (!p || !ggc_allocated_p (p))
1348 return;
1350 /* Look up the page on which the object is alloced. */
1351 entry = lookup_page_table_entry (p);
1352 gcc_assert (entry);
1354 /* Calculate the index of the object on the page; this is its bit
1355 position in the in_use_p bitmap. Note that because a char* might
1356 point to the middle of an object, we need special code here to
1357 make sure P points to the start of an object. */
1358 offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1359 if (offset)
1361 /* Here we've seen a char* which does not point to the beginning
1362 of an allocated object. We assume it points to the middle of
1363 a STRING_CST. */
1364 gcc_assert (offset == offsetof (struct tree_string, str));
1365 p = ((const char *) p) - offset;
1366 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1367 return;
1370 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1371 word = bit / HOST_BITS_PER_LONG;
1372 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1374 /* If the bit was previously set, skip it. */
1375 if (entry->in_use_p[word] & mask)
1376 return;
1378 /* Otherwise set it, and decrement the free object count. */
1379 entry->in_use_p[word] |= mask;
1380 entry->num_free_objects -= 1;
1382 if (GGC_DEBUG_LEVEL >= 4)
1383 fprintf (G.debug_file, "Marking %p\n", p);
1385 return;
1388 /* If P is not marked, mark it and return false. Otherwise return true.
1389 P must have been allocated by the GC allocator; it mustn't point to
1390 static objects, stack variables, or memory allocated with malloc. */
1392 int
1393 ggc_set_mark (const void *p)
1395 page_entry *entry;
1396 unsigned bit, word;
1397 unsigned long mask;
1399 /* Look up the page on which the object is alloced. If the object
1400 wasn't allocated by the collector, we'll probably die. */
1401 entry = lookup_page_table_entry (p);
1402 gcc_assert (entry);
1404 /* Calculate the index of the object on the page; this is its bit
1405 position in the in_use_p bitmap. */
1406 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1407 word = bit / HOST_BITS_PER_LONG;
1408 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1410 /* If the bit was previously set, skip it. */
1411 if (entry->in_use_p[word] & mask)
1412 return 1;
1414 /* Otherwise set it, and decrement the free object count. */
1415 entry->in_use_p[word] |= mask;
1416 entry->num_free_objects -= 1;
1418 if (GGC_DEBUG_LEVEL >= 4)
1419 fprintf (G.debug_file, "Marking %p\n", p);
1421 return 0;
1424 /* Return 1 if P has been marked, zero otherwise.
1425 P must have been allocated by the GC allocator; it mustn't point to
1426 static objects, stack variables, or memory allocated with malloc. */
1428 int
1429 ggc_marked_p (const void *p)
1431 page_entry *entry;
1432 unsigned bit, word;
1433 unsigned long mask;
1435 /* Look up the page on which the object is alloced. If the object
1436 wasn't allocated by the collector, we'll probably die. */
1437 entry = lookup_page_table_entry (p);
1438 gcc_assert (entry);
1440 /* Calculate the index of the object on the page; this is its bit
1441 position in the in_use_p bitmap. */
1442 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1443 word = bit / HOST_BITS_PER_LONG;
1444 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1446 return (entry->in_use_p[word] & mask) != 0;
1449 /* Return the size of the gc-able object P. */
1451 size_t
1452 ggc_get_size (const void *p)
1454 page_entry *pe = lookup_page_table_entry (p);
1455 return OBJECT_SIZE (pe->order);
1458 /* Release the memory for object P. */
1460 void
1461 ggc_free (void *p)
1463 page_entry *pe = lookup_page_table_entry (p);
1464 size_t order = pe->order;
1465 size_t size = OBJECT_SIZE (order);
1467 #ifdef GATHER_STATISTICS
1468 ggc_free_overhead (p);
1469 #endif
1471 if (GGC_DEBUG_LEVEL >= 3)
1472 fprintf (G.debug_file,
1473 "Freeing object, actual size=%lu, at %p on %p\n",
1474 (unsigned long) size, p, (void *) pe);
1476 #ifdef ENABLE_GC_CHECKING
1477 /* Poison the data, to indicate the data is garbage. */
1478 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1479 memset (p, 0xa5, size);
1480 #endif
1481 /* Let valgrind know the object is free. */
1482 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1484 #ifdef ENABLE_GC_ALWAYS_COLLECT
1485 /* In the completely-anal-checking mode, we do *not* immediately free
1486 the data, but instead verify that the data is *actually* not
1487 reachable the next time we collect. */
1489 struct free_object *fo = XNEW (struct free_object);
1490 fo->object = p;
1491 fo->next = G.free_object_list;
1492 G.free_object_list = fo;
1494 #else
1496 unsigned int bit_offset, word, bit;
1498 G.allocated -= size;
1500 /* Mark the object not-in-use. */
1501 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1502 word = bit_offset / HOST_BITS_PER_LONG;
1503 bit = bit_offset % HOST_BITS_PER_LONG;
1504 pe->in_use_p[word] &= ~(1UL << bit);
1506 if (pe->num_free_objects++ == 0)
1508 page_entry *p, *q;
1510 /* If the page is completely full, then it's supposed to
1511 be after all pages that aren't. Since we've freed one
1512 object from a page that was full, we need to move the
1513 page to the head of the list.
1515 PE is the node we want to move. Q is the previous node
1516 and P is the next node in the list. */
1517 q = pe->prev;
1518 if (q && q->num_free_objects == 0)
1520 p = pe->next;
1522 q->next = p;
1524 /* If PE was at the end of the list, then Q becomes the
1525 new end of the list. If PE was not the end of the
1526 list, then we need to update the PREV field for P. */
1527 if (!p)
1528 G.page_tails[order] = q;
1529 else
1530 p->prev = q;
1532 /* Move PE to the head of the list. */
1533 pe->next = G.pages[order];
1534 pe->prev = NULL;
1535 G.pages[order]->prev = pe;
1536 G.pages[order] = pe;
1539 /* Reset the hint bit to point to the only free object. */
1540 pe->next_bit_hint = bit_offset;
1543 #endif
1546 /* Subroutine of init_ggc which computes the pair of numbers used to
1547 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1549 This algorithm is taken from Granlund and Montgomery's paper
1550 "Division by Invariant Integers using Multiplication"
1551 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1552 constants). */
1554 static void
1555 compute_inverse (unsigned order)
1557 size_t size, inv;
1558 unsigned int e;
1560 size = OBJECT_SIZE (order);
1561 e = 0;
1562 while (size % 2 == 0)
1564 e++;
1565 size >>= 1;
1568 inv = size;
1569 while (inv * size != 1)
1570 inv = inv * (2 - inv*size);
1572 DIV_MULT (order) = inv;
1573 DIV_SHIFT (order) = e;
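/* Worked example: for OBJECT_SIZE == 24, the loop above strips the
   factor 2^3, leaving size == 3 and e == 3.  Each Newton step
   inv = inv * (2 - inv * size) doubles the number of correct low
   bits, ending with inv == 0xaaaaaaab when size_t has 32 bits
   (3 * 0xaaaaaaab == 0x200000001 == 1 mod 2^32).  OFFSET_TO_BIT then
   computes (O * 0xaaaaaaab) >> 3 == O / 24 for any multiple O of 24.  */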
1576 /* Initialize the ggc-mmap allocator. */
1577 void
1578 init_ggc (void)
1580 unsigned order;
1582 G.pagesize = getpagesize();
1583 G.lg_pagesize = exact_log2 (G.pagesize);
1585 #ifdef HAVE_MMAP_DEV_ZERO
1586 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1587 if (G.dev_zero_fd == -1)
1588 internal_error ("open /dev/zero: %m");
1589 #endif
1591 #if 0
1592 G.debug_file = fopen ("ggc-mmap.debug", "w");
1593 #else
1594 G.debug_file = stdout;
1595 #endif
1597 #ifdef USING_MMAP
1598 /* StunOS has an amazing off-by-one error for the first mmap allocation
1599 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1600 believe, is an unaligned page allocation, which would cause us to
1601 hork badly if we tried to use it. */
1603 char *p = alloc_anon (NULL, G.pagesize);
1604 struct page_entry *e;
1605 if ((size_t)p & (G.pagesize - 1))
1607 /* How losing. Discard this one and try another. If we still
1608 can't get something useful, give up. */
1610 p = alloc_anon (NULL, G.pagesize);
1611 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1614 /* We have a good page, might as well hold onto it... */
1615 e = XCNEW (struct page_entry);
1616 e->bytes = G.pagesize;
1617 e->page = p;
1618 e->next = G.free_pages;
1619 G.free_pages = e;
1621 #endif
1623 /* Initialize the object size table. */
1624 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1625 object_size_table[order] = (size_t) 1 << order;
1626 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1628 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1630 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1631 so that we're sure of getting aligned memory. */
1632 s = ROUND_UP (s, MAX_ALIGNMENT);
1633 object_size_table[order] = s;
1636 /* Initialize the objects-per-page and inverse tables. */
1637 for (order = 0; order < NUM_ORDERS; ++order)
1639 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1640 if (objects_per_page_table[order] == 0)
1641 objects_per_page_table[order] = 1;
1642 compute_inverse (order);
1645 /* Reset the size_lookup array to put appropriately sized objects in
1646 the special orders. All objects bigger than the previous power
1647 of two, but no greater than the special size, should go in the
1648 new order. */
1649 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1651 int o;
1652 int i;
1654 i = OBJECT_SIZE (order);
1655 if (i >= NUM_SIZE_LOOKUP)
1656 continue;
1658 for (o = size_lookup[i]; o == size_lookup [i]; --i)
1659 size_lookup[i] = order;
1662 G.depth_in_use = 0;
1663 G.depth_max = 10;
1664 G.depth = XNEWVEC (unsigned int, G.depth_max);
1666 G.by_depth_in_use = 0;
1667 G.by_depth_max = INITIAL_PTE_COUNT;
1668 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1669 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1672 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1673 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1675 static void
1676 ggc_recalculate_in_use_p (page_entry *p)
1678 unsigned int i;
1679 size_t num_objects;
1681 /* Because the past-the-end bit in in_use_p is always set, we
1682 pretend there is one additional object. */
1683 num_objects = OBJECTS_IN_PAGE (p) + 1;
1685 /* Reset the free object count. */
1686 p->num_free_objects = num_objects;
1688 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1689 for (i = 0;
1690 i < CEIL (BITMAP_SIZE (num_objects),
1691 sizeof (*p->in_use_p));
1692 ++i)
1694 unsigned long j;
1696 /* Something is in use if it is marked, or if it was in use in a
1697 context further down the context stack. */
1698 p->in_use_p[i] |= save_in_use_p (p)[i];
1700 /* Decrement the free object count for every object allocated. */
1701 for (j = p->in_use_p[i]; j; j >>= 1)
1702 p->num_free_objects -= (j & 1);
1705 gcc_assert (p->num_free_objects < num_objects);
1708 /* Unmark all objects. */
1710 static void
1711 clear_marks (void)
1713 unsigned order;
1715 for (order = 2; order < NUM_ORDERS; order++)
1717 page_entry *p;
1719 for (p = G.pages[order]; p != NULL; p = p->next)
1721 size_t num_objects = OBJECTS_IN_PAGE (p);
1722 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1724 /* The data should be page-aligned. */
1725 gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
1727 /* Pages that aren't in the topmost context are not collected;
1728 nevertheless, we need their in-use bit vectors to store GC
1729 marks. So, back them up first. */
1730 if (p->context_depth < G.context_depth)
1732 if (! save_in_use_p (p))
1733 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1734 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1737 /* Reset the number of free objects and clear the
1738 in-use bits. These will be adjusted by mark_obj. */
1739 p->num_free_objects = num_objects;
1740 memset (p->in_use_p, 0, bitmap_size);
1742 /* Make sure the one-past-the-end bit is always set. */
1743 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1744 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1749 /* Free all empty pages. Partially empty pages need no attention
1750 because the `mark' bit doubles as an `unused' bit. */
1752 static void
1753 sweep_pages (void)
1755 unsigned order;
1757 for (order = 2; order < NUM_ORDERS; order++)
1759 /* The last page-entry to consider, regardless of entries
1760 placed at the end of the list. */
1761 page_entry * const last = G.page_tails[order];
1763 size_t num_objects;
1764 size_t live_objects;
1765 page_entry *p, *previous;
1766 int done;
1768 p = G.pages[order];
1769 if (p == NULL)
1770 continue;
1772 previous = NULL;
1775 page_entry *next = p->next;
1777 /* Loop until all entries have been examined. */
1778 done = (p == last);
1780 num_objects = OBJECTS_IN_PAGE (p);
1782 /* Add all live objects on this page to the count of
1783 allocated memory. */
1784 live_objects = num_objects - p->num_free_objects;
1786 G.allocated += OBJECT_SIZE (order) * live_objects;
1788 /* Only objects on pages in the topmost context should get
1789 collected. */
1790 if (p->context_depth < G.context_depth)
1793 /* Remove the page if it's empty. */
1794 else if (live_objects == 0)
1796 /* If P was the first page in the list, then NEXT
1797 becomes the new first page in the list, otherwise
1798 splice P out of the forward pointers. */
1799 if (! previous)
1800 G.pages[order] = next;
1801 else
1802 previous->next = next;
1804 /* Splice P out of the back pointers too. */
1805 if (next)
1806 next->prev = previous;
1808 /* Are we removing the last element? */
1809 if (p == G.page_tails[order])
1810 G.page_tails[order] = previous;
1811 free_page (p);
1812 p = previous;
1815 /* If the page is full, move it to the end. */
1816 else if (p->num_free_objects == 0)
1818 /* Don't move it if it's already at the end. */
1819 if (p != G.page_tails[order])
1821 /* Move p to the end of the list. */
1822 p->next = NULL;
1823 p->prev = G.page_tails[order];
1824 G.page_tails[order]->next = p;
1826 /* Update the tail pointer... */
1827 G.page_tails[order] = p;
1829 /* ... and the head pointer, if necessary. */
1830 if (! previous)
1831 G.pages[order] = next;
1832 else
1833 previous->next = next;
1835 /* And update the backpointer in NEXT if necessary. */
1836 if (next)
1837 next->prev = previous;
1839 p = previous;
1843 /* If we've fallen through to here, it's a page in the
1844 topmost context that is neither full nor empty. Such a
1845 page must precede pages at lesser context depth in the
1846 list, so move it to the head. */
1847 else if (p != G.pages[order])
1849 previous->next = p->next;
1851 /* Update the backchain in the next node if it exists. */
1852 if (p->next)
1853 p->next->prev = previous;
1855 /* Move P to the head of the list. */
1856 p->next = G.pages[order];
1857 p->prev = NULL;
1858 G.pages[order]->prev = p;
1860 /* Update the head pointer. */
1861 G.pages[order] = p;
1863 /* Are we moving the last element? */
1864 if (G.page_tails[order] == p)
1865 G.page_tails[order] = previous;
1866 p = previous;
1869 previous = p;
1870 p = next;
1872 while (! done);
1874 /* Now, restore the in_use_p vectors for any pages from contexts
1875 other than the current one. */
1876 for (p = G.pages[order]; p; p = p->next)
1877 if (p->context_depth != G.context_depth)
1878 ggc_recalculate_in_use_p (p);
1882 #ifdef ENABLE_GC_CHECKING
1883 /* Clobber all free objects. */
1885 static void
1886 poison_pages (void)
1888 unsigned order;
1890 for (order = 2; order < NUM_ORDERS; order++)
1892 size_t size = OBJECT_SIZE (order);
1893 page_entry *p;
1895 for (p = G.pages[order]; p != NULL; p = p->next)
1897 size_t num_objects;
1898 size_t i;
1900 if (p->context_depth != G.context_depth)
1901 /* Since we don't do any collection for pages in pushed
1902 contexts, there's no need to do any poisoning. And
1903 besides, the IN_USE_P array isn't valid until we pop
1904 contexts. */
1905 continue;
1907 num_objects = OBJECTS_IN_PAGE (p);
1908 for (i = 0; i < num_objects; i++)
1910 size_t word, bit;
1911 word = i / HOST_BITS_PER_LONG;
1912 bit = i % HOST_BITS_PER_LONG;
1913 if (((p->in_use_p[word] >> bit) & 1) == 0)
1915 char *object = p->page + i * size;
1917 /* Keep poison-by-write when we expect to use Valgrind,
1918 so the exact same memory semantics is kept, in case
1919 there are memory errors. We override this request
1920 below. */
1921 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
1922 size));
1923 memset (object, 0xa5, size);
1925 /* Drop the handle to avoid handle leak. */
1926 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
1932 #else
1933 #define poison_pages()
1934 #endif
1936 #ifdef ENABLE_GC_ALWAYS_COLLECT
1937 /* Validate that the reportedly free objects actually are. */
1939 static void
1940 validate_free_objects (void)
1942 struct free_object *f, *next, *still_free = NULL;
1944 for (f = G.free_object_list; f ; f = next)
1946 page_entry *pe = lookup_page_table_entry (f->object);
1947 size_t bit, word;
1949 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
1950 word = bit / HOST_BITS_PER_LONG;
1951 bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
	 do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
	 free_object entry, so that we can verify that the address
	 isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
	{
	  f->next = still_free;
	  still_free = f;
	}
      else
	free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;
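  /* For instance, with --param ggc-min-heapsize=4096 (kilobytes) and
     --param ggc-min-expand=30 (percent), the floor above is 4 MiB and
     MIN_EXPAND is 1.2 MiB, so no collection happens until G.allocated
     exceeds roughly 5.2 MiB.  (The actual defaults for both params
     are derived from the host's physical memory at startup.)  */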
  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
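/* That is, values below 10240 print unscaled with a blank label,
   values below 10 MiB print in kibibytes ('k'), and anything larger
   prints in mebibytes ('M'): e.g. SCALE (2000000) is 1953 and
   STAT_LABEL (2000000) is 'k'.  */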
void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
	   "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s  %10s  %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
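      /* The overhead per page is its bookkeeping structure: the
	 page_entry itself plus the in-use bitmap that trails it; the
	 "- sizeof (long)" discounts the one bitmap word already
	 counted inside sizeof (page_entry).  */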
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL (G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));
#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead:                           %10lld\n",
	     G.stats.total_overhead);
    fprintf (stderr, "Total Allocated:                          %10lld\n",
	     G.stats.total_allocated);

    fprintf (stderr, "Total Overhead  under  32B:               %10lld\n",
	     G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B:               %10lld\n",
	     G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B:               %10lld\n",
	     G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B:               %10lld\n",
	     G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B:               %10lld\n",
	     G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:               %10lld\n",
	     G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
	{
	  fprintf (stderr, "Total Overhead  page size %7lu:      %10lld\n",
		   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_overhead_per_order[i]);
	  fprintf (stderr, "Total Allocated page size %7lu:      %10lld\n",
		   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_allocated_per_order[i]);
	}
  }
#endif
}
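/* The routines below implement this collector's half of the
   precompiled-header (PCH) interface declared in ggc.h: on write we
   count objects per allocation order, lay each order's objects out
   contiguously, and record the per-order totals in the on-disk
   structure below; on read we rebuild page-table entries over the
   mapped-in image.  */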
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}
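/* The size-to-order computation above mirrors the one on the normal
   allocation path (ggc_internal_alloc): sizes below NUM_SIZE_LOOKUP
   use the precomputed size_lookup table, while larger requests fall
   back to a linear search of the order sizes.  */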
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}
void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}
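/* Each order's region is padded out to a page boundary, matching
   ggc_pch_total_size above.  For example, with a 4096-byte page size,
   three 512-byte objects of one order reserve ROUND_UP (1536, 4096)
   = 4096 bytes, and the next order's base starts on the next page.  */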
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}
void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
	 than most padding requests as the source for our null bytes.  This
	 permits us to do the padding with fwrite() rather than fseek(), and
	 limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
	{
	  if (fwrite (emptyBytes, 1, padding, f) != padding)
	    fatal_error ("can%'t write PCH file");
	}
      else
	{
	  /* Larger than our buffer?  Just default to fseek.  */
	  if (fseek (f, padding, SEEK_CUR) != 0)
	    fatal_error ("can%'t write PCH file");
	}
    }
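  /* Once the last object of this order has been written, skip over
     the page-alignment padding at the end of the order's region, so
     that the file stays in sync with the layout computed by
     ggc_pch_total_size and ggc_pch_this_base.  */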
  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can%'t write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");
  free (d);
}
/* Move the PCH PTE entries just added to the end of by_depth to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));
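  /* That is, [old-0 .. old-N, new-0 .. new-M] becomes
     [new-0 .. new-M, old-0 .. old-N] in both arrays, since the PCH
     entries belong to context 0 and so must come first.  */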
  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot only if we have old PTEs, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }
  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);
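      /* The two loops above set all NUM_OBJS + 1 bits in the in-use
	 bitmap (including the extra bit past the last object that the
	 bitmap always reserves): every PCH object is live, and since
	 these pages are at context depth 0 they are never swept.  */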
      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays; later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *) addr;
}
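/* Stub zone definitions.  As best we can tell, these exist so that
   code written against the zone collector's interface (see
   ggc-zone.c) still links when the page-based collector is in use;
   the page collector itself never looks at them.  */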
struct alloc_zone
{
  int dummy;
};

struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;