gcc/ggc-page.c
1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
3 2010 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "diagnostic-core.h"
29 #include "flags.h"
30 #include "ggc.h"
31 #include "ggc-internal.h"
32 #include "timevar.h"
33 #include "params.h"
34 #include "tree-flow.h"
35 #include "cfgloop.h"
36 #include "plugin.h"
38 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
39 file open. Prefer either to valloc. */
40 #ifdef HAVE_MMAP_ANON
41 # undef HAVE_MMAP_DEV_ZERO
42 # define USING_MMAP
43 #endif
45 #ifdef HAVE_MMAP_DEV_ZERO
46 # define USING_MMAP
47 #endif
49 #ifndef USING_MMAP
50 #define USING_MALLOC_PAGE_GROUPS
51 #endif
53 #if defined(HAVE_MADVISE) && defined(MADV_DONTNEED)
54 # define USING_MADVISE
55 #endif
57 /* Strategy:
59 This garbage-collecting allocator allocates objects on one of a set
60 of pages. Each page can allocate objects of a single size only;
61    available sizes are powers of two starting at eight bytes.  The size
62 of an allocation request is rounded up to the next power of two
63 (`order'), and satisfied from the appropriate page.
65 Each page is recorded in a page-entry, which also maintains an
66 in-use bitmap of object positions on the page. This allows the
67 allocation state of a particular object to be flipped without
68 touching the page itself.
70 Each page-entry also has a context depth, which is used to track
71 pushing and popping of allocation contexts. Only objects allocated
72 in the current (highest-numbered) context may be collected.
74 Page entries are arranged in an array of singly-linked lists. The
75 array is indexed by the allocation size, in bits, of the pages on
76 it; i.e. all pages on a list allocate objects of the same size.
77 Pages are ordered on the list such that all non-full pages precede
78 all full pages, with non-full pages arranged in order of decreasing
79 context depth.
81 Empty pages (of all orders) are kept on a single page cache list,
82 and are considered first when new pages are required; they are
83 deallocated at the start of the next collection if they haven't
84 been recycled by then. */
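/* Illustrative example (assuming 4K host pages and ignoring the extra,
   non-power-of-two object sizes registered further below): a 100-byte
   request is rounded up to 128 bytes (order 7), so the page that
   satisfies it holds 4096 / 128 = 32 objects, and its in-use bitmap
   tracks those 32 objects plus one sentinel bit.  */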
86 /* Define GGC_DEBUG_LEVEL to print debugging information.
87 0: No debugging output.
88 1: GC statistics only.
89 2: Page-entry allocations/deallocations as well.
90 3: Object allocations as well.
91 4: Object marks as well. */
92 #define GGC_DEBUG_LEVEL (0)
94 #ifndef HOST_BITS_PER_PTR
95 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
96 #endif
99 /* A two-level tree is used to look up the page-entry for a given
100 pointer. Two chunks of the pointer's bits are extracted to index
101 the first and second levels of the tree, as follows:
103                                  HOST_PAGE_SIZE_BITS
104                          32             |      |
105       msb +----------------+----+------+------+ lsb
106                              |        |
107                       PAGE_L1_BITS    |
108                                       |
109                                PAGE_L2_BITS
111 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
112 pages are aligned on system page boundaries. The next most
113 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
114 index values in the lookup table, respectively.
116 For 32-bit architectures and the settings below, there are no
117 leftover bits. For architectures with wider pointers, the lookup
118 tree points to a list of pages, which must be scanned to find the
119 correct one. */
121 #define PAGE_L1_BITS (8)
122 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
123 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
124 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
126 #define LOOKUP_L1(p) \
127 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
129 #define LOOKUP_L2(p) \
130 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
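/* Worked example (illustrative; assumes 4K pages, so G.lg_pagesize is 12
   and PAGE_L2_BITS is 12): for the 32-bit address 0xdeadbeef, LOOKUP_L1
   yields 0xde, LOOKUP_L2 yields 0xadb, and the low 0xeef bits are the
   offset within the page.  */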
132 /* The number of objects per allocation page, for objects on a page of
133 the indicated ORDER. */
134 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
136 /* The number of objects in P. */
137 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
139 /* The size of an object on a page of the indicated ORDER. */
140 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
142 /* For speed, we avoid doing a general integer divide to locate the
143 offset in the allocation bitmap, by precalculating numbers M, S
144 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
145 within the page which is evenly divisible by the object size Z. */
146 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
147 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
148 #define OFFSET_TO_BIT(OFFSET, ORDER) \
149 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
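/* Worked example of the reciprocal trick (illustrative; the arithmetic is
   carried out modulo 2^32 as in the comment above): for an object size of
   24 = 3 * 2^3, DIV_SHIFT is 3 and DIV_MULT is the multiplicative inverse
   of 3, which is 0xaaaaaaab modulo 2^32.  For the offset 48,
   (48 * 0xaaaaaaab) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24.  */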
151 /* We use this structure to determine the alignment required for
152 allocations. For power-of-two sized allocations, that's not a
153 problem, but it does matter for odd-sized allocations.
154 We do not care about alignment for floating-point types. */
156 struct max_alignment {
157 char c;
158 union {
159 HOST_WIDEST_INT i;
160 void *p;
161 } u;
162 };
164 /* The biggest alignment required. */
166 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
169 /* The number of extra orders, not corresponding to power-of-two sized
170 objects. */
172 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
174 #define RTL_SIZE(NSLOTS) \
175 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
177 #define TREE_EXP_SIZE(OPS) \
178 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
180 /* The Ith entry is the maximum size of an object to be stored in the
181 Ith extra order. Adding a new entry to this array is the *only*
182 thing you need to do to add a new special allocation size. */
184 static const size_t extra_order_size_table[] = {
185 /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
186 There are a lot of structures with these sizes and explicitly
187 listing them risks orders being dropped because they changed size. */
188 MAX_ALIGNMENT * 3,
189 MAX_ALIGNMENT * 5,
190 MAX_ALIGNMENT * 6,
191 MAX_ALIGNMENT * 7,
192 MAX_ALIGNMENT * 9,
193 MAX_ALIGNMENT * 10,
194 MAX_ALIGNMENT * 11,
195 MAX_ALIGNMENT * 12,
196 MAX_ALIGNMENT * 13,
197 MAX_ALIGNMENT * 14,
198 MAX_ALIGNMENT * 15,
199 sizeof (struct tree_decl_non_common),
200 sizeof (struct tree_field_decl),
201 sizeof (struct tree_parm_decl),
202 sizeof (struct tree_var_decl),
203 sizeof (struct tree_type_non_common),
204 sizeof (struct function),
205 sizeof (struct basic_block_def),
206 sizeof (struct cgraph_node),
207 sizeof (struct loop),
208 };
210 /* The total number of orders. */
212 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
214 /* Compute the smallest nonnegative number which when added to X gives
215 a multiple of F. */
217 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
219 /* Compute the smallest multiple of F that is >= X. */
221 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
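/* For instance, ROUND_UP_VALUE (10, 8) == 6 and ROUND_UP (10, 8) == 16.  */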
223 /* The Ith entry is the number of objects on a page of order I. */
225 static unsigned objects_per_page_table[NUM_ORDERS];
227 /* The Ith entry is the size of an object on a page of order I. */
229 static size_t object_size_table[NUM_ORDERS];
231 /* The Ith entry is a pair of numbers (mult, shift) such that
232 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
233 for all k evenly divisible by OBJECT_SIZE(I). */
235 static struct
236 {
237 size_t mult;
238 unsigned int shift;
239 }
240 inverse_table[NUM_ORDERS];
242 /* A page_entry records the status of an allocation page. This
243 structure is dynamically sized to fit the bitmap in_use_p. */
244 typedef struct page_entry
245 {
246 /* The next page-entry with objects of the same size, or NULL if
247 this is the last page-entry. */
248 struct page_entry *next;
250 /* The previous page-entry with objects of the same size, or NULL if
251 this is the first page-entry. The PREV pointer exists solely to
252 keep the cost of ggc_free manageable. */
253 struct page_entry *prev;
255 /* The number of bytes allocated. (This will always be a multiple
256 of the host system page size.) */
257 size_t bytes;
259 /* The address at which the memory is allocated. */
260 char *page;
262 #ifdef USING_MALLOC_PAGE_GROUPS
263 /* Back pointer to the page group this page came from. */
264 struct page_group *group;
265 #endif
267 /* This is the index in the by_depth varray where this page table
268 can be found. */
269 unsigned long index_by_depth;
271 /* Context depth of this page. */
272 unsigned short context_depth;
274 /* The number of free objects remaining on this page. */
275 unsigned short num_free_objects;
277 /* A likely candidate for the bit position of a free object for the
278 next allocation from this page. */
279 unsigned short next_bit_hint;
281 /* The lg of size of objects allocated from this page. */
282 unsigned char order;
284 /* Discarded page? */
285 bool discarded;
287 /* A bit vector indicating whether or not objects are in use. The
288 Nth bit is one if the Nth object on this page is allocated. This
289 array is dynamically sized. */
290 unsigned long in_use_p[1];
291 } page_entry;
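/* Note: in_use_p[1] above is the pre-C99 "flexible array" idiom.  alloc_page
   below allocates each entry with
   sizeof (page_entry) - sizeof (long) + BITMAP_SIZE (num_objects + 1)
   bytes, so indexing past element 0 of in_use_p is intentional.  */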
293 #ifdef USING_MALLOC_PAGE_GROUPS
294 /* A page_group describes a large allocation from malloc, from which
295 we parcel out aligned pages. */
296 typedef struct page_group
297 {
298 /* A linked list of all extant page groups. */
299 struct page_group *next;
301 /* The address we received from malloc. */
302 char *allocation;
304 /* The size of the block. */
305 size_t alloc_size;
307 /* A bitmask of pages in use. */
308 unsigned int in_use;
309 } page_group;
310 #endif
312 #if HOST_BITS_PER_PTR <= 32
314 /* On 32-bit hosts, we use a two level page table, as pictured above. */
315 typedef page_entry **page_table[PAGE_L1_SIZE];
317 #else
319 /* On 64-bit hosts, we use the same two level page tables plus a linked
320 list that disambiguates the top 32-bits. There will almost always be
321 exactly one entry in the list. */
322 typedef struct page_table_chain
323 {
324 struct page_table_chain *next;
325 size_t high_bits;
326 page_entry **table[PAGE_L1_SIZE];
327 } *page_table;
329 #endif
331 #ifdef ENABLE_GC_ALWAYS_COLLECT
332 /* List of free objects to be verified as actually free on the
333 next collection. */
334 struct free_object
335 {
336 void *object;
337 struct free_object *next;
338 };
339 #endif
341 /* The rest of the global variables. */
342 static struct globals
343 {
344 /* The Nth element in this array is a page with objects of size 2^N.
345 If there are any pages with free objects, they will be at the
346 head of the list. NULL if there are no page-entries for this
347 object size. */
348 page_entry *pages[NUM_ORDERS];
350 /* The Nth element in this array is the last page with objects of
351 size 2^N. NULL if there are no page-entries for this object
352 size. */
353 page_entry *page_tails[NUM_ORDERS];
355 /* Lookup table for associating allocation pages with object addresses. */
356 page_table lookup;
358 /* The system's page size. */
359 size_t pagesize;
360 size_t lg_pagesize;
362 /* Bytes currently allocated. */
363 size_t allocated;
365 /* Bytes currently allocated at the end of the last collection. */
366 size_t allocated_last_gc;
368 /* Total amount of memory mapped. */
369 size_t bytes_mapped;
371 /* Bit N set if any allocations have been done at context depth N. */
372 unsigned long context_depth_allocations;
374 /* Bit N set if any collections have been done at context depth N. */
375 unsigned long context_depth_collections;
377 /* The current depth in the context stack. */
378 unsigned short context_depth;
380 /* A file descriptor open to /dev/zero for reading. */
381 #if defined (HAVE_MMAP_DEV_ZERO)
382 int dev_zero_fd;
383 #endif
385 /* A cache of free system pages. */
386 page_entry *free_pages;
388 #ifdef USING_MALLOC_PAGE_GROUPS
389 page_group *page_groups;
390 #endif
392 /* The file descriptor for debugging output. */
393 FILE *debug_file;
395 /* Current number of elements in use in depth below. */
396 unsigned int depth_in_use;
398 /* Maximum number of elements that can be used before resizing. */
399 unsigned int depth_max;
401 /* Each element of this array is an index in by_depth where the given
402 depth starts. This structure is indexed by that given depth we
403 are interested in. */
404 unsigned int *depth;
406 /* Current number of elements in use in by_depth below. */
407 unsigned int by_depth_in_use;
409 /* Maximum number of elements that can be used before resizing. */
410 unsigned int by_depth_max;
412 /* Each element of this array is a pointer to a page_entry, all
413 page_entries can be found in here by increasing depth.
414 index_by_depth in the page_entry is the index into this data
415 structure where that page_entry can be found. This is used to
416 speed up finding all page_entries at a particular depth. */
417 page_entry **by_depth;
419 /* Each element is a pointer to the saved in_use_p bits, if any,
420 zero otherwise. We allocate them all together, to enable a
421 better runtime data access pattern. */
422 unsigned long **save_in_use;
424 #ifdef ENABLE_GC_ALWAYS_COLLECT
425 /* List of free objects to be verified as actually free on the
426 next collection. */
427 struct free_object *free_object_list;
428 #endif
430 #ifdef GATHER_STATISTICS
431 struct
432 {
433 /* Total GC-allocated memory. */
434 unsigned long long total_allocated;
435 /* Total overhead for GC-allocated memory. */
436 unsigned long long total_overhead;
438 /* Total allocations and overhead for sizes less than 32, 64 and 128.
439 These sizes are interesting because they are typical cache line
440 sizes. */
442 unsigned long long total_allocated_under32;
443 unsigned long long total_overhead_under32;
445 unsigned long long total_allocated_under64;
446 unsigned long long total_overhead_under64;
448 unsigned long long total_allocated_under128;
449 unsigned long long total_overhead_under128;
451 /* The allocations for each of the allocation orders. */
452 unsigned long long total_allocated_per_order[NUM_ORDERS];
454 /* The overhead for each of the allocation orders. */
455 unsigned long long total_overhead_per_order[NUM_ORDERS];
456 } stats;
457 #endif
458 } G;
460 /* The size in bytes required to maintain a bitmap for the objects
461 on a page-entry. */
462 #define BITMAP_SIZE(Num_objects) \
463 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
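/* For example, a page of 512 eight-byte objects needs
   BITMAP_SIZE (512 + 1) == CEIL (513, 64) * sizeof (long) == 72 bytes
   on an LP64 host; the extra bit is the one-past-the-end sentinel set
   in alloc_page.  */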
465 /* Allocate pages in chunks of this size, to throttle calls to memory
466 allocation routines. The first page is used, the rest go onto the
467 free list. This cannot be larger than HOST_BITS_PER_INT for the
468 in_use bitmask for page_group. Hosts that need a different value
469 can override this by defining GGC_QUIRE_SIZE explicitly. */
470 #ifndef GGC_QUIRE_SIZE
471 # ifdef USING_MMAP
472 # define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */
473 # else
474 # define GGC_QUIRE_SIZE 16
475 # endif
476 #endif
478 /* Initial guess as to how many page table entries we might need. */
479 #define INITIAL_PTE_COUNT 128
481 static int ggc_allocated_p (const void *);
482 static page_entry *lookup_page_table_entry (const void *);
483 static void set_page_table_entry (void *, page_entry *);
484 #ifdef USING_MMAP
485 static char *alloc_anon (char *, size_t);
486 #endif
487 #ifdef USING_MALLOC_PAGE_GROUPS
488 static size_t page_group_index (char *, char *);
489 static void set_page_group_in_use (page_group *, char *);
490 static void clear_page_group_in_use (page_group *, char *);
491 #endif
492 static struct page_entry * alloc_page (unsigned);
493 static void free_page (struct page_entry *);
494 static void release_pages (void);
495 static void clear_marks (void);
496 static void sweep_pages (void);
497 static void ggc_recalculate_in_use_p (page_entry *);
498 static void compute_inverse (unsigned);
499 static inline void adjust_depth (void);
500 static void move_ptes_to_front (int, int);
502 void debug_print_page_list (int);
503 static void push_depth (unsigned int);
504 static void push_by_depth (page_entry *, unsigned long *);
506 /* Push an entry onto G.depth. */
508 inline static void
509 push_depth (unsigned int i)
511 if (G.depth_in_use >= G.depth_max)
513 G.depth_max *= 2;
514 G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
516 G.depth[G.depth_in_use++] = i;
519 /* Push an entry onto G.by_depth and G.save_in_use. */
521 inline static void
522 push_by_depth (page_entry *p, unsigned long *s)
524 if (G.by_depth_in_use >= G.by_depth_max)
526 G.by_depth_max *= 2;
527 G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
528 G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
529 G.by_depth_max);
531 G.by_depth[G.by_depth_in_use] = p;
532 G.save_in_use[G.by_depth_in_use++] = s;
535 #if (GCC_VERSION < 3001)
536 #define prefetch(X) ((void) X)
537 #else
538 #define prefetch(X) __builtin_prefetch (X)
539 #endif
541 #define save_in_use_p_i(__i) \
542 (G.save_in_use[__i])
543 #define save_in_use_p(__p) \
544 (save_in_use_p_i (__p->index_by_depth))
546 /* Returns nonzero if P was allocated in GC'able memory. */
548 static inline int
549 ggc_allocated_p (const void *p)
551 page_entry ***base;
552 size_t L1, L2;
554 #if HOST_BITS_PER_PTR <= 32
555 base = &G.lookup[0];
556 #else
557 page_table table = G.lookup;
558 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
559 while (1)
561 if (table == NULL)
562 return 0;
563 if (table->high_bits == high_bits)
564 break;
565 table = table->next;
567 base = &table->table[0];
568 #endif
570 /* Extract the level 1 and 2 indices. */
571 L1 = LOOKUP_L1 (p);
572 L2 = LOOKUP_L2 (p);
574 return base[L1] && base[L1][L2];
577 /* Traverse the page table and find the entry for a page.
578 Die (probably) if the object wasn't allocated via GC. */
580 static inline page_entry *
581 lookup_page_table_entry (const void *p)
583 page_entry ***base;
584 size_t L1, L2;
586 #if HOST_BITS_PER_PTR <= 32
587 base = &G.lookup[0];
588 #else
589 page_table table = G.lookup;
590 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
591 while (table->high_bits != high_bits)
592 table = table->next;
593 base = &table->table[0];
594 #endif
596 /* Extract the level 1 and 2 indices. */
597 L1 = LOOKUP_L1 (p);
598 L2 = LOOKUP_L2 (p);
600 return base[L1][L2];
603 /* Set the page table entry for a page. */
605 static void
606 set_page_table_entry (void *p, page_entry *entry)
608 page_entry ***base;
609 size_t L1, L2;
611 #if HOST_BITS_PER_PTR <= 32
612 base = &G.lookup[0];
613 #else
614 page_table table;
615 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
616 for (table = G.lookup; table; table = table->next)
617 if (table->high_bits == high_bits)
618 goto found;
620 /* Not found -- allocate a new table. */
621 table = XCNEW (struct page_table_chain);
622 table->next = G.lookup;
623 table->high_bits = high_bits;
624 G.lookup = table;
625 found:
626 base = &table->table[0];
627 #endif
629 /* Extract the level 1 and 2 indices. */
630 L1 = LOOKUP_L1 (p);
631 L2 = LOOKUP_L2 (p);
633 if (base[L1] == NULL)
634 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
636 base[L1][L2] = entry;
639 /* Prints the page-entry for object size ORDER, for debugging. */
641 DEBUG_FUNCTION void
642 debug_print_page_list (int order)
644 page_entry *p;
645 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
646 (void *) G.page_tails[order]);
647 p = G.pages[order];
648 while (p != NULL)
650 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
651 p->num_free_objects);
652 p = p->next;
654 printf ("NULL\n");
655 fflush (stdout);
658 #ifdef USING_MMAP
659 /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
660 (if non-null). The ifdef structure here is intended to cause a
661 compile error unless exactly one of the HAVE_* is defined. */
663 static inline char *
664 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
666 #ifdef HAVE_MMAP_ANON
667 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
668 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
669 #endif
670 #ifdef HAVE_MMAP_DEV_ZERO
671 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
672 MAP_PRIVATE, G.dev_zero_fd, 0);
673 #endif
675 if (page == (char *) MAP_FAILED)
677 perror ("virtual memory exhausted");
678 exit (FATAL_EXIT_CODE);
681 /* Remember that we allocated this memory. */
682 G.bytes_mapped += size;
684 /* Pretend we don't have access to the allocated pages. We'll enable
685 access to smaller pieces of the area in ggc_internal_alloc. Discard the
686 handle to avoid handle leak. */
687 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
689 return page;
691 #endif
692 #ifdef USING_MALLOC_PAGE_GROUPS
693 /* Compute the index for this page into the page group. */
695 static inline size_t
696 page_group_index (char *allocation, char *page)
698 return (size_t) (page - allocation) >> G.lg_pagesize;
701 /* Set and clear the in_use bit for this page in the page group. */
703 static inline void
704 set_page_group_in_use (page_group *group, char *page)
706 group->in_use |= 1 << page_group_index (group->allocation, page);
709 static inline void
710 clear_page_group_in_use (page_group *group, char *page)
712 group->in_use &= ~(1 << page_group_index (group->allocation, page));
714 #endif
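/* Illustrative page-group bookkeeping (malloc fallback only; assumes 4K
   pages): for a page that starts three pages into its group's allocation,
   page_group_index returns 3, so set_page_group_in_use ORs 1 << 3 into
   group->in_use and clear_page_group_in_use masks it back out.  */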
716 /* Allocate a new page for allocating objects of size 2^ORDER,
717 and return an entry for it. The entry is not added to the
718 appropriate page_table list. */
720 static inline struct page_entry *
721 alloc_page (unsigned order)
723 struct page_entry *entry, *p, **pp;
724 char *page;
725 size_t num_objects;
726 size_t bitmap_size;
727 size_t page_entry_size;
728 size_t entry_size;
729 #ifdef USING_MALLOC_PAGE_GROUPS
730 page_group *group;
731 #endif
733 num_objects = OBJECTS_PER_PAGE (order);
734 bitmap_size = BITMAP_SIZE (num_objects + 1);
735 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
736 entry_size = num_objects * OBJECT_SIZE (order);
737 if (entry_size < G.pagesize)
738 entry_size = G.pagesize;
740 entry = NULL;
741 page = NULL;
743 /* Check the list of free pages for one we can use. */
744 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
745 if (p->bytes == entry_size)
746 break;
748 if (p != NULL)
750 if (p->discarded)
751 G.bytes_mapped += p->bytes;
752 p->discarded = false;
754 /* Recycle the allocated memory from this page ... */
755 *pp = p->next;
756 page = p->page;
758 #ifdef USING_MALLOC_PAGE_GROUPS
759 group = p->group;
760 #endif
762 /* ... and, if possible, the page entry itself. */
763 if (p->order == order)
765 entry = p;
766 memset (entry, 0, page_entry_size);
768 else
769 free (p);
771 #ifdef USING_MMAP
772 else if (entry_size == G.pagesize)
774 /* We want just one page. Allocate a bunch of them and put the
775 extras on the freelist. (Can only do this optimization with
776 mmap for backing store.) */
777 struct page_entry *e, *f = G.free_pages;
778 int i;
780 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
782 /* This loop counts down so that the chain will be in ascending
783 memory order. */
784 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
786 e = XCNEWVAR (struct page_entry, page_entry_size);
787 e->order = order;
788 e->bytes = G.pagesize;
789 e->page = page + (i << G.lg_pagesize);
790 e->next = f;
791 f = e;
794 G.free_pages = f;
796 else
797 page = alloc_anon (NULL, entry_size);
798 #endif
799 #ifdef USING_MALLOC_PAGE_GROUPS
800 else
802 /* Allocate a large block of memory and serve out the aligned
803 pages therein. This results in much less memory wastage
804 than the traditional implementation of valloc. */
806 char *allocation, *a, *enda;
807 size_t alloc_size, head_slop, tail_slop;
808 int multiple_pages = (entry_size == G.pagesize);
810 if (multiple_pages)
811 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
812 else
813 alloc_size = entry_size + G.pagesize - 1;
814 allocation = XNEWVEC (char, alloc_size);
816 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
817 head_slop = page - allocation;
818 if (multiple_pages)
819 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
820 else
821 tail_slop = alloc_size - entry_size - head_slop;
822 enda = allocation + alloc_size - tail_slop;
824 /* We allocated N pages, which are likely not aligned, leaving
825 us with N-1 usable pages. We plan to place the page_group
826 structure somewhere in the slop. */
827 if (head_slop >= sizeof (page_group))
828 group = (page_group *)page - 1;
829 else
831 /* We magically got an aligned allocation. Too bad, we have
832 to waste a page anyway. */
833 if (tail_slop == 0)
835 enda -= G.pagesize;
836 tail_slop += G.pagesize;
838 gcc_assert (tail_slop >= sizeof (page_group));
839 group = (page_group *)enda;
840 tail_slop -= sizeof (page_group);
843 /* Remember that we allocated this memory. */
844 group->next = G.page_groups;
845 group->allocation = allocation;
846 group->alloc_size = alloc_size;
847 group->in_use = 0;
848 G.page_groups = group;
849 G.bytes_mapped += alloc_size;
851 /* If we allocated multiple pages, put the rest on the free list. */
852 if (multiple_pages)
854 struct page_entry *e, *f = G.free_pages;
855 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
857 e = XCNEWVAR (struct page_entry, page_entry_size);
858 e->order = order;
859 e->bytes = G.pagesize;
860 e->page = a;
861 e->group = group;
862 e->next = f;
863 f = e;
865 G.free_pages = f;
868 #endif
870 if (entry == NULL)
871 entry = XCNEWVAR (struct page_entry, page_entry_size);
873 entry->bytes = entry_size;
874 entry->page = page;
875 entry->context_depth = G.context_depth;
876 entry->order = order;
877 entry->num_free_objects = num_objects;
878 entry->next_bit_hint = 1;
880 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
882 #ifdef USING_MALLOC_PAGE_GROUPS
883 entry->group = group;
884 set_page_group_in_use (group, page);
885 #endif
887 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
888 increment the hint. */
889 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
890 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
892 set_page_table_entry (page, entry);
894 if (GGC_DEBUG_LEVEL >= 2)
895 fprintf (G.debug_file,
896 "Allocating page at %p, object size=%lu, data %p-%p\n",
897 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
898 page + entry_size - 1);
900 return entry;
903 /* Adjust the size of G.depth so that no index greater than the one
904 used by the top of the G.by_depth is used. */
906 static inline void
907 adjust_depth (void)
909 page_entry *top;
911 if (G.by_depth_in_use)
913 top = G.by_depth[G.by_depth_in_use-1];
915 /* Peel back indices in depth that index into by_depth, so that
916 as new elements are added to by_depth, we note the indices
917 of those elements, if they are for new context depths. */
918 while (G.depth_in_use > (size_t)top->context_depth+1)
919 --G.depth_in_use;
923 /* For a page that is no longer needed, put it on the free page list. */
925 static void
926 free_page (page_entry *entry)
928 if (GGC_DEBUG_LEVEL >= 2)
929 fprintf (G.debug_file,
930 "Deallocating page at %p, data %p-%p\n", (void *) entry,
931 entry->page, entry->page + entry->bytes - 1);
933 /* Mark the page as inaccessible. Discard the handle to avoid handle
934 leak. */
935 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
937 set_page_table_entry (entry->page, NULL);
939 #ifdef USING_MALLOC_PAGE_GROUPS
940 clear_page_group_in_use (entry->group, entry->page);
941 #endif
943 if (G.by_depth_in_use > 1)
945 page_entry *top = G.by_depth[G.by_depth_in_use-1];
946 int i = entry->index_by_depth;
948 /* We cannot free a page from a context deeper than the current
949 one. */
950 gcc_assert (entry->context_depth == top->context_depth);
952 /* Put top element into freed slot. */
953 G.by_depth[i] = top;
954 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
955 top->index_by_depth = i;
957 --G.by_depth_in_use;
959 adjust_depth ();
961 entry->next = G.free_pages;
962 G.free_pages = entry;
965 /* Release the free page cache to the system. */
967 static void
968 release_pages (void)
970 #ifdef USING_MADVISE
971 page_entry *p, *start_p;
972 char *start;
973 size_t len;
975 for (p = G.free_pages; p; )
977 if (p->discarded)
979 p = p->next;
980 continue;
982 start = p->page;
983 len = p->bytes;
984 start_p = p;
985 p = p->next;
986 while (p && p->page == start + len)
988 len += p->bytes;
989 p = p->next;
991 /* Give the page back to the kernel, but don't free the mapping.
992 This avoids fragmentation in the virtual memory map of the
993 process. Next time we can reuse it by just touching it. */
994 madvise (start, len, MADV_DONTNEED);
995 /* Don't count those pages as mapped, so that we don't touch the
996 garbage collector's accounting unnecessarily. */
997 G.bytes_mapped -= len;
998 while (start_p != p)
1000 start_p->discarded = true;
1001 start_p = start_p->next;
1004 #endif
1005 #if defined(USING_MMAP) && !defined(USING_MADVISE)
1006 page_entry *p, *next;
1007 char *start;
1008 size_t len;
1010 /* Gather up adjacent pages so they are unmapped together. */
1011 p = G.free_pages;
1013 while (p)
1015 start = p->page;
1016 next = p->next;
1017 len = p->bytes;
1018 free (p);
1019 p = next;
1021 while (p && p->page == start + len)
1023 next = p->next;
1024 len += p->bytes;
1025 free (p);
1026 p = next;
1029 munmap (start, len);
1030 G.bytes_mapped -= len;
1033 G.free_pages = NULL;
1034 #endif
1035 #ifdef USING_MALLOC_PAGE_GROUPS
1036 page_entry **pp, *p;
1037 page_group **gp, *g;
1039 /* Remove all pages from free page groups from the list. */
1040 pp = &G.free_pages;
1041 while ((p = *pp) != NULL)
1042 if (p->group->in_use == 0)
1044 *pp = p->next;
1045 free (p);
1047 else
1048 pp = &p->next;
1050 /* Remove all free page groups, and release the storage. */
1051 gp = &G.page_groups;
1052 while ((g = *gp) != NULL)
1053 if (g->in_use == 0)
1055 *gp = g->next;
1056 G.bytes_mapped -= g->alloc_size;
1057 free (g->allocation);
1059 else
1060 gp = &g->next;
1061 #endif
1064 /* This table provides a fast way to determine ceil(log_2(size)) for
1065 allocation requests. The minimum allocation size is eight bytes. */
1066 #define NUM_SIZE_LOOKUP 512
1067 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1068 {
1069 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1070 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1071 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1072 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1073 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1074 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1075 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1076 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1077 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1078 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1079 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1080 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1081 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1082 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1083 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1084 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1085 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1086 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1087 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1088 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1089 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1090 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1091 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1092 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1093 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1094 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1095 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1096 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1097 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1098 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1099 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1100 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1101 };
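/* For example, size_lookup[100] == 7, i.e. a 100-byte request initially
   maps to the 128-byte order; init_ggc below then retargets many of these
   slots at the extra orders, so the table only describes the state before
   that adjustment.  */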
1103 /* For a given size of memory requested for allocation, return the
1104 actual size that is going to be allocated, as well as the size
1105 order. */
1107 static void
1108 ggc_round_alloc_size_1 (size_t requested_size,
1109 size_t *size_order,
1110 size_t *alloced_size)
1112 size_t order, object_size;
1114 if (requested_size < NUM_SIZE_LOOKUP)
1116 order = size_lookup[requested_size];
1117 object_size = OBJECT_SIZE (order);
1119 else
1121 order = 10;
1122 while (requested_size > (object_size = OBJECT_SIZE (order)))
1123 order++;
1126 if (size_order)
1127 *size_order = order;
1128 if (alloced_size)
1129 *alloced_size = object_size;
1132 /* For a given size of memory requested for allocation, return the
1133 actual size that is going to be allocated. */
1135 size_t
1136 ggc_round_alloc_size (size_t requested_size)
1138 size_t size = 0;
1140 ggc_round_alloc_size_1 (requested_size, NULL, &size);
1141 return size;
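/* A minimal usage sketch (illustrative only, not part of the allocator;
   the expected value assumes a host where MAX_ALIGNMENT is 8, so that
   MAX_ALIGNMENT * 3 == 24 is one of the extra orders installed by
   init_ggc).  */
#if 0
static void
example_round_alloc_size (void)
{
  /* A 20-byte request lands in the 24-byte extra order rather than the
     32-byte power-of-two order.  */
  size_t actual = ggc_round_alloc_size (20);
  gcc_assert (actual == 24);
}
#endif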
1144 /* Typed allocation function. Does nothing special in this collector. */
1146 void *
1147 ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
1148 MEM_STAT_DECL)
1150 return ggc_internal_alloc_stat (size PASS_MEM_STAT);
1153 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1155 void *
1156 ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
1158 size_t order, word, bit, object_offset, object_size;
1159 struct page_entry *entry;
1160 void *result;
1162 ggc_round_alloc_size_1 (size, &order, &object_size);
1164 /* If there are non-full pages for this size allocation, they are at
1165 the head of the list. */
1166 entry = G.pages[order];
1168 /* If there is no page for this object size, or all pages in this
1169 context are full, allocate a new page. */
1170 if (entry == NULL || entry->num_free_objects == 0)
1172 struct page_entry *new_entry;
1173 new_entry = alloc_page (order);
1175 new_entry->index_by_depth = G.by_depth_in_use;
1176 push_by_depth (new_entry, 0);
1178 /* We can skip context depths; if we do, make sure we go all the
1179 way to the new depth. */
1180 while (new_entry->context_depth >= G.depth_in_use)
1181 push_depth (G.by_depth_in_use-1);
1183 /* If this is the only entry, it's also the tail. If it is not
1184 the only entry, then we must update the PREV pointer of the
1185 ENTRY (G.pages[order]) to point to our new page entry. */
1186 if (entry == NULL)
1187 G.page_tails[order] = new_entry;
1188 else
1189 entry->prev = new_entry;
1191 /* Put new pages at the head of the page list. By definition the
1192 entry at the head of the list always has a NULL pointer. */
1193 new_entry->next = entry;
1194 new_entry->prev = NULL;
1195 entry = new_entry;
1196 G.pages[order] = new_entry;
1198 /* For a new page, we know the word and bit positions (in the
1199 in_use bitmap) of the first available object -- they're zero. */
1200 new_entry->next_bit_hint = 1;
1201 word = 0;
1202 bit = 0;
1203 object_offset = 0;
1205 else
1207 /* First try to use the hint left from the previous allocation
1208 to locate a clear bit in the in-use bitmap. We've made sure
1209 that the one-past-the-end bit is always set, so if the hint
1210 has run over, this test will fail. */
1211 unsigned hint = entry->next_bit_hint;
1212 word = hint / HOST_BITS_PER_LONG;
1213 bit = hint % HOST_BITS_PER_LONG;
1215 /* If the hint didn't work, scan the bitmap from the beginning. */
1216 if ((entry->in_use_p[word] >> bit) & 1)
1218 word = bit = 0;
1219 while (~entry->in_use_p[word] == 0)
1220 ++word;
1222 #if GCC_VERSION >= 3004
1223 bit = __builtin_ctzl (~entry->in_use_p[word]);
1224 #else
1225 while ((entry->in_use_p[word] >> bit) & 1)
1226 ++bit;
1227 #endif
1229 hint = word * HOST_BITS_PER_LONG + bit;
1232 /* Next time, try the next bit. */
1233 entry->next_bit_hint = hint + 1;
1235 object_offset = hint * object_size;
1238 /* Set the in-use bit. */
1239 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1241 /* Keep a running total of the number of free objects. If this page
1242 fills up, we may have to move it to the end of the list if the
1243 next page isn't full. If the next page is full, all subsequent
1244 pages are full, so there's no need to move it. */
1245 if (--entry->num_free_objects == 0
1246 && entry->next != NULL
1247 && entry->next->num_free_objects > 0)
1249 /* We have a new head for the list. */
1250 G.pages[order] = entry->next;
1252 /* We are moving ENTRY to the end of the page table list.
1253 The new page at the head of the list will have NULL in
1254 its PREV field and ENTRY will have NULL in its NEXT field. */
1255 entry->next->prev = NULL;
1256 entry->next = NULL;
1258 /* Append ENTRY to the tail of the list. */
1259 entry->prev = G.page_tails[order];
1260 G.page_tails[order]->next = entry;
1261 G.page_tails[order] = entry;
1264 /* Calculate the object's address. */
1265 result = entry->page + object_offset;
1266 #ifdef GATHER_STATISTICS
1267 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1268 result PASS_MEM_STAT);
1269 #endif
1271 #ifdef ENABLE_GC_CHECKING
1272 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1273 exact same semantics in presence of memory bugs, regardless of
1274 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1275 handle to avoid handle leak. */
1276 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1278 /* `Poison' the entire allocated object, including any padding at
1279 the end. */
1280 memset (result, 0xaf, object_size);
1282 /* Make the bytes after the end of the object inaccessible. Discard the
1283 handle to avoid handle leak. */
1284 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1285 object_size - size));
1286 #endif
1288 /* Tell Valgrind that the memory is there, but its content isn't
1289 defined. The bytes at the end of the object are still marked
1290 inaccessible. */
1291 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1293 /* Keep track of how many bytes are being allocated. This
1294 information is used in deciding when to collect. */
1295 G.allocated += object_size;
1297 /* For timevar statistics. */
1298 timevar_ggc_mem_total += object_size;
1300 #ifdef GATHER_STATISTICS
1302 size_t overhead = object_size - size;
1304 G.stats.total_overhead += overhead;
1305 G.stats.total_allocated += object_size;
1306 G.stats.total_overhead_per_order[order] += overhead;
1307 G.stats.total_allocated_per_order[order] += object_size;
1309 if (size <= 32)
1311 G.stats.total_overhead_under32 += overhead;
1312 G.stats.total_allocated_under32 += object_size;
1314 if (size <= 64)
1316 G.stats.total_overhead_under64 += overhead;
1317 G.stats.total_allocated_under64 += object_size;
1319 if (size <= 128)
1321 G.stats.total_overhead_under128 += overhead;
1322 G.stats.total_allocated_under128 += object_size;
1325 #endif
1327 if (GGC_DEBUG_LEVEL >= 3)
1328 fprintf (G.debug_file,
1329 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1330 (unsigned long) size, (unsigned long) object_size, result,
1331 (void *) entry);
1333 return result;
1336 /* Mark function for strings. */
1338 void
1339 gt_ggc_m_S (const void *p)
1341 page_entry *entry;
1342 unsigned bit, word;
1343 unsigned long mask;
1344 unsigned long offset;
1346 if (!p || !ggc_allocated_p (p))
1347 return;
1349 /* Look up the page on which the object is alloced. */
1350 entry = lookup_page_table_entry (p);
1351 gcc_assert (entry);
1353 /* Calculate the index of the object on the page; this is its bit
1354 position in the in_use_p bitmap. Note that because a char* might
1355 point to the middle of an object, we need special code here to
1356 make sure P points to the start of an object. */
1357 offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1358 if (offset)
1360 /* Here we've seen a char* which does not point to the beginning
1361 of an allocated object. We assume it points to the middle of
1362 a STRING_CST. */
1363 gcc_assert (offset == offsetof (struct tree_string, str));
1364 p = ((const char *) p) - offset;
1365 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1366 return;
1369 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1370 word = bit / HOST_BITS_PER_LONG;
1371 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1373 /* If the bit was previously set, skip it. */
1374 if (entry->in_use_p[word] & mask)
1375 return;
1377 /* Otherwise set it, and decrement the free object count. */
1378 entry->in_use_p[word] |= mask;
1379 entry->num_free_objects -= 1;
1381 if (GGC_DEBUG_LEVEL >= 4)
1382 fprintf (G.debug_file, "Marking %p\n", p);
1384 return;
1387 /* If P is not marked, marks it and return false. Otherwise return true.
1388 P must have been allocated by the GC allocator; it mustn't point to
1389 static objects, stack variables, or memory allocated with malloc. */
1391 int
1392 ggc_set_mark (const void *p)
1394 page_entry *entry;
1395 unsigned bit, word;
1396 unsigned long mask;
1398 /* Look up the page on which the object is alloced. If the object
1399 wasn't allocated by the collector, we'll probably die. */
1400 entry = lookup_page_table_entry (p);
1401 gcc_assert (entry);
1403 /* Calculate the index of the object on the page; this is its bit
1404 position in the in_use_p bitmap. */
1405 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1406 word = bit / HOST_BITS_PER_LONG;
1407 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1409 /* If the bit was previously set, skip it. */
1410 if (entry->in_use_p[word] & mask)
1411 return 1;
1413 /* Otherwise set it, and decrement the free object count. */
1414 entry->in_use_p[word] |= mask;
1415 entry->num_free_objects -= 1;
1417 if (GGC_DEBUG_LEVEL >= 4)
1418 fprintf (G.debug_file, "Marking %p\n", p);
1420 return 0;
1423 /* Return 1 if P has been marked, zero otherwise.
1424 P must have been allocated by the GC allocator; it mustn't point to
1425 static objects, stack variables, or memory allocated with malloc. */
1427 int
1428 ggc_marked_p (const void *p)
1430 page_entry *entry;
1431 unsigned bit, word;
1432 unsigned long mask;
1434 /* Look up the page on which the object is alloced. If the object
1435 wasn't allocated by the collector, we'll probably die. */
1436 entry = lookup_page_table_entry (p);
1437 gcc_assert (entry);
1439 /* Calculate the index of the object on the page; this is its bit
1440 position in the in_use_p bitmap. */
1441 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1442 word = bit / HOST_BITS_PER_LONG;
1443 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1445 return (entry->in_use_p[word] & mask) != 0;
1448 /* Return the size of the gc-able object P. */
1450 size_t
1451 ggc_get_size (const void *p)
1453 page_entry *pe = lookup_page_table_entry (p);
1454 return OBJECT_SIZE (pe->order);
1457 /* Release the memory for object P. */
1459 void
1460 ggc_free (void *p)
1462 page_entry *pe = lookup_page_table_entry (p);
1463 size_t order = pe->order;
1464 size_t size = OBJECT_SIZE (order);
1466 #ifdef GATHER_STATISTICS
1467 ggc_free_overhead (p);
1468 #endif
1470 if (GGC_DEBUG_LEVEL >= 3)
1471 fprintf (G.debug_file,
1472 "Freeing object, actual size=%lu, at %p on %p\n",
1473 (unsigned long) size, p, (void *) pe);
1475 #ifdef ENABLE_GC_CHECKING
1476 /* Poison the data, to indicate the data is garbage. */
1477 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1478 memset (p, 0xa5, size);
1479 #endif
1480 /* Let valgrind know the object is free. */
1481 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1483 #ifdef ENABLE_GC_ALWAYS_COLLECT
1484 /* In the completely-anal-checking mode, we do *not* immediately free
1485 the data, but instead verify that the data is *actually* not
1486 reachable the next time we collect. */
1488 struct free_object *fo = XNEW (struct free_object);
1489 fo->object = p;
1490 fo->next = G.free_object_list;
1491 G.free_object_list = fo;
1493 #else
1495 unsigned int bit_offset, word, bit;
1497 G.allocated -= size;
1499 /* Mark the object not-in-use. */
1500 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1501 word = bit_offset / HOST_BITS_PER_LONG;
1502 bit = bit_offset % HOST_BITS_PER_LONG;
1503 pe->in_use_p[word] &= ~(1UL << bit);
1505 if (pe->num_free_objects++ == 0)
1507 page_entry *p, *q;
1509 /* If the page is completely full, then it's supposed to
1510 be after all pages that aren't. Since we've freed one
1511 object from a page that was full, we need to move the
1512 page to the head of the list.
1514 PE is the node we want to move. Q is the previous node
1515 and P is the next node in the list. */
1516 q = pe->prev;
1517 if (q && q->num_free_objects == 0)
1519 p = pe->next;
1521 q->next = p;
1523 /* If PE was at the end of the list, then Q becomes the
1524 new end of the list. If PE was not the end of the
1525 list, then we need to update the PREV field for P. */
1526 if (!p)
1527 G.page_tails[order] = q;
1528 else
1529 p->prev = q;
1531 /* Move PE to the head of the list. */
1532 pe->next = G.pages[order];
1533 pe->prev = NULL;
1534 G.pages[order]->prev = pe;
1535 G.pages[order] = pe;
1538 /* Reset the hint bit to point to the only free object. */
1539 pe->next_bit_hint = bit_offset;
1542 #endif
1545 /* Subroutine of init_ggc which computes the pair of numbers used to
1546 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1548 This algorithm is taken from Granlund and Montgomery's paper
1549 "Division by Invariant Integers using Multiplication"
1550 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1551 constants). */
1553 static void
1554 compute_inverse (unsigned order)
1556 size_t size, inv;
1557 unsigned int e;
1559 size = OBJECT_SIZE (order);
1560 e = 0;
1561 while (size % 2 == 0)
1563 e++;
1564 size >>= 1;
1567 inv = size;
1568 while (inv * size != 1)
1569 inv = inv * (2 - inv*size);
1571 DIV_MULT (order) = inv;
1572 DIV_SHIFT (order) = e;
1575 /* Initialize the ggc-mmap allocator. */
1576 void
1577 init_ggc (void)
1579 unsigned order;
1581 G.pagesize = getpagesize();
1582 G.lg_pagesize = exact_log2 (G.pagesize);
1584 #ifdef HAVE_MMAP_DEV_ZERO
1585 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1586 if (G.dev_zero_fd == -1)
1587 internal_error ("open /dev/zero: %m");
1588 #endif
1590 #if 0
1591 G.debug_file = fopen ("ggc-mmap.debug", "w");
1592 #else
1593 G.debug_file = stdout;
1594 #endif
1596 #ifdef USING_MMAP
1597 /* StunOS has an amazing off-by-one error for the first mmap allocation
1598 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1599 believe, is an unaligned page allocation, which would cause us to
1600 hork badly if we tried to use it. */
1602 char *p = alloc_anon (NULL, G.pagesize);
1603 struct page_entry *e;
1604 if ((size_t)p & (G.pagesize - 1))
1606 /* How losing. Discard this one and try another. If we still
1607 can't get something useful, give up. */
1609 p = alloc_anon (NULL, G.pagesize);
1610 gcc_assert (!((size_t)p & (G.pagesize - 1)));
1613 /* We have a good page, might as well hold onto it... */
1614 e = XCNEW (struct page_entry);
1615 e->bytes = G.pagesize;
1616 e->page = p;
1617 e->next = G.free_pages;
1618 G.free_pages = e;
1620 #endif
1622 /* Initialize the object size table. */
1623 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1624 object_size_table[order] = (size_t) 1 << order;
1625 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1627 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1629 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1630 so that we're sure of getting aligned memory. */
1631 s = ROUND_UP (s, MAX_ALIGNMENT);
1632 object_size_table[order] = s;
1635 /* Initialize the objects-per-page and inverse tables. */
1636 for (order = 0; order < NUM_ORDERS; ++order)
1638 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1639 if (objects_per_page_table[order] == 0)
1640 objects_per_page_table[order] = 1;
1641 compute_inverse (order);
1644 /* Reset the size_lookup array to put appropriately sized objects in
1645 the special orders. All objects bigger than the previous power
1646 of two, but no greater than the special size, should go in the
1647 new order. */
1648 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1650 int o;
1651 int i;
1653 i = OBJECT_SIZE (order);
1654 if (i >= NUM_SIZE_LOOKUP)
1655 continue;
1657 for (o = size_lookup[i]; o == size_lookup [i]; --i)
1658 size_lookup[i] = order;
1661 G.depth_in_use = 0;
1662 G.depth_max = 10;
1663 G.depth = XNEWVEC (unsigned int, G.depth_max);
1665 G.by_depth_in_use = 0;
1666 G.by_depth_max = INITIAL_PTE_COUNT;
1667 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1668 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1671 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1672 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1674 static void
1675 ggc_recalculate_in_use_p (page_entry *p)
1677 unsigned int i;
1678 size_t num_objects;
1680 /* Because the past-the-end bit in in_use_p is always set, we
1681 pretend there is one additional object. */
1682 num_objects = OBJECTS_IN_PAGE (p) + 1;
1684 /* Reset the free object count. */
1685 p->num_free_objects = num_objects;
1687 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1688 for (i = 0;
1689 i < CEIL (BITMAP_SIZE (num_objects),
1690 sizeof (*p->in_use_p));
1691 ++i)
1693 unsigned long j;
1695 /* Something is in use if it is marked, or if it was in use in a
1696 context further down the context stack. */
1697 p->in_use_p[i] |= save_in_use_p (p)[i];
1699 /* Decrement the free object count for every object allocated. */
1700 for (j = p->in_use_p[i]; j; j >>= 1)
1701 p->num_free_objects -= (j & 1);
1704 gcc_assert (p->num_free_objects < num_objects);
1707 /* Unmark all objects. */
1709 static void
1710 clear_marks (void)
1712 unsigned order;
1714 for (order = 2; order < NUM_ORDERS; order++)
1716 page_entry *p;
1718 for (p = G.pages[order]; p != NULL; p = p->next)
1720 size_t num_objects = OBJECTS_IN_PAGE (p);
1721 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1723 /* The data should be page-aligned. */
1724 gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
1726 /* Pages that aren't in the topmost context are not collected;
1727 nevertheless, we need their in-use bit vectors to store GC
1728 marks. So, back them up first. */
1729 if (p->context_depth < G.context_depth)
1731 if (! save_in_use_p (p))
1732 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1733 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1736 /* Reset the number of free objects and clear the
1737 in-use bits. These will be adjusted by mark_obj. */
1738 p->num_free_objects = num_objects;
1739 memset (p->in_use_p, 0, bitmap_size);
1741 /* Make sure the one-past-the-end bit is always set. */
1742 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1743 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1748 /* Free all empty pages. Partially empty pages need no attention
1749 because the `mark' bit doubles as an `unused' bit. */
1751 static void
1752 sweep_pages (void)
1754 unsigned order;
1756 for (order = 2; order < NUM_ORDERS; order++)
1758 /* The last page-entry to consider, regardless of entries
1759 placed at the end of the list. */
1760 page_entry * const last = G.page_tails[order];
1762 size_t num_objects;
1763 size_t live_objects;
1764 page_entry *p, *previous;
1765 int done;
1767 p = G.pages[order];
1768 if (p == NULL)
1769 continue;
1771 previous = NULL;
1774 page_entry *next = p->next;
1776 /* Loop until all entries have been examined. */
1777 done = (p == last);
1779 num_objects = OBJECTS_IN_PAGE (p);
1781 /* Add all live objects on this page to the count of
1782 allocated memory. */
1783 live_objects = num_objects - p->num_free_objects;
1785 G.allocated += OBJECT_SIZE (order) * live_objects;
1787 /* Only objects on pages in the topmost context should get
1788 collected. */
1789 if (p->context_depth < G.context_depth)
1792 /* Remove the page if it's empty. */
1793 else if (live_objects == 0)
1795 /* If P was the first page in the list, then NEXT
1796 becomes the new first page in the list, otherwise
1797 splice P out of the forward pointers. */
1798 if (! previous)
1799 G.pages[order] = next;
1800 else
1801 previous->next = next;
1803 /* Splice P out of the back pointers too. */
1804 if (next)
1805 next->prev = previous;
1807 /* Are we removing the last element? */
1808 if (p == G.page_tails[order])
1809 G.page_tails[order] = previous;
1810 free_page (p);
1811 p = previous;
1814 /* If the page is full, move it to the end. */
1815 else if (p->num_free_objects == 0)
1817 /* Don't move it if it's already at the end. */
1818 if (p != G.page_tails[order])
1820 /* Move p to the end of the list. */
1821 p->next = NULL;
1822 p->prev = G.page_tails[order];
1823 G.page_tails[order]->next = p;
1825 /* Update the tail pointer... */
1826 G.page_tails[order] = p;
1828 /* ... and the head pointer, if necessary. */
1829 if (! previous)
1830 G.pages[order] = next;
1831 else
1832 previous->next = next;
1834 /* And update the backpointer in NEXT if necessary. */
1835 if (next)
1836 next->prev = previous;
1838 p = previous;
1842 /* If we've fallen through to here, it's a page in the
1843 topmost context that is neither full nor empty. Such a
1844 page must precede pages at lesser context depth in the
1845 list, so move it to the head. */
1846 else if (p != G.pages[order])
1848 previous->next = p->next;
1850 /* Update the backchain in the next node if it exists. */
1851 if (p->next)
1852 p->next->prev = previous;
1854 /* Move P to the head of the list. */
1855 p->next = G.pages[order];
1856 p->prev = NULL;
1857 G.pages[order]->prev = p;
1859 /* Update the head pointer. */
1860 G.pages[order] = p;
1862 /* Are we moving the last element? */
1863 if (G.page_tails[order] == p)
1864 G.page_tails[order] = previous;
1865 p = previous;
1868 previous = p;
1869 p = next;
1871 while (! done);
1873 /* Now, restore the in_use_p vectors for any pages from contexts
1874 other than the current one. */
1875 for (p = G.pages[order]; p; p = p->next)
1876 if (p->context_depth != G.context_depth)
1877 ggc_recalculate_in_use_p (p);
1881 #ifdef ENABLE_GC_CHECKING
1882 /* Clobber all free objects. */
1884 static void
1885 poison_pages (void)
1887 unsigned order;
1889 for (order = 2; order < NUM_ORDERS; order++)
1891 size_t size = OBJECT_SIZE (order);
1892 page_entry *p;
1894 for (p = G.pages[order]; p != NULL; p = p->next)
1896 size_t num_objects;
1897 size_t i;
1899 if (p->context_depth != G.context_depth)
1900 /* Since we don't do any collection for pages in pushed
1901 contexts, there's no need to do any poisoning. And
1902 besides, the IN_USE_P array isn't valid until we pop
1903 contexts. */
1904 continue;
1906 num_objects = OBJECTS_IN_PAGE (p);
1907 for (i = 0; i < num_objects; i++)
1909 size_t word, bit;
1910 word = i / HOST_BITS_PER_LONG;
1911 bit = i % HOST_BITS_PER_LONG;
1912 if (((p->in_use_p[word] >> bit) & 1) == 0)
1914 char *object = p->page + i * size;
1916 /* Keep poison-by-write when we expect to use Valgrind,
1917 so the exact same memory semantics is kept, in case
1918 there are memory errors. We override this request
1919 below. */
1920 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
1921 size));
1922 memset (object, 0xa5, size);
1924 /* Drop the handle to avoid handle leak. */
1925 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
1931 #else
1932 #define poison_pages()
1933 #endif
1935 #ifdef ENABLE_GC_ALWAYS_COLLECT
1936 /* Validate that the reportedly free objects actually are. */
1938 static void
1939 validate_free_objects (void)
1941 struct free_object *f, *next, *still_free = NULL;
1943 for (f = G.free_object_list; f ; f = next)
1945 page_entry *pe = lookup_page_table_entry (f->object);
1946 size_t bit, word;
1948 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
1949 word = bit / HOST_BITS_PER_LONG;
1950 bit = bit % HOST_BITS_PER_LONG;
1951 next = f->next;
1953 /* Make certain it isn't visible from any root. Notice that we
1954 do this check before sweep_pages merges save_in_use_p. */
1955 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
1957 /* If the object comes from an outer context, then retain the
1958 free_object entry, so that we can verify that the address
1959 isn't live on the stack in some outer context. */
1960 if (pe->context_depth != G.context_depth)
1962 f->next = still_free;
1963 still_free = f;
1965 else
1966 free (f);
1969 G.free_object_list = still_free;
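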
1971 #else
1972 #define validate_free_objects()
1973 #endif
1975 /* Top level mark-and-sweep routine. */
1977 void
1978 ggc_collect (void)
1980 /* Avoid frequent unnecessary work by skipping collection if the
1981 total allocations haven't expanded much since the last
1982 collection. */
1983 float allocated_last_gc =
1984 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1986 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1988 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
1989 return;
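/* Illustrative numbers (not part of the heuristic itself): with the
   baseline parameter values ggc-min-heapsize == 4096 kB and
   ggc-min-expand == 30 (the real defaults may be scaled up according to
   available RAM), allocated_last_gc is at least 4 MB, min_expand is
   about 1.2 MB, and so a collection is skipped until G.allocated
   exceeds roughly 5.2 MB.  */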
1991 timevar_push (TV_GC);
1992 if (!quiet_flag)
1993 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1994 if (GGC_DEBUG_LEVEL >= 2)
1995 fprintf (G.debug_file, "BEGIN COLLECTING\n");
1997 /* Zero the total allocated bytes. This will be recalculated in the
1998 sweep phase. */
1999 G.allocated = 0;
2001 /* Release the pages we freed the last time we collected, but didn't
2002 reuse in the interim. */
2003 release_pages ();
2005 /* Indicate that we've seen collections at this context depth. */
2006 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
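/* For example (illustrative only), at context depth 2 this mask is
   (1 << 3) - 1 == 0x7, i.e. bits 0, 1 and 2 set, recording that a
   collection has now been seen at depths 0 through 2.  */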
2008 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2010 clear_marks ();
2011 ggc_mark_roots ();
2012 #ifdef GATHER_STATISTICS
2013 ggc_prune_overhead_list ();
2014 #endif
2015 poison_pages ();
2016 validate_free_objects ();
2017 sweep_pages ();
2019 G.allocated_last_gc = G.allocated;
2021 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2023 timevar_pop (TV_GC);
2025 if (!quiet_flag)
2026 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2027 if (GGC_DEBUG_LEVEL >= 2)
2028 fprintf (G.debug_file, "END COLLECTING\n");
2031 /* Print allocation statistics. */
2032 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2033 ? (x) \
2034 : ((x) < 1024*1024*10 \
2035 ? (x) / 1024 \
2036 : (x) / (1024*1024))))
2037 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
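/* Worked examples for the macros above (illustrative only): 8000 is
   below 10240, so it prints as "8000" with a blank label; 5000000 is
   below 10485760, so it prints as 5000000 / 1024 == 4882 with 'k';
   50000000 prints as 50000000 / (1024*1024) == 47 with 'M'.  */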
2039 void
2040 ggc_print_statistics (void)
2042 struct ggc_statistics stats;
2043 unsigned int i;
2044 size_t total_overhead = 0;
2046 /* Clear the statistics. */
2047 memset (&stats, 0, sizeof (stats));
2049 /* Make sure collection will really occur. */
2050 G.allocated_last_gc = 0;
2052 /* Collect and print the statistics common across collectors. */
2053 ggc_print_common_statistics (stderr, &stats);
2055 /* Release free pages so that we will not count the bytes allocated
2056 there as part of the total allocated memory. */
2057 release_pages ();
2059 /* Collect some information about the various sizes of
2060 allocation. */
2061 fprintf (stderr,
2062 "Memory still allocated at the end of the compilation process\n");
2063 fprintf (stderr, "%-5s %10s %10s %10s\n",
2064 "Size", "Allocated", "Used", "Overhead");
2065 for (i = 0; i < NUM_ORDERS; ++i)
2067 page_entry *p;
2068 size_t allocated;
2069 size_t in_use;
2070 size_t overhead;
2072 /* Skip empty entries. */
2073 if (!G.pages[i])
2074 continue;
2076 overhead = allocated = in_use = 0;
2078 /* Figure out the total number of bytes allocated for objects of
2079 this size, and how many of them are actually in use. Also figure
2080 out how much memory the page table is using. */
2081 for (p = G.pages[i]; p; p = p->next)
2083 allocated += p->bytes;
2084 in_use +=
2085 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2087 overhead += (sizeof (page_entry) - sizeof (long)
2088 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2090 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
2091 (unsigned long) OBJECT_SIZE (i),
2092 SCALE (allocated), STAT_LABEL (allocated),
2093 SCALE (in_use), STAT_LABEL (in_use),
2094 SCALE (overhead), STAT_LABEL (overhead));
2095 total_overhead += overhead;
2097 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
2098 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2099 SCALE (G.allocated), STAT_LABEL (G.allocated),
2100 SCALE (total_overhead), STAT_LABEL (total_overhead));
2102 #ifdef GATHER_STATISTICS
2104 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2106 fprintf (stderr, "Total Overhead: %10lld\n",
2107 G.stats.total_overhead);
2108 fprintf (stderr, "Total Allocated: %10lld\n",
2109 G.stats.total_allocated);
2111 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2112 G.stats.total_overhead_under32);
2113 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2114 G.stats.total_allocated_under32);
2115 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2116 G.stats.total_overhead_under64);
2117 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2118 G.stats.total_allocated_under64);
2119 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2120 G.stats.total_overhead_under128);
2121 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2122 G.stats.total_allocated_under128);
2124 for (i = 0; i < NUM_ORDERS; i++)
2125 if (G.stats.total_allocated_per_order[i])
2127 fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
2128 (unsigned long) OBJECT_SIZE (i),
2129 G.stats.total_overhead_per_order[i]);
2130 fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
2131 (unsigned long) OBJECT_SIZE (i),
2132 G.stats.total_allocated_per_order[i]);
2135 #endif
2138 struct ggc_pch_ondisk
2140 unsigned totals[NUM_ORDERS];
2143 struct ggc_pch_data
2145 struct ggc_pch_ondisk d;
2146 size_t base[NUM_ORDERS];
2147 size_t written[NUM_ORDERS];
2150 struct ggc_pch_data *
2151 init_ggc_pch (void)
2153 return XCNEW (struct ggc_pch_data);
2156 void
2157 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2158 size_t size, bool is_string ATTRIBUTE_UNUSED,
2159 enum gt_types_enum type ATTRIBUTE_UNUSED)
2161 unsigned order;
2163 if (size < NUM_SIZE_LOOKUP)
2164 order = size_lookup[size];
2165 else
2167 order = 10;
2168 while (size > OBJECT_SIZE (order))
2169 order++;
2172 d->d.totals[order]++;
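/* Worked example (illustrative only, assuming the standard power-of-two
   orders where OBJECT_SIZE (10) == 1024): a request small enough for
   size_lookup is mapped directly, while a 3000-byte request misses the
   table and is found by searching upward from order 10:
   1024 < 3000, 2048 < 3000, 4096 >= 3000, so order 12 is counted.  */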
2175 size_t
2176 ggc_pch_total_size (struct ggc_pch_data *d)
2178 size_t a = 0;
2179 unsigned i;
2181 for (i = 0; i < NUM_ORDERS; i++)
2182 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2183 return a;
2186 void
2187 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2189 size_t a = (size_t) base;
2190 unsigned i;
2192 for (i = 0; i < NUM_ORDERS; i++)
2194 d->base[i] = a;
2195 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
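/* Worked example (illustrative only, assuming a 4096-byte page size):
   if order 5 (32-byte objects) has 100 objects in the PCH, it needs
   3200 bytes, rounded up to one 4096-byte page, so the next non-empty
   order's base starts 4096 bytes later; orders with zero objects
   contribute nothing to the layout.  */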
2200 char *
2201 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2202 size_t size, bool is_string ATTRIBUTE_UNUSED,
2203 enum gt_types_enum type ATTRIBUTE_UNUSED)
2205 unsigned order;
2206 char *result;
2208 if (size < NUM_SIZE_LOOKUP)
2209 order = size_lookup[size];
2210 else
2212 order = 10;
2213 while (size > OBJECT_SIZE (order))
2214 order++;
2217 result = (char *) d->base[order];
2218 d->base[order] += OBJECT_SIZE (order);
2219 return result;
2222 void
2223 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2224 FILE *f ATTRIBUTE_UNUSED)
2226 /* Nothing to do. */
2229 void
2230 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2231 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2232 size_t size, bool is_string ATTRIBUTE_UNUSED)
2234 unsigned order;
2235 static const char emptyBytes[256] = { 0 };
2237 if (size < NUM_SIZE_LOOKUP)
2238 order = size_lookup[size];
2239 else
2241 order = 10;
2242 while (size > OBJECT_SIZE (order))
2243 order++;
2246 if (fwrite (x, size, 1, f) != 1)
2247 fatal_error ("can%'t write PCH file: %m");
2249 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2250 object out to OBJECT_SIZE(order). This happens for strings. */
2252 if (size != OBJECT_SIZE (order))
2254 unsigned padding = OBJECT_SIZE(order) - size;
2256 /* To speed small writes, we use a nulled-out array that's larger
2257 than most padding requests as the source for our null bytes. This
2258 permits us to do the padding with fwrite () rather than fseek (),
2259 and limits the chance that the OS will flush any outstanding writes. */
2260 if (padding <= sizeof(emptyBytes))
2262 if (fwrite (emptyBytes, 1, padding, f) != padding)
2263 fatal_error ("can%'t write PCH file");
2265 else
2267 /* Larger than our buffer? Just default to fseek. */
2268 if (fseek (f, padding, SEEK_CUR) != 0)
2269 fatal_error ("can%'t write PCH file");
2273 d->written[order]++;
2274 if (d->written[order] == d->d.totals[order]
2275 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2276 G.pagesize),
2277 SEEK_CUR) != 0)
2278 fatal_error ("can%'t write PCH file: %m");
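/* Worked example of the padding above (illustrative only; the exact
   order chosen for a given size depends on the configured size table):
   a 70-byte string assigned to a 128-byte order needs 58 bytes of
   padding, which fits in emptyBytes and is written with fwrite; an
   object needing more than 256 bytes of padding falls back to the
   fseek path instead.  */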
2281 void
2282 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2284 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2285 fatal_error ("can%'t write PCH file: %m");
2286 free (d);
2289 /* Move the PCH PTE entries that were just added to the end of
2290 by_depth to the front. */
2292 static void
2293 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2295 unsigned i;
2297 /* First, we swap the new entries to the front of the varrays. */
2298 page_entry **new_by_depth;
2299 unsigned long **new_save_in_use;
2301 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2302 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2304 memcpy (&new_by_depth[0],
2305 &G.by_depth[count_old_page_tables],
2306 count_new_page_tables * sizeof (void *));
2307 memcpy (&new_by_depth[count_new_page_tables],
2308 &G.by_depth[0],
2309 count_old_page_tables * sizeof (void *));
2310 memcpy (&new_save_in_use[0],
2311 &G.save_in_use[count_old_page_tables],
2312 count_new_page_tables * sizeof (void *));
2313 memcpy (&new_save_in_use[count_new_page_tables],
2314 &G.save_in_use[0],
2315 count_old_page_tables * sizeof (void *));
2317 free (G.by_depth);
2318 free (G.save_in_use);
2320 G.by_depth = new_by_depth;
2321 G.save_in_use = new_save_in_use;
2323 /* Now update all the index_by_depth fields. */
2324 for (i = G.by_depth_in_use; i > 0; --i)
2326 page_entry *p = G.by_depth[i-1];
2327 p->index_by_depth = i-1;
2330 /* And last, we update the depth pointers in G.depth. The first
2331 entry is already 0, and context 0 entries always start at index
2332 0, so there is nothing to update in the first slot. We need a
2333 second slot only if we have old PTEs, and if we do, they start
2334 at index count_new_page_tables. */
2335 if (count_old_page_tables)
2336 push_depth (count_new_page_tables);
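/* Worked example (illustrative only): with count_old_page_tables == 3
   and count_new_page_tables == 2, by_depth changes from
   { O0, O1, O2, N0, N1 } to { N0, N1, O0, O1, O2 }, and push_depth (2)
   records that the old (depth 1) entries now start at index 2.  */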
2339 void
2340 ggc_pch_read (FILE *f, void *addr)
2342 struct ggc_pch_ondisk d;
2343 unsigned i;
2344 char *offs = (char *) addr;
2345 unsigned long count_old_page_tables;
2346 unsigned long count_new_page_tables;
2348 count_old_page_tables = G.by_depth_in_use;
2350 /* We've just read in a PCH file. So, every object that used to be
2351 allocated is now free. */
2352 clear_marks ();
2353 #ifdef ENABLE_GC_CHECKING
2354 poison_pages ();
2355 #endif
2356 /* Since we free all the allocated objects, the free list becomes
2357 useless. Validate it now, which will also clear it. */
2358 validate_free_objects ();
2360 /* No object read from a PCH file should ever be freed. So, set the
2361 context depth to 1, and set the depth of all the currently-allocated
2362 pages to be 1 too. PCH pages will have depth 0. */
2363 gcc_assert (!G.context_depth);
2364 G.context_depth = 1;
2365 for (i = 0; i < NUM_ORDERS; i++)
2367 page_entry *p;
2368 for (p = G.pages[i]; p != NULL; p = p->next)
2369 p->context_depth = G.context_depth;
2372 /* Allocate the appropriate page-table entries for the pages read from
2373 the PCH file. */
2374 if (fread (&d, sizeof (d), 1, f) != 1)
2375 fatal_error ("can%'t read PCH file: %m");
2377 for (i = 0; i < NUM_ORDERS; i++)
2379 struct page_entry *entry;
2380 char *pte;
2381 size_t bytes;
2382 size_t num_objs;
2383 size_t j;
2385 if (d.totals[i] == 0)
2386 continue;
2388 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2389 num_objs = bytes / OBJECT_SIZE (i);
2390 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2391 - sizeof (long)
2392 + BITMAP_SIZE (num_objs + 1)));
2393 entry->bytes = bytes;
2394 entry->page = offs;
2395 entry->context_depth = 0;
2396 offs += bytes;
2397 entry->num_free_objects = 0;
2398 entry->order = i;
2400 for (j = 0;
2401 j + HOST_BITS_PER_LONG <= num_objs + 1;
2402 j += HOST_BITS_PER_LONG)
2403 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2404 for (; j < num_objs + 1; j++)
2405 entry->in_use_p[j / HOST_BITS_PER_LONG]
2406 |= 1L << (j % HOST_BITS_PER_LONG);
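/* Worked example (illustrative only, 64-bit host): for num_objs == 70
   there are 71 bits to set.  The first loop runs once (0 + 64 <= 71)
   and sets all of in_use_p[0]; the second loop then sets bits 64
   through 70, i.e. bits 0-6 of in_use_p[1].  */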
2408 for (pte = entry->page;
2409 pte < entry->page + entry->bytes;
2410 pte += G.pagesize)
2411 set_page_table_entry (pte, entry);
2413 if (G.page_tails[i] != NULL)
2414 G.page_tails[i]->next = entry;
2415 else
2416 G.pages[i] = entry;
2417 G.page_tails[i] = entry;
2419 /* We start off by just adding all the new information to the
2420 end of the varrays; later, we will move it to the front
2421 of the varrays, as the PCH page tables are at
2422 context 0. */
2423 push_by_depth (entry, 0);
2426 /* Now, we update the various data structures that speed page table
2427 handling. */
2428 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2430 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2432 /* Update the statistics. */
2433 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2436 struct alloc_zone
2438 int dummy;
2441 struct alloc_zone rtl_zone;
2442 struct alloc_zone tree_zone;
2443 struct alloc_zone tree_id_zone;