1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "rtl.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "cgraph.h"
#include "cfgloop.h"
#include "plugin.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
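/* For instance, assuming a 4K page size: a 10-byte request is rounded
   up to the 16-byte order and satisfied from a page holding 4096/16
   == 256 such objects, whose in-use bitmap occupies only 32 bytes.  */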
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
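/* A worked example, assuming a 4K page size (so G.lg_pagesize == 12
   and PAGE_L2_BITS == 12): for the 32-bit pointer 0x12345678,
   LOOKUP_L1 extracts 0x12 and LOOKUP_L2 extracts 0x345; the low 12
   bits are the offset within the page and are ignored.  */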
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
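/* For power-of-two object sizes the pair is trivial: e.g. for Z == 8,
   M == 1 and S == 3, so OFFSET_TO_BIT (48, ORDER) == (48 * 1) >> 3
   == 6 == 48 / 8.  compute_inverse below derives M and S for the
   non-power-of-two sizes as well.  */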
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};
/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Round X to the next multiple of the page size.  */

#define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
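/* E.g. ROUND_UP_VALUE (10, 8) == 6, since 10 + 6 == 16 is the next
   multiple of 8.  */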
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
};
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
};
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
class finalizer
{
public:
  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}

  void *addr () const { return m_addr; }

  void call () const { m_function (m_addr); }

private:
  /* The address of the object being finalized.  */
  void *m_addr;

  /* The function to be called.  */
  void (*m_function)(void *);
};
class vec_finalizer
{
public:
  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}

  void call () const
    {
      for (size_t i = 0; i < m_n_objects; i++)
        m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
    }

  void *addr () const { return reinterpret_cast<void *> (m_addr); }

private:
  /* The address of the vector of objects to be finalized.  */
  uintptr_t m_addr;

  /* The function to be called.  */
  void (*m_function)(void *);

  /* The size of the objects in the vector.  */
  size_t m_object_size;

  /* The number of objects in the vector.  */
  size_t m_n_objects;
};
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif
/* The rest of the global variables.  */
static struct ggc_globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by that given depth we
     are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

  /* Finalizers for single objects.  The first index is collection_depth.  */
  vec<vec<finalizer> > finalizers;

  /* Finalizers for vectors of objects.  */
  vec<vec<vec_finalizer> > vec_finalizers;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;
/* True if a gc is currently taking place.  */

static bool in_gc = false;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
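/* E.g. on a host with 64-bit longs, a page holding 256 objects needs
   BITMAP_SIZE (256 + 1) == 40 bytes; the extra bit is the
   one-past-the-end sentinel that alloc_page sets below.  */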
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}
/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
                                  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
/* Traverse the page table and find the entry for a page.
   If the object wasn't allocated in GC return NULL.  */

static inline page_entry *
safe_lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return NULL;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);
  if (! base[L1])
    return NULL;

  return base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
          (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
              p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
        return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
        G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
        {
          page = alloc_anon (NULL, G.pagesize, true);
          entries = 1;
        }

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = entries - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct page_entry, page_entry_size);
          e->order = order;
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *)page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          gcc_assert (tail_slop >= sizeof (page_group));
          group = (page_group *)enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = G.free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = XCNEWVAR (struct page_entry, page_entry_size);
              e->order = order;
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          G.free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%lu, data %p-%p\n",
             (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
             page + entry_size - 1);

  return entry;
}
/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
         as new elements are added to by_depth, we note the indices
         of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
        --G.depth_in_use;
    }
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (void *) entry,
             entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
         one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger continuous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
        {
          len += p->bytes;
          if (!p->discarded)
            mapped_len += p->bytes;
          newprev = p;
          p = p->next;
        }
      if (len >= free_unit)
        {
          while (start_p != p)
            {
              next = start_p->next;
              free (start_p);
              start_p = next;
            }
          munmap (start, len);
          if (prev)
            prev->next = p;
          else
            G.free_pages = p;
          G.bytes_mapped -= mapped_len;
          continue;
        }
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time.  */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
        {
          p = p->next;
          continue;
        }
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
        {
          len += p->bytes;
          p = p->next;
        }
      /* Give the page back to the kernel, but don't free the mapping.
         This avoids fragmentation in the virtual memory map of the
         process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped to not touch the garbage collector
         unnecessarily.  */
      G.bytes_mapped -= len;
      while (start_p != p)
        {
          start_p->discarded = true;
          start_p = start_p->next;
        }
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        G.bytes_mapped -= g->alloc_size;
        free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
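/* E.g. size_lookup[24] == 5, i.e. a 24-byte request initially maps to
   the 32-byte order; init_ggc below retargets entries like this at the
   better-fitting extra orders.  */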
/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
                        size_t *size_order,
                        size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
        order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}
/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}
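/* E.g. ggc_round_alloc_size (10) == 16: the request is bumped to the
   smallest order whose object size can hold it.  */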
/* Push a finalizer onto the appropriate vec.  */

static void
add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
{
  if (f == NULL)
    /* No finalizer.  */;
  else if (n == 1)
    {
      finalizer fin (result, f);
      G.finalizers[G.context_depth].safe_push (fin);
    }
  else
    {
      vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
      G.vec_finalizers[G.context_depth].safe_push (fin);
    }
}
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
                    MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
         way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
         the only entry, then we must update the PREV pointer of the
         ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;
      else
        entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;

#if GCC_VERSION >= 3004
          bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
#endif

          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
         The new page at the head of the list will have NULL in
         its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
  if (GATHER_STATISTICS)
    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
                         result FINAL_PASS_MEM_STAT);

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
                                                object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

  if (f)
    add_finalizer (result, f, s, n);

  if (GATHER_STATISTICS)
    {
      size_t overhead = object_size - size;

      G.stats.total_overhead += overhead;
      G.stats.total_allocated += object_size;
      G.stats.total_overhead_per_order[order] += overhead;
      G.stats.total_allocated_per_order[order] += object_size;

      if (size <= 32)
        {
          G.stats.total_overhead_under32 += overhead;
          G.stats.total_allocated_under32 += object_size;
        }
      if (size <= 64)
        {
          G.stats.total_overhead_under64 += overhead;
          G.stats.total_allocated_under64 += object_size;
        }
      if (size <= 128)
        {
          G.stats.total_overhead_under128 += overhead;
          G.stats.total_allocated_under128 += object_size;
        }
    }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
             (unsigned long) size, (unsigned long) object_size, result,
             (void *) entry);

  return result;
}
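/* To illustrate the hint fast path above: the second 16-byte
   allocation from a fresh page finds next_bit_hint == 1, so word == 0
   and bit == 1; that bit is still clear, so no bitmap scan is needed
   and the object lands at entry->page + 1 * 16.  */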
/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is alloced.  If it was not
     GC allocated, gracefully bail out.  */
  entry = safe_lookup_page_table_entry (p);
  if (!entry)
    return;

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}
/* User-callable entry points for marking string X.  */

void
gt_ggc_mx (const char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
{
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  if (in_gc)
    return;

  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GATHER_STATISTICS)
    ggc_free_overhead (p);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size=%lu, at %p on %p\n",
             (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
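/* Worked example: OBJECT_SIZE (order) == 24 == 8 * 3 yields e == 3
   and leaves size == 3, whose inverse modulo 2^N (N being the bit
   width of size_t) is 0xAAAAAAAB for N == 32.  OFFSET_TO_BIT (48,
   order) then computes ((48 * 0xAAAAAAAB) mod 2^32) >> 3
   == 16 >> 3 == 2 == 48 / 24.  */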
/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  static bool init_p = false;
  unsigned order;

  if (init_p)
    return;
  init_p = true;

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, true);
    struct page_entry *e;
    if ((uintptr_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, true);
        gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
         so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
        objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
        continue;

      for (o = size_lookup[i]; o == size_lookup[i]; --i)
        size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  /* Allocate space for the depth 0 finalizers.  */
  G.finalizers.safe_push (vNULL);
  G.vec_finalizers.safe_push (vNULL);
  gcc_assert (G.finalizers.length() == 1);
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
                 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects = OBJECTS_IN_PAGE (p);
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

          /* The data should be page-aligned.  */
          gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! save_in_use_p (p))
                save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}
/* Check if any blocks with a registered finalizer have become unmarked.  If
   so, run the finalizer and unregister it, because the block is about to be
   freed.  Note that no guarantee is made about what order finalizers will
   run in, so touching other objects in gc memory is extremely unwise.  */

static void
ggc_handle_finalizers ()
{
  unsigned dlen = G.finalizers.length();
  for (unsigned d = G.context_depth; d < dlen; ++d)
    {
      vec<finalizer> &v = G.finalizers[d];
      unsigned length = v.length ();
      for (unsigned int i = 0; i < length;)
        {
          finalizer &f = v[i];
          if (!ggc_marked_p (f.addr ()))
            {
              f.call ();
              v.unordered_remove (i);
              length--;
            }
          else
            i++;
        }
    }

  gcc_assert (dlen == G.vec_finalizers.length());
  for (unsigned d = G.context_depth; d < dlen; ++d)
    {
      vec<vec_finalizer> &vv = G.vec_finalizers[d];
      unsigned length = vv.length ();
      for (unsigned int i = 0; i < length;)
        {
          vec_finalizer &f = vv[i];
          if (!ggc_marked_p (f.addr ()))
            {
              f.call ();
              vv.unordered_remove (i);
              length--;
            }
          else
            i++;
        }
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          num_objects = OBJECTS_IN_PAGE (p);

          /* Add all live objects on this page to the count of
             allocated memory.  */
          live_objects = num_objects - p->num_free_objects;

          G.allocated += OBJECT_SIZE (order) * live_objects;

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
              /* If P was the first page in the list, then NEXT
                 becomes the new first page in the list, otherwise
                 splice P out of the forward pointers.  */
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Splice P out of the back pointers too.  */
              if (next)
                next->prev = previous;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  p->prev = G.page_tails[order];
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;

                  /* And update the backpointer in NEXT if necessary.  */
                  if (next)
                    next->prev = previous;

                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;

              /* Update the backchain in the next node if it exists.  */
              if (p->next)
                p->next->prev = previous;

              /* Move P to the head of the list.  */
              p->next = G.pages[order];
              p->prev = NULL;
              G.pages[order]->prev = p;

              /* Update the head pointer.  */
              G.pages[order] = p;

              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}
#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects;
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          num_objects = OBJECTS_IN_PAGE (p);
          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                {
                  char *object = p->page + i * size;

                  /* Keep poison-by-write when we expect to use Valgrind,
                     so the exact same memory semantics is kept, in case
                     there are memory errors.  We override this request
                     below.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
                                                                 size));
                  memset (object, 0xa5, size);

                  /* Drop the handle to avoid handle leak.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
                }
            }
        }
    }
}
#else
#define poison_pages()
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
         do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
         free_object entry, so that we can verify that the address
         isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
        {
          f->next = still_free;
          still_free = f;
        }
      else
        free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  in_gc = true;
  clear_marks ();
  ggc_mark_roots ();
  ggc_handle_finalizers ();

  if (GATHER_STATISTICS)
    ggc_prune_overhead_list ();

  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  in_gc = false;
  G.allocated_last_gc = G.allocated;

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
/* Assume that all GGC memory is reachable and grow the limits for the next
   collection.  With checking, trigger GGC so -Q compilation outputs how much
   of memory really is reachable.  */

void
ggc_grow (void)
{
  if (!flag_checking)
    G.allocated_last_gc = MAX (G.allocated_last_gc,
                               G.allocated);
  else
    ggc_collect ();
  if (!quiet_flag)
    fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
2254 ggc_print_statistics (void)
2256 struct ggc_statistics stats
;
2258 size_t total_overhead
= 0;
2260 /* Clear the statistics. */
2261 memset (&stats
, 0, sizeof (stats
));
2263 /* Make sure collection will really occur. */
2264 G
.allocated_last_gc
= 0;
2266 /* Collect and print the statistics common across collectors. */
2267 ggc_print_common_statistics (stderr
, &stats
);
2269 /* Release free pages so that we will not count the bytes allocated
2270 there as part of the total allocated memory. */
2273 /* Collect some information about the various sizes of
2276 "Memory still allocated at the end of the compilation process\n");
2277 fprintf (stderr
, "%-8s %10s %10s %10s\n",
2278 "Size", "Allocated", "Used", "Overhead");
2279 for (i
= 0; i
< NUM_ORDERS
; ++i
)
      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;
      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
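      /* The overhead term mirrors how a page_entry is actually
	 allocated: the struct's trailing one-long in_use_p placeholder
	 is subtracted and replaced by the real bitmap, sized at one bit
	 per object plus one spare bit, rounded up to whole longs.  */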
      fprintf (stderr, "%-8lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }

  fprintf (stderr, "%-8s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL (G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));
  if (GATHER_STATISTICS)
    {
      fprintf (stderr, "\nTotal allocations and overheads during "
	       "the compilation process\n");

      fprintf (stderr, "Total Overhead:                           %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead);
      fprintf (stderr, "Total Allocated:                          %10"
	       HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_allocated);

      fprintf (stderr, "Total Overhead  under  32B:               %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under32);
      fprintf (stderr, "Total Allocated under  32B:               %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under32);
      fprintf (stderr, "Total Overhead  under  64B:               %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under64);
      fprintf (stderr, "Total Allocated under  64B:               %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under64);
      fprintf (stderr, "Total Overhead  under 128B:               %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under128);
      fprintf (stderr, "Total Allocated under 128B:               %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under128);
      for (i = 0; i < NUM_ORDERS; i++)
	if (G.stats.total_allocated_per_order[i])
	  {
	    fprintf (stderr, "Total Overhead  page size %9lu:      %10"
		     HOST_LONG_LONG_FORMAT "d\n",
		     (unsigned long) OBJECT_SIZE (i),
		     G.stats.total_overhead_per_order[i]);
	    fprintf (stderr, "Total Allocated page size %9lu:      %10"
		     HOST_LONG_LONG_FORMAT "d\n",
		     (unsigned long) OBJECT_SIZE (i),
		     G.stats.total_allocated_per_order[i]);
	  }
    }
}
/* The on-disk form of the PCH allocation data: the number of objects
   recorded for each order.  */

struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

/* Bookkeeping while writing a PCH: the on-disk totals, the current
   allocation base for each order, and how many objects of each order
   have been written so far.  */

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  uintptr_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}
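/* Illustrative mapping (the exact orders depend on the size_lookup and
   extra_order_size_table contents defined earlier in this file): a
   24-byte request indexes size_lookup[24] and gets the smallest order
   whose objects hold 24 bytes, while a 5000-byte request exceeds
   NUM_SIZE_LOOKUP and is found by the linear scan upward from order 10
   until OBJECT_SIZE (order) >= 5000.  */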
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
  return a;
}
void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  uintptr_t a = (uintptr_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
    }
}
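/* Layout sketch (illustrative numbers, assuming 4096-byte pages and
   OBJECT_SIZE (5) == 32, OBJECT_SIZE (6) == 64): if totals[5] == 100
   and totals[6] == 10 are the only nonzero counts, base[5] == base,
   base[6] == base + PAGE_ALIGN (3200) == base + 4096, and every later
   base[i] coincides at base + 8192; each order thus owns a consecutive
   page-aligned slab of the PCH area.  */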
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}
void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
void
ggc_pch_write_object (struct ggc_pch_data *d,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");
  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
	 than most padding requests as the source for our null bytes.  This
	 permits us to do the padding with fwrite() rather than fseek(), and
	 limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
	{
	  if (fwrite (emptyBytes, 1, padding, f) != padding)
	    fatal_error (input_location, "can%'t write PCH file");
	}
      else
	{
	  /* Larger than our buffer?  Just default to fseek.  */
	  if (fseek (f, padding, SEEK_CUR) != 0)
	    fatal_error (input_location, "can%'t write PCH file");
	}
    }
  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error (input_location, "can%'t write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation totals so that ggc_pch_read can rebuild
     the page table.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");
  free (d);
}
/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));
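  /* Illustrative before/after (not from the original source): with two
     old entries O1 O2 and three new PCH entries N1 N2 N3, by_depth goes
     from [O1, O2, N1, N2, N3] to [N1, N2, N3, O1, O2], so the context-0
     PCH pages end up first, as the depth machinery expects.  */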
  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (unsigned i = G.by_depth_in_use; i--;)
    {
      page_entry *p = G.by_depth[i];
      p->index_by_depth = i;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;
  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();
  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  /* Allocate space for the depth 1 finalizers.  */
  G.finalizers.safe_push (vNULL);
  G.vec_finalizers.safe_push (vNULL);
  gcc_assert (G.finalizers.length() == 2);
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;

      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }
  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error (input_location, "can%'t read PCH file: %m");
  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;
      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;
      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);
      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);
      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays, later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }
  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}
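/* A rough sketch of how the PCH hooks above are driven; see gt_pch_save
   and gt_pch_restore in ggc-common.c for the authoritative sequence:

     struct ggc_pch_data *d = init_ggc_pch ();
     for each object:   ggc_pch_count_object (d, x, size, ...);
     size_t total = ggc_pch_total_size (d);  map a region of that size
     ggc_pch_this_base (d, addr);            assign per-order bases
     for each object:   ggc_pch_alloc_object (d, x, size, ...);
     ggc_pch_prepare_write (d, f);
     for each object:   ggc_pch_write_object (d, f, x, newx, size, ...);
     ggc_pch_finish (d, f);

   On the read side, ggc_pch_read rebuilds the page table over the
   mapped region, as above.  */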