1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999-2018 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
29 #include "diagnostic-core.h"
31 #include "ggc-internal.h"
38 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
39 file open. Prefer either to valloc. */
41 # undef HAVE_MMAP_DEV_ZERO
45 #ifdef HAVE_MMAP_DEV_ZERO
50 #define USING_MALLOC_PAGE_GROUPS
53 #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
54 && defined(USING_MMAP)
55 # define USING_MADVISE
60 This garbage-collecting allocator allocates objects on one of a set
61 of pages. Each page can allocate objects of a single size only;
62 available sizes are powers of two starting at four bytes. The size
63 of an allocation request is rounded up to the next power of two
64 (`order'), and satisfied from the appropriate page.
66 Each page is recorded in a page-entry, which also maintains an
67 in-use bitmap of object positions on the page. This allows the
68 allocation state of a particular object to be flipped without
69 touching the page itself.
71 Each page-entry also has a context depth, which is used to track
72 pushing and popping of allocation contexts. Only objects allocated
73 in the current (highest-numbered) context may be collected.
75 Page entries are arranged in an array of singly-linked lists. The
76 array is indexed by the allocation size, in bits, of the pages on
77 it; i.e. all pages on a list allocate objects of the same size.
78 Pages are ordered on the list such that all non-full pages precede
all full pages, with non-full pages arranged in order of decreasing
context depth.
82 Empty pages (of all orders) are kept on a single page cache list,
83 and are considered first when new pages are required; they are
84 deallocated at the start of the next collection if they haven't
85 been recycled by then. */
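/* Illustration only, added for exposition (not part of the allocator): a
   minimal sketch of how a request size is rounded up to a power-of-two
   "order" as described above.  The real code uses the size_lookup table and
   OBJECT_SIZE defined below; the loop here is just the conceptual
   equivalent, and the 4-byte starting bucket is an assumption taken from the
   overview comment.  */
#if 0
static unsigned
example_size_to_order (unsigned long size)
{
  unsigned order = 2;		/* Smallest bucket in this sketch: 4 bytes.  */
  while (((unsigned long) 1 << order) < size)
    order++;
  return order;			/* Requests of this size are served from the
				   page list for ORDER.  */
}
#endif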
87 /* Define GGC_DEBUG_LEVEL to print debugging information.
88 0: No debugging output.
89 1: GC statistics only.
90 2: Page-entry allocations/deallocations as well.
91 3: Object allocations as well.
92 4: Object marks as well. */
93 #define GGC_DEBUG_LEVEL (0)
95 /* A two-level tree is used to look up the page-entry for a given
96 pointer. Two chunks of the pointer's bits are extracted to index
97 the first and second levels of the tree, as follows:
101 msb +----------------+----+------+------+ lsb
107 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
108 pages are aligned on system page boundaries. The next most
109 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
110 index values in the lookup table, respectively.
112 For 32-bit architectures and the settings below, there are no
113 leftover bits. For architectures with wider pointers, the lookup
tree points to a list of pages, which must be scanned to find the
correct one.  */
117 #define PAGE_L1_BITS (8)
118 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
119 #define PAGE_L1_SIZE ((uintptr_t) 1 << PAGE_L1_BITS)
120 #define PAGE_L2_SIZE ((uintptr_t) 1 << PAGE_L2_BITS)
122 #define LOOKUP_L1(p) \
123 (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
125 #define LOOKUP_L2(p) \
126 (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
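/* Illustration only: how the macros above split a pointer, assuming a 4K
   system page (lg_pagesize == 12), so PAGE_L2_BITS == 32 - 8 - 12 == 12.
   The constants are assumptions for this example, not values the allocator
   fixes.  */
#if 0
static void
example_split_address (unsigned long p)
{
  unsigned long l1 = (p >> 24) & 0xff;	 /* top 8 bits: first-level index  */
  unsigned long l2 = (p >> 12) & 0xfff;	 /* next 12 bits: second-level index  */
  unsigned long off = p & 0xfff;	 /* low 12 bits: offset within page,
					    ignored by the lookup  */
  (void) l1; (void) l2; (void) off;	 /* lookup[l1][l2] is the page_entry  */
}
#endif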
128 /* The number of objects per allocation page, for objects on a page of
129 the indicated ORDER. */
130 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
132 /* The number of objects in P. */
133 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
135 /* The size of an object on a page of the indicated ORDER. */
136 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
138 /* For speed, we avoid doing a general integer divide to locate the
139 offset in the allocation bitmap, by precalculating numbers M, S
140 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
141 within the page which is evenly divisible by the object size Z. */
142 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
143 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
144 #define OFFSET_TO_BIT(OFFSET, ORDER) \
145 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
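/* Illustration only: the multiply-and-shift trick above worked out for one
   concrete object size.  For Z == 24, write 24 == 3 * 2^3; the precomputed
   pair is then S == 3 and M == the inverse of 3 modulo 2^32, so that for any
   offset O divisible by 24, (O * M) >> S == O / 24.  The constant below is
   specific to this example; compute_inverse later in this file derives the
   pair for every order.  */
#if 0
static unsigned int
example_offset_to_bit_24 (unsigned int offset)
{
  const unsigned int mult = 0xaaaaaaabu;   /* inverse of 3 modulo 2^32  */
  const unsigned int shift = 3;		   /* because 24 == 3 * 2^3  */
  return (offset * mult) >> shift;	   /* == offset / 24 when 24 | offset  */
}
/* example_offset_to_bit_24 (48) == 2, example_offset_to_bit_24 (72) == 3.  */
#endif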
147 /* We use this structure to determine the alignment required for
148 allocations. For power-of-two sized allocations, that's not a
149 problem, but it does matter for odd-sized allocations.
150 We do not care about alignment for floating-point types. */
struct max_alignment {
  char c;
  union
  {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */
168 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
170 #define RTL_SIZE(NSLOTS) \
171 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
173 #define TREE_EXP_SIZE(OPS) \
174 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
176 /* The Ith entry is the maximum size of an object to be stored in the
177 Ith extra order. Adding a new entry to this array is the *only*
178 thing you need to do to add a new special allocation size. */
static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};
206 /* The total number of orders. */
208 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */
213 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
215 /* Round X to next multiple of the page size */
217 #define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
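/* Illustration only: ROUND_UP_VALUE (x, f) is the padding needed to reach
   the next multiple of F.  E.g. ROUND_UP_VALUE (10, 8) == 7 - ((7 + 10) % 8)
   == 6, so 10 + 6 == 16; when X is already a multiple of F the result is 0.
   The 4096-byte page size below is an assumption for the example, standing
   in for G.pagesize.  */
#if 0
static unsigned long
example_page_align (unsigned long x)
{
  const unsigned long pagesize = 4096;	/* stand-in for G.pagesize  */
  return x + (pagesize - 1 - (pagesize - 1 + x) % pagesize);
}
/* example_page_align (1) == 4096, example_page_align (4096) == 4096.  */
#endif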
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page?  */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
};
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
};
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
class finalizer
{
public:
  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}

  void *addr () const { return m_addr; }

  void call () const { m_function (m_addr); }

private:
  void *m_addr;

  void (*m_function)(void *);
};
class vec_finalizer
{
public:
  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}

  void call () const
    {
      for (size_t i = 0; i < m_n_objects; i++)
	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
    }

  void *addr () const { return reinterpret_cast<void *> (m_addr); }

private:
  uintptr_t m_addr;
  void (*m_function)(void *);
  size_t m_object_size;
  size_t m_n_objects;
};
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif
/* The rest of the global variables.  */
static struct ggc_globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];
  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;
  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;
  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by that given depth we
     are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;
  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;
  /* Finalizers for single objects.  The first index is collection_depth.  */
  vec<vec<finalizer> > finalizers;

  /* Finalizers for vectors of objects.  */
  vec<vec<vec_finalizer> > vec_finalizers;
#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;

/* True if a gc is currently taking place.  */
static bool in_gc = false;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
501 #define BITMAP_SIZE(Num_objects) \
502 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
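/* Worked example (illustration only): with 4K pages and 8-byte objects there
   are 512 objects on a page; the allocator also keeps one extra
   past-the-end bit, so BITMAP_SIZE (512 + 1) == CEIL (513, 64) * sizeof (long)
   == 9 * 8 == 72 bytes, assuming a 64-bit host where HOST_BITS_PER_LONG is 64
   and sizeof (long) is 8.  */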
504 /* Allocate pages in chunks of this size, to throttle calls to memory
505 allocation routines. The first page is used, the rest go onto the
506 free list. This cannot be larger than HOST_BITS_PER_INT for the
507 in_use bitmask for page_group. Hosts that need a different value
508 can override this by defining GGC_QUIRE_SIZE explicitly. */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif
517 /* Initial guess as to how many page table entries we might need. */
518 #define INITIAL_PTE_COUNT 128
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);

static char *alloc_anon (char *, size_t, bool check);

#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif

static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}
/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
584 /* Traverse the page table and find the entry for a page.
585 If the object wasn't allocated in GC return NULL. */
587 static inline page_entry
*
588 safe_lookup_page_table_entry (const void *p
)
593 #if HOST_BITS_PER_PTR <= 32
596 page_table table
= G
.lookup
;
597 uintptr_t high_bits
= (uintptr_t) p
& ~ (uintptr_t) 0xffffffff;
602 if (table
->high_bits
== high_bits
)
606 base
= &table
->table
[0];
609 /* Extract the level 1 and 2 indices. */
618 /* Traverse the page table and find the entry for a page.
619 Die (probably) if the object wasn't allocated via GC. */
621 static inline page_entry
*
622 lookup_page_table_entry (const void *p
)
627 #if HOST_BITS_PER_PTR <= 32
630 page_table table
= G
.lookup
;
631 uintptr_t high_bits
= (uintptr_t) p
& ~ (uintptr_t) 0xffffffff;
632 while (table
->high_bits
!= high_bits
)
634 base
= &table
->table
[0];
637 /* Extract the level 1 and 2 indices. */
644 /* Set the page table entry for a page. */
647 set_page_table_entry (void *p
, page_entry
*entry
)
652 #if HOST_BITS_PER_PTR <= 32
656 uintptr_t high_bits
= (uintptr_t) p
& ~ (uintptr_t) 0xffffffff;
657 for (table
= G
.lookup
; table
; table
= table
->next
)
658 if (table
->high_bits
== high_bits
)
661 /* Not found -- allocate a new table. */
662 table
= XCNEW (struct page_table_chain
);
663 table
->next
= G
.lookup
;
664 table
->high_bits
= high_bits
;
667 base
= &table
->table
[0];
670 /* Extract the level 1 and 2 indices. */
674 if (base
[L1
] == NULL
)
675 base
[L1
] = XCNEWVEC (page_entry
*, PAGE_L2_SIZE
);
677 base
[L1
][L2
] = entry
;
680 /* Prints the page-entry for object size ORDER, for debugging. */
683 debug_print_page_list (int order
)
686 printf ("Head=%p, Tail=%p:\n", (void *) G
.pages
[order
],
687 (void *) G
.page_tails
[order
]);
691 printf ("%p(%1d|%3d) -> ", (void *) p
, p
->context_depth
,
692 p
->num_free_objects
);
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
701 (if non-null). The ifdef structure here is intended to cause a
702 compile error unless exactly one of the HAVE_* is defined. */
705 alloc_anon (char *pref ATTRIBUTE_UNUSED
, size_t size
, bool check
)
707 #ifdef HAVE_MMAP_ANON
708 char *page
= (char *) mmap (pref
, size
, PROT_READ
| PROT_WRITE
,
709 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
711 #ifdef HAVE_MMAP_DEV_ZERO
712 char *page
= (char *) mmap (pref
, size
, PROT_READ
| PROT_WRITE
,
713 MAP_PRIVATE
, G
.dev_zero_fd
, 0);
716 if (page
== (char *) MAP_FAILED
)
720 perror ("virtual memory exhausted");
721 exit (FATAL_EXIT_CODE
);
724 /* Remember that we allocated this memory. */
725 G
.bytes_mapped
+= size
;
727 /* Pretend we don't have access to the allocated pages. We'll enable
728 access to smaller pieces of the area in ggc_internal_alloc. Discard the
729 handle to avoid handle leak. */
730 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page
, size
));
735 #ifdef USING_MALLOC_PAGE_GROUPS
736 /* Compute the index for this page into the page group. */
739 page_group_index (char *allocation
, char *page
)
741 return (size_t) (page
- allocation
) >> G
.lg_pagesize
;
744 /* Set and clear the in_use bit for this page in the page group. */
747 set_page_group_in_use (page_group
*group
, char *page
)
749 group
->in_use
|= 1 << page_group_index (group
->allocation
, page
);
753 clear_page_group_in_use (page_group
*group
, char *page
)
755 group
->in_use
&= ~(1 << page_group_index (group
->allocation
, page
));
759 /* Allocate a new page for allocating objects of size 2^ORDER,
760 and return an entry for it. The entry is not added to the
761 appropriate page_table list. */
763 static inline struct page_entry
*
764 alloc_page (unsigned order
)
766 struct page_entry
*entry
, *p
, **pp
;
770 size_t page_entry_size
;
772 #ifdef USING_MALLOC_PAGE_GROUPS
776 num_objects
= OBJECTS_PER_PAGE (order
);
777 bitmap_size
= BITMAP_SIZE (num_objects
+ 1);
778 page_entry_size
= sizeof (page_entry
) - sizeof (long) + bitmap_size
;
779 entry_size
= num_objects
* OBJECT_SIZE (order
);
780 if (entry_size
< G
.pagesize
)
781 entry_size
= G
.pagesize
;
782 entry_size
= PAGE_ALIGN (entry_size
);
787 /* Check the list of free pages for one we can use. */
788 for (pp
= &G
.free_pages
, p
= *pp
; p
; pp
= &p
->next
, p
= *pp
)
789 if (p
->bytes
== entry_size
)
795 G
.bytes_mapped
+= p
->bytes
;
796 p
->discarded
= false;
798 /* Recycle the allocated memory from this page ... */
802 #ifdef USING_MALLOC_PAGE_GROUPS
806 /* ... and, if possible, the page entry itself. */
807 if (p
->order
== order
)
810 memset (entry
, 0, page_entry_size
);
816 else if (entry_size
== G
.pagesize
)
818 /* We want just one page. Allocate a bunch of them and put the
819 extras on the freelist. (Can only do this optimization with
820 mmap for backing store.) */
821 struct page_entry
*e
, *f
= G
.free_pages
;
822 int i
, entries
= GGC_QUIRE_SIZE
;
824 page
= alloc_anon (NULL
, G
.pagesize
* GGC_QUIRE_SIZE
, false);
827 page
= alloc_anon (NULL
, G
.pagesize
, true);
831 /* This loop counts down so that the chain will be in ascending
833 for (i
= entries
- 1; i
>= 1; i
--)
835 e
= XCNEWVAR (struct page_entry
, page_entry_size
);
837 e
->bytes
= G
.pagesize
;
838 e
->page
= page
+ (i
<< G
.lg_pagesize
);
846 page
= alloc_anon (NULL
, entry_size
, true);
848 #ifdef USING_MALLOC_PAGE_GROUPS
851 /* Allocate a large block of memory and serve out the aligned
852 pages therein. This results in much less memory wastage
853 than the traditional implementation of valloc. */
855 char *allocation
, *a
, *enda
;
856 size_t alloc_size
, head_slop
, tail_slop
;
857 int multiple_pages
= (entry_size
== G
.pagesize
);
860 alloc_size
= GGC_QUIRE_SIZE
* G
.pagesize
;
862 alloc_size
= entry_size
+ G
.pagesize
- 1;
863 allocation
= XNEWVEC (char, alloc_size
);
865 page
= (char *) (((uintptr_t) allocation
+ G
.pagesize
- 1) & -G
.pagesize
);
866 head_slop
= page
- allocation
;
868 tail_slop
= ((size_t) allocation
+ alloc_size
) & (G
.pagesize
- 1);
870 tail_slop
= alloc_size
- entry_size
- head_slop
;
871 enda
= allocation
+ alloc_size
- tail_slop
;
873 /* We allocated N pages, which are likely not aligned, leaving
874 us with N-1 usable pages. We plan to place the page_group
875 structure somewhere in the slop. */
876 if (head_slop
>= sizeof (page_group
))
877 group
= (page_group
*)page
- 1;
880 /* We magically got an aligned allocation. Too bad, we have
881 to waste a page anyway. */
885 tail_slop
+= G
.pagesize
;
887 gcc_assert (tail_slop
>= sizeof (page_group
));
888 group
= (page_group
*)enda
;
889 tail_slop
-= sizeof (page_group
);
892 /* Remember that we allocated this memory. */
893 group
->next
= G
.page_groups
;
894 group
->allocation
= allocation
;
895 group
->alloc_size
= alloc_size
;
897 G
.page_groups
= group
;
898 G
.bytes_mapped
+= alloc_size
;
900 /* If we allocated multiple pages, put the rest on the free list. */
903 struct page_entry
*e
, *f
= G
.free_pages
;
904 for (a
= enda
- G
.pagesize
; a
!= page
; a
-= G
.pagesize
)
906 e
= XCNEWVAR (struct page_entry
, page_entry_size
);
908 e
->bytes
= G
.pagesize
;
920 entry
= XCNEWVAR (struct page_entry
, page_entry_size
);
922 entry
->bytes
= entry_size
;
924 entry
->context_depth
= G
.context_depth
;
925 entry
->order
= order
;
926 entry
->num_free_objects
= num_objects
;
927 entry
->next_bit_hint
= 1;
929 G
.context_depth_allocations
|= (unsigned long)1 << G
.context_depth
;
931 #ifdef USING_MALLOC_PAGE_GROUPS
932 entry
->group
= group
;
933 set_page_group_in_use (group
, page
);
936 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
937 increment the hint. */
938 entry
->in_use_p
[num_objects
/ HOST_BITS_PER_LONG
]
939 = (unsigned long) 1 << (num_objects
% HOST_BITS_PER_LONG
);
941 set_page_table_entry (page
, entry
);
943 if (GGC_DEBUG_LEVEL
>= 2)
944 fprintf (G
.debug_file
,
945 "Allocating page at %p, object size=%lu, data %p-%p\n",
946 (void *) entry
, (unsigned long) OBJECT_SIZE (order
), page
,
947 page
+ entry_size
- 1);
952 /* Adjust the size of G.depth so that no index greater than the one
953 used by the top of the G.by_depth is used. */
960 if (G
.by_depth_in_use
)
962 top
= G
.by_depth
[G
.by_depth_in_use
-1];
964 /* Peel back indices in depth that index into by_depth, so that
965 as new elements are added to by_depth, we note the indices
966 of those elements, if they are for new context depths. */
967 while (G
.depth_in_use
> (size_t)top
->context_depth
+1)
972 /* For a page that is no longer needed, put it on the free page list. */
975 free_page (page_entry
*entry
)
977 if (GGC_DEBUG_LEVEL
>= 2)
978 fprintf (G
.debug_file
,
979 "Deallocating page at %p, data %p-%p\n", (void *) entry
,
980 entry
->page
, entry
->page
+ entry
->bytes
- 1);
982 /* Mark the page as inaccessible. Discard the handle to avoid handle
984 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry
->page
, entry
->bytes
));
986 set_page_table_entry (entry
->page
, NULL
);
988 #ifdef USING_MALLOC_PAGE_GROUPS
989 clear_page_group_in_use (entry
->group
, entry
->page
);
992 if (G
.by_depth_in_use
> 1)
994 page_entry
*top
= G
.by_depth
[G
.by_depth_in_use
-1];
995 int i
= entry
->index_by_depth
;
997 /* We cannot free a page from a context deeper than the current
999 gcc_assert (entry
->context_depth
== top
->context_depth
);
1001 /* Put top element into freed slot. */
1002 G
.by_depth
[i
] = top
;
1003 G
.save_in_use
[i
] = G
.save_in_use
[G
.by_depth_in_use
-1];
1004 top
->index_by_depth
= i
;
1006 --G
.by_depth_in_use
;
1010 entry
->next
= G
.free_pages
;
1011 G
.free_pages
= entry
;
1014 /* Release the free page cache to the system. */
1017 release_pages (void)
1019 #ifdef USING_MADVISE
1020 page_entry
*p
, *start_p
;
1024 page_entry
*next
, *prev
, *newprev
;
1025 size_t free_unit
= (GGC_QUIRE_SIZE
/2) * G
.pagesize
;
/* First free larger contiguous areas to the OS.
1028 This allows other allocators to grab these areas if needed.
1029 This is only done on larger chunks to avoid fragmentation.
1030 This does not always work because the free_pages list is only
1031 approximately sorted. */
1042 while (p
&& p
->page
== start
+ len
)
1046 mapped_len
+= p
->bytes
;
1050 if (len
>= free_unit
)
1052 while (start_p
!= p
)
1054 next
= start_p
->next
;
1058 munmap (start
, len
);
1063 G
.bytes_mapped
-= mapped_len
;
1069 /* Now give back the fragmented pages to the OS, but keep the address
1070 space to reuse it next time. */
1072 for (p
= G
.free_pages
; p
; )
1083 while (p
&& p
->page
== start
+ len
)
1088 /* Give the page back to the kernel, but don't free the mapping.
1089 This avoids fragmentation in the virtual memory map of the
1090 process. Next time we can reuse it by just touching it. */
1091 madvise (start
, len
, MADV_DONTNEED
);
1092 /* Don't count those pages as mapped to not touch the garbage collector
1094 G
.bytes_mapped
-= len
;
1095 while (start_p
!= p
)
1097 start_p
->discarded
= true;
1098 start_p
= start_p
->next
;
1102 #if defined(USING_MMAP) && !defined(USING_MADVISE)
1103 page_entry
*p
, *next
;
1107 /* Gather up adjacent pages so they are unmapped together. */
1118 while (p
&& p
->page
== start
+ len
)
1126 munmap (start
, len
);
1127 G
.bytes_mapped
-= len
;
1130 G
.free_pages
= NULL
;
1132 #ifdef USING_MALLOC_PAGE_GROUPS
1133 page_entry
**pp
, *p
;
1134 page_group
**gp
, *g
;
1136 /* Remove all pages from free page groups from the list. */
1138 while ((p
= *pp
) != NULL
)
1139 if (p
->group
->in_use
== 0)
1147 /* Remove all free page groups, and release the storage. */
1148 gp
= &G
.page_groups
;
1149 while ((g
= *gp
) != NULL
)
1153 G
.bytes_mapped
-= g
->alloc_size
;
1154 free (g
->allocation
);
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
1166 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1167 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1168 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1169 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1170 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1171 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1172 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1173 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1174 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1175 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1176 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1177 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1178 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1179 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1180 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1181 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1182 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1183 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1184 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1185 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1186 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1187 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1188 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1189 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1190 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1191 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1192 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1193 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1194 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1195 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1196 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
1200 /* For a given size of memory requested for allocation, return the
1201 actual size that is going to be allocated, as well as the size
1205 ggc_round_alloc_size_1 (size_t requested_size
,
1207 size_t *alloced_size
)
1209 size_t order
, object_size
;
1211 if (requested_size
< NUM_SIZE_LOOKUP
)
1213 order
= size_lookup
[requested_size
];
1214 object_size
= OBJECT_SIZE (order
);
1219 while (requested_size
> (object_size
= OBJECT_SIZE (order
)))
1224 *size_order
= order
;
1226 *alloced_size
= object_size
;
1229 /* For a given size of memory requested for allocation, return the
1230 actual size that is going to be allocated. */
1233 ggc_round_alloc_size (size_t requested_size
)
1237 ggc_round_alloc_size_1 (requested_size
, NULL
, &size
);
1241 /* Push a finalizer onto the appropriate vec. */
1244 add_finalizer (void *result
, void (*f
)(void *), size_t s
, size_t n
)
1247 /* No finalizer. */;
1250 finalizer
fin (result
, f
);
1251 G
.finalizers
[G
.context_depth
].safe_push (fin
);
1255 vec_finalizer
fin (reinterpret_cast<uintptr_t> (result
), f
, s
, n
);
1256 G
.vec_finalizers
[G
.context_depth
].safe_push (fin
);
1260 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1263 ggc_internal_alloc (size_t size
, void (*f
)(void *), size_t s
, size_t n
1266 size_t order
, word
, bit
, object_offset
, object_size
;
1267 struct page_entry
*entry
;
1270 ggc_round_alloc_size_1 (size
, &order
, &object_size
);
1272 /* If there are non-full pages for this size allocation, they are at
1273 the head of the list. */
1274 entry
= G
.pages
[order
];
1276 /* If there is no page for this object size, or all pages in this
1277 context are full, allocate a new page. */
1278 if (entry
== NULL
|| entry
->num_free_objects
== 0)
1280 struct page_entry
*new_entry
;
1281 new_entry
= alloc_page (order
);
1283 new_entry
->index_by_depth
= G
.by_depth_in_use
;
1284 push_by_depth (new_entry
, 0);
1286 /* We can skip context depths, if we do, make sure we go all the
1287 way to the new depth. */
1288 while (new_entry
->context_depth
>= G
.depth_in_use
)
1289 push_depth (G
.by_depth_in_use
-1);
1291 /* If this is the only entry, it's also the tail. If it is not
1292 the only entry, then we must update the PREV pointer of the
1293 ENTRY (G.pages[order]) to point to our new page entry. */
1295 G
.page_tails
[order
] = new_entry
;
1297 entry
->prev
= new_entry
;
1299 /* Put new pages at the head of the page list. By definition the
1300 entry at the head of the list always has a NULL pointer. */
1301 new_entry
->next
= entry
;
1302 new_entry
->prev
= NULL
;
1304 G
.pages
[order
] = new_entry
;
1306 /* For a new page, we know the word and bit positions (in the
1307 in_use bitmap) of the first available object -- they're zero. */
1308 new_entry
->next_bit_hint
= 1;
1315 /* First try to use the hint left from the previous allocation
1316 to locate a clear bit in the in-use bitmap. We've made sure
1317 that the one-past-the-end bit is always set, so if the hint
1318 has run over, this test will fail. */
1319 unsigned hint
= entry
->next_bit_hint
;
1320 word
= hint
/ HOST_BITS_PER_LONG
;
1321 bit
= hint
% HOST_BITS_PER_LONG
;
1323 /* If the hint didn't work, scan the bitmap from the beginning. */
1324 if ((entry
->in_use_p
[word
] >> bit
) & 1)
1327 while (~entry
->in_use_p
[word
] == 0)
1330 #if GCC_VERSION >= 3004
1331 bit
= __builtin_ctzl (~entry
->in_use_p
[word
]);
1333 while ((entry
->in_use_p
[word
] >> bit
) & 1)
1337 hint
= word
* HOST_BITS_PER_LONG
+ bit
;
1340 /* Next time, try the next bit. */
1341 entry
->next_bit_hint
= hint
+ 1;
1343 object_offset
= hint
* object_size
;
1346 /* Set the in-use bit. */
1347 entry
->in_use_p
[word
] |= ((unsigned long) 1 << bit
);
1349 /* Keep a running total of the number of free objects. If this page
1350 fills up, we may have to move it to the end of the list if the
1351 next page isn't full. If the next page is full, all subsequent
1352 pages are full, so there's no need to move it. */
1353 if (--entry
->num_free_objects
== 0
1354 && entry
->next
!= NULL
1355 && entry
->next
->num_free_objects
> 0)
1357 /* We have a new head for the list. */
1358 G
.pages
[order
] = entry
->next
;
1360 /* We are moving ENTRY to the end of the page table list.
1361 The new page at the head of the list will have NULL in
1362 its PREV field and ENTRY will have NULL in its NEXT field. */
1363 entry
->next
->prev
= NULL
;
1366 /* Append ENTRY to the tail of the list. */
1367 entry
->prev
= G
.page_tails
[order
];
1368 G
.page_tails
[order
]->next
= entry
;
1369 G
.page_tails
[order
] = entry
;
1372 /* Calculate the object's address. */
1373 result
= entry
->page
+ object_offset
;
1374 if (GATHER_STATISTICS
)
1375 ggc_record_overhead (OBJECT_SIZE (order
), OBJECT_SIZE (order
) - size
,
1376 result FINAL_PASS_MEM_STAT
);
1378 #ifdef ENABLE_GC_CHECKING
1379 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1380 exact same semantics in presence of memory bugs, regardless of
1381 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1382 handle to avoid handle leak. */
1383 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result
, object_size
));
1385 /* `Poison' the entire allocated object, including any padding at
1387 memset (result
, 0xaf, object_size
);
/* Make the bytes after the end of the object inaccessible.  Discard the
1390 handle to avoid handle leak. */
1391 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result
+ size
,
1392 object_size
- size
));
1395 /* Tell Valgrind that the memory is there, but its content isn't
1396 defined. The bytes at the end of the object are still marked
1398 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result
, size
));
1400 /* Keep track of how many bytes are being allocated. This
1401 information is used in deciding when to collect. */
1402 G
.allocated
+= object_size
;
1404 /* For timevar statistics. */
1405 timevar_ggc_mem_total
+= object_size
;
1408 add_finalizer (result
, f
, s
, n
);
1410 if (GATHER_STATISTICS
)
1412 size_t overhead
= object_size
- size
;
1414 G
.stats
.total_overhead
+= overhead
;
1415 G
.stats
.total_allocated
+= object_size
;
1416 G
.stats
.total_overhead_per_order
[order
] += overhead
;
1417 G
.stats
.total_allocated_per_order
[order
] += object_size
;
1421 G
.stats
.total_overhead_under32
+= overhead
;
1422 G
.stats
.total_allocated_under32
+= object_size
;
1426 G
.stats
.total_overhead_under64
+= overhead
;
1427 G
.stats
.total_allocated_under64
+= object_size
;
1431 G
.stats
.total_overhead_under128
+= overhead
;
1432 G
.stats
.total_allocated_under128
+= object_size
;
1436 if (GGC_DEBUG_LEVEL
>= 3)
1437 fprintf (G
.debug_file
,
1438 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1439 (unsigned long) size
, (unsigned long) object_size
, result
,
1445 /* Mark function for strings. */
1448 gt_ggc_m_S (const void *p
)
1453 unsigned long offset
;
1458 /* Look up the page on which the object is alloced. If it was not
1459 GC allocated, gracefully bail out. */
1460 entry
= safe_lookup_page_table_entry (p
);
1464 /* Calculate the index of the object on the page; this is its bit
1465 position in the in_use_p bitmap. Note that because a char* might
1466 point to the middle of an object, we need special code here to
1467 make sure P points to the start of an object. */
1468 offset
= ((const char *) p
- entry
->page
) % object_size_table
[entry
->order
];
1471 /* Here we've seen a char* which does not point to the beginning
1472 of an allocated object. We assume it points to the middle of
1474 gcc_assert (offset
== offsetof (struct tree_string
, str
));
1475 p
= ((const char *) p
) - offset
;
1476 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p
));
1480 bit
= OFFSET_TO_BIT (((const char *) p
) - entry
->page
, entry
->order
);
1481 word
= bit
/ HOST_BITS_PER_LONG
;
1482 mask
= (unsigned long) 1 << (bit
% HOST_BITS_PER_LONG
);
1484 /* If the bit was previously set, skip it. */
1485 if (entry
->in_use_p
[word
] & mask
)
1488 /* Otherwise set it, and decrement the free object count. */
1489 entry
->in_use_p
[word
] |= mask
;
1490 entry
->num_free_objects
-= 1;
1492 if (GGC_DEBUG_LEVEL
>= 4)
1493 fprintf (G
.debug_file
, "Marking %p\n", p
);
1499 /* User-callable entry points for marking string X. */
1502 gt_ggc_mx (const char *& x
)
1508 gt_ggc_mx (unsigned char *& x
)
1514 gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED
)
/* If P is not marked, mark it and return false.  Otherwise return true.
1519 P must have been allocated by the GC allocator; it mustn't point to
1520 static objects, stack variables, or memory allocated with malloc. */
1523 ggc_set_mark (const void *p
)
1529 /* Look up the page on which the object is alloced. If the object
1530 wasn't allocated by the collector, we'll probably die. */
1531 entry
= lookup_page_table_entry (p
);
1534 /* Calculate the index of the object on the page; this is its bit
1535 position in the in_use_p bitmap. */
1536 bit
= OFFSET_TO_BIT (((const char *) p
) - entry
->page
, entry
->order
);
1537 word
= bit
/ HOST_BITS_PER_LONG
;
1538 mask
= (unsigned long) 1 << (bit
% HOST_BITS_PER_LONG
);
1540 /* If the bit was previously set, skip it. */
1541 if (entry
->in_use_p
[word
] & mask
)
1544 /* Otherwise set it, and decrement the free object count. */
1545 entry
->in_use_p
[word
] |= mask
;
1546 entry
->num_free_objects
-= 1;
1548 if (GGC_DEBUG_LEVEL
>= 4)
1549 fprintf (G
.debug_file
, "Marking %p\n", p
);
1554 /* Return 1 if P has been marked, zero otherwise.
1555 P must have been allocated by the GC allocator; it mustn't point to
1556 static objects, stack variables, or memory allocated with malloc. */
1559 ggc_marked_p (const void *p
)
1565 /* Look up the page on which the object is alloced. If the object
1566 wasn't allocated by the collector, we'll probably die. */
1567 entry
= lookup_page_table_entry (p
);
1570 /* Calculate the index of the object on the page; this is its bit
1571 position in the in_use_p bitmap. */
1572 bit
= OFFSET_TO_BIT (((const char *) p
) - entry
->page
, entry
->order
);
1573 word
= bit
/ HOST_BITS_PER_LONG
;
1574 mask
= (unsigned long) 1 << (bit
% HOST_BITS_PER_LONG
);
1576 return (entry
->in_use_p
[word
] & mask
) != 0;
1579 /* Return the size of the gc-able object P. */
1582 ggc_get_size (const void *p
)
1584 page_entry
*pe
= lookup_page_table_entry (p
);
1585 return OBJECT_SIZE (pe
->order
);
1588 /* Release the memory for object P. */
1596 page_entry
*pe
= lookup_page_table_entry (p
);
1597 size_t order
= pe
->order
;
1598 size_t size
= OBJECT_SIZE (order
);
1600 if (GATHER_STATISTICS
)
1601 ggc_free_overhead (p
);
1603 if (GGC_DEBUG_LEVEL
>= 3)
1604 fprintf (G
.debug_file
,
1605 "Freeing object, actual size=%lu, at %p on %p\n",
1606 (unsigned long) size
, p
, (void *) pe
);
1608 #ifdef ENABLE_GC_CHECKING
1609 /* Poison the data, to indicate the data is garbage. */
1610 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p
, size
));
1611 memset (p
, 0xa5, size
);
1613 /* Let valgrind know the object is free. */
1614 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p
, size
));
1616 #ifdef ENABLE_GC_ALWAYS_COLLECT
1617 /* In the completely-anal-checking mode, we do *not* immediately free
1618 the data, but instead verify that the data is *actually* not
1619 reachable the next time we collect. */
1621 struct free_object
*fo
= XNEW (struct free_object
);
1623 fo
->next
= G
.free_object_list
;
1624 G
.free_object_list
= fo
;
1628 unsigned int bit_offset
, word
, bit
;
1630 G
.allocated
-= size
;
1632 /* Mark the object not-in-use. */
1633 bit_offset
= OFFSET_TO_BIT (((const char *) p
) - pe
->page
, order
);
1634 word
= bit_offset
/ HOST_BITS_PER_LONG
;
1635 bit
= bit_offset
% HOST_BITS_PER_LONG
;
1636 pe
->in_use_p
[word
] &= ~(1UL << bit
);
1638 if (pe
->num_free_objects
++ == 0)
1642 /* If the page is completely full, then it's supposed to
1643 be after all pages that aren't. Since we've freed one
1644 object from a page that was full, we need to move the
1645 page to the head of the list.
1647 PE is the node we want to move. Q is the previous node
1648 and P is the next node in the list. */
1650 if (q
&& q
->num_free_objects
== 0)
1656 /* If PE was at the end of the list, then Q becomes the
1657 new end of the list. If PE was not the end of the
1658 list, then we need to update the PREV field for P. */
1660 G
.page_tails
[order
] = q
;
1664 /* Move PE to the head of the list. */
1665 pe
->next
= G
.pages
[order
];
1667 G
.pages
[order
]->prev
= pe
;
1668 G
.pages
[order
] = pe
;
1671 /* Reset the hint bit to point to the only free object. */
1672 pe
->next_bit_hint
= bit_offset
;
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
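/* Illustration only: a standalone version of the iteration above, worked
   through for one object size.  Starting from INV = SIZE (odd), each step
   doubles the number of correct low-order bits of the inverse, so a handful
   of iterations suffice for a 32- or 64-bit word.  The test values below are
   assumptions for the example, not data taken from the allocator.  */
#if 0
static void
example_exact_division (void)
{
  unsigned int size = 3, e = 3;		/* object size 24 == 3 << 3  */
  unsigned int inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv * size);	/* Newton step, modulo 2^32  */
  /* Now for any offset divisible by 24, (offset * inv) >> e == offset / 24.  */
  unsigned int offset = 240;
  unsigned int index = (offset * inv) >> e;	/* == 10  */
  (void) index;
}
#endif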
1708 /* Initialize the ggc-mmap allocator. */
1712 static bool init_p
= false;
1719 G
.pagesize
= getpagesize ();
1720 G
.lg_pagesize
= exact_log2 (G
.pagesize
);
1722 #ifdef HAVE_MMAP_DEV_ZERO
1723 G
.dev_zero_fd
= open ("/dev/zero", O_RDONLY
);
1724 if (G
.dev_zero_fd
== -1)
1725 internal_error ("open /dev/zero: %m");
1729 G
.debug_file
= fopen ("ggc-mmap.debug", "w");
1731 G
.debug_file
= stdout
;
1735 /* StunOS has an amazing off-by-one error for the first mmap allocation
1736 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1737 believe, is an unaligned page allocation, which would cause us to
1738 hork badly if we tried to use it. */
1740 char *p
= alloc_anon (NULL
, G
.pagesize
, true);
1741 struct page_entry
*e
;
1742 if ((uintptr_t)p
& (G
.pagesize
- 1))
1744 /* How losing. Discard this one and try another. If we still
1745 can't get something useful, give up. */
1747 p
= alloc_anon (NULL
, G
.pagesize
, true);
1748 gcc_assert (!((uintptr_t)p
& (G
.pagesize
- 1)));
1751 /* We have a good page, might as well hold onto it... */
1752 e
= XCNEW (struct page_entry
);
1753 e
->bytes
= G
.pagesize
;
1755 e
->next
= G
.free_pages
;
1760 /* Initialize the object size table. */
1761 for (order
= 0; order
< HOST_BITS_PER_PTR
; ++order
)
1762 object_size_table
[order
] = (size_t) 1 << order
;
1763 for (order
= HOST_BITS_PER_PTR
; order
< NUM_ORDERS
; ++order
)
1765 size_t s
= extra_order_size_table
[order
- HOST_BITS_PER_PTR
];
1767 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1768 so that we're sure of getting aligned memory. */
1769 s
= ROUND_UP (s
, MAX_ALIGNMENT
);
1770 object_size_table
[order
] = s
;
1773 /* Initialize the objects-per-page and inverse tables. */
1774 for (order
= 0; order
< NUM_ORDERS
; ++order
)
1776 objects_per_page_table
[order
] = G
.pagesize
/ OBJECT_SIZE (order
);
1777 if (objects_per_page_table
[order
] == 0)
1778 objects_per_page_table
[order
] = 1;
1779 compute_inverse (order
);
1782 /* Reset the size_lookup array to put appropriately sized objects in
1783 the special orders. All objects bigger than the previous power
1784 of two, but no greater than the special size, should go in the
1786 for (order
= HOST_BITS_PER_PTR
; order
< NUM_ORDERS
; ++order
)
1791 i
= OBJECT_SIZE (order
);
1792 if (i
>= NUM_SIZE_LOOKUP
)
1795 for (o
= size_lookup
[i
]; o
== size_lookup
[i
]; --i
)
1796 size_lookup
[i
] = order
;
1801 G
.depth
= XNEWVEC (unsigned int, G
.depth_max
);
1803 G
.by_depth_in_use
= 0;
1804 G
.by_depth_max
= INITIAL_PTE_COUNT
;
1805 G
.by_depth
= XNEWVEC (page_entry
*, G
.by_depth_max
);
1806 G
.save_in_use
= XNEWVEC (unsigned long *, G
.by_depth_max
);
1808 /* Allocate space for the depth 0 finalizers. */
1809 G
.finalizers
.safe_push (vNULL
);
1810 G
.vec_finalizers
.safe_push (vNULL
);
1811 gcc_assert (G
.finalizers
.length() == 1);
1814 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1815 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1818 ggc_recalculate_in_use_p (page_entry
*p
)
1823 /* Because the past-the-end bit in in_use_p is always set, we
1824 pretend there is one additional object. */
1825 num_objects
= OBJECTS_IN_PAGE (p
) + 1;
1827 /* Reset the free object count. */
1828 p
->num_free_objects
= num_objects
;
1830 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1832 i
< CEIL (BITMAP_SIZE (num_objects
),
1833 sizeof (*p
->in_use_p
));
1838 /* Something is in use if it is marked, or if it was in use in a
1839 context further down the context stack. */
1840 p
->in_use_p
[i
] |= save_in_use_p (p
)[i
];
1842 /* Decrement the free object count for every object allocated. */
1843 for (j
= p
->in_use_p
[i
]; j
; j
>>= 1)
1844 p
->num_free_objects
-= (j
& 1);
1847 gcc_assert (p
->num_free_objects
< num_objects
);
1850 /* Unmark all objects. */
1857 for (order
= 2; order
< NUM_ORDERS
; order
++)
1861 for (p
= G
.pages
[order
]; p
!= NULL
; p
= p
->next
)
1863 size_t num_objects
= OBJECTS_IN_PAGE (p
);
1864 size_t bitmap_size
= BITMAP_SIZE (num_objects
+ 1);
1866 /* The data should be page-aligned. */
1867 gcc_assert (!((uintptr_t) p
->page
& (G
.pagesize
- 1)));
1869 /* Pages that aren't in the topmost context are not collected;
1870 nevertheless, we need their in-use bit vectors to store GC
1871 marks. So, back them up first. */
1872 if (p
->context_depth
< G
.context_depth
)
1874 if (! save_in_use_p (p
))
1875 save_in_use_p (p
) = XNEWVAR (unsigned long, bitmap_size
);
1876 memcpy (save_in_use_p (p
), p
->in_use_p
, bitmap_size
);
/* Reset the number of free objects and clear the
1880 in-use bits. These will be adjusted by mark_obj. */
1881 p
->num_free_objects
= num_objects
;
1882 memset (p
->in_use_p
, 0, bitmap_size
);
1884 /* Make sure the one-past-the-end bit is always set. */
1885 p
->in_use_p
[num_objects
/ HOST_BITS_PER_LONG
]
1886 = ((unsigned long) 1 << (num_objects
% HOST_BITS_PER_LONG
));
1891 /* Check if any blocks with a registered finalizer have become unmarked. If so
1892 run the finalizer and unregister it because the block is about to be freed.
Note that no guarantee is made about what order finalizers will run in, so
1894 touching other objects in gc memory is extremely unwise. */
1897 ggc_handle_finalizers ()
1899 unsigned dlen
= G
.finalizers
.length();
1900 for (unsigned d
= G
.context_depth
; d
< dlen
; ++d
)
1902 vec
<finalizer
> &v
= G
.finalizers
[d
];
1903 unsigned length
= v
.length ();
1904 for (unsigned int i
= 0; i
< length
;)
1906 finalizer
&f
= v
[i
];
1907 if (!ggc_marked_p (f
.addr ()))
1910 v
.unordered_remove (i
);
1918 gcc_assert (dlen
== G
.vec_finalizers
.length());
1919 for (unsigned d
= G
.context_depth
; d
< dlen
; ++d
)
1921 vec
<vec_finalizer
> &vv
= G
.vec_finalizers
[d
];
1922 unsigned length
= vv
.length ();
1923 for (unsigned int i
= 0; i
< length
;)
1925 vec_finalizer
&f
= vv
[i
];
1926 if (!ggc_marked_p (f
.addr ()))
1929 vv
.unordered_remove (i
);
1938 /* Free all empty pages. Partially empty pages need no attention
1939 because the `mark' bit doubles as an `unused' bit. */
1946 for (order
= 2; order
< NUM_ORDERS
; order
++)
1948 /* The last page-entry to consider, regardless of entries
1949 placed at the end of the list. */
1950 page_entry
* const last
= G
.page_tails
[order
];
1953 size_t live_objects
;
1954 page_entry
*p
, *previous
;
1964 page_entry
*next
= p
->next
;
1966 /* Loop until all entries have been examined. */
1969 num_objects
= OBJECTS_IN_PAGE (p
);
1971 /* Add all live objects on this page to the count of
1972 allocated memory. */
1973 live_objects
= num_objects
- p
->num_free_objects
;
1975 G
.allocated
+= OBJECT_SIZE (order
) * live_objects
;
1977 /* Only objects on pages in the topmost context should get
1979 if (p
->context_depth
< G
.context_depth
)
1982 /* Remove the page if it's empty. */
1983 else if (live_objects
== 0)
1985 /* If P was the first page in the list, then NEXT
1986 becomes the new first page in the list, otherwise
1987 splice P out of the forward pointers. */
1989 G
.pages
[order
] = next
;
1991 previous
->next
= next
;
1993 /* Splice P out of the back pointers too. */
1995 next
->prev
= previous
;
1997 /* Are we removing the last element? */
1998 if (p
== G
.page_tails
[order
])
1999 G
.page_tails
[order
] = previous
;
2004 /* If the page is full, move it to the end. */
2005 else if (p
->num_free_objects
== 0)
2007 /* Don't move it if it's already at the end. */
2008 if (p
!= G
.page_tails
[order
])
2010 /* Move p to the end of the list. */
2012 p
->prev
= G
.page_tails
[order
];
2013 G
.page_tails
[order
]->next
= p
;
2015 /* Update the tail pointer... */
2016 G
.page_tails
[order
] = p
;
2018 /* ... and the head pointer, if necessary. */
2020 G
.pages
[order
] = next
;
2022 previous
->next
= next
;
2024 /* And update the backpointer in NEXT if necessary. */
2026 next
->prev
= previous
;
2032 /* If we've fallen through to here, it's a page in the
2033 topmost context that is neither full nor empty. Such a
2034 page must precede pages at lesser context depth in the
2035 list, so move it to the head. */
2036 else if (p
!= G
.pages
[order
])
2038 previous
->next
= p
->next
;
2040 /* Update the backchain in the next node if it exists. */
2042 p
->next
->prev
= previous
;
2044 /* Move P to the head of the list. */
2045 p
->next
= G
.pages
[order
];
2047 G
.pages
[order
]->prev
= p
;
2049 /* Update the head pointer. */
2052 /* Are we moving the last element? */
2053 if (G
.page_tails
[order
] == p
)
2054 G
.page_tails
[order
] = previous
;
2063 /* Now, restore the in_use_p vectors for any pages from contexts
2064 other than the current one. */
2065 for (p
= G
.pages
[order
]; p
; p
= p
->next
)
2066 if (p
->context_depth
!= G
.context_depth
)
2067 ggc_recalculate_in_use_p (p
);
2071 #ifdef ENABLE_GC_CHECKING
2072 /* Clobber all free objects. */
2079 for (order
= 2; order
< NUM_ORDERS
; order
++)
2081 size_t size
= OBJECT_SIZE (order
);
2084 for (p
= G
.pages
[order
]; p
!= NULL
; p
= p
->next
)
2089 if (p
->context_depth
!= G
.context_depth
)
2090 /* Since we don't do any collection for pages in pushed
2091 contexts, there's no need to do any poisoning. And
2092 besides, the IN_USE_P array isn't valid until we pop
2096 num_objects
= OBJECTS_IN_PAGE (p
);
2097 for (i
= 0; i
< num_objects
; i
++)
2100 word
= i
/ HOST_BITS_PER_LONG
;
2101 bit
= i
% HOST_BITS_PER_LONG
;
2102 if (((p
->in_use_p
[word
] >> bit
) & 1) == 0)
2104 char *object
= p
->page
+ i
* size
;
2106 /* Keep poison-by-write when we expect to use Valgrind,
2107 so the exact same memory semantics is kept, in case
2108 there are memory errors. We override this request
2110 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object
,
2112 memset (object
, 0xa5, size
);
2114 /* Drop the handle to avoid handle leak. */
2115 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object
, size
));
2122 #define poison_pages()
2125 #ifdef ENABLE_GC_ALWAYS_COLLECT
2126 /* Validate that the reportedly free objects actually are. */
2129 validate_free_objects (void)
2131 struct free_object
*f
, *next
, *still_free
= NULL
;
2133 for (f
= G
.free_object_list
; f
; f
= next
)
2135 page_entry
*pe
= lookup_page_table_entry (f
->object
);
2138 bit
= OFFSET_TO_BIT ((char *)f
->object
- pe
->page
, pe
->order
);
2139 word
= bit
/ HOST_BITS_PER_LONG
;
2140 bit
= bit
% HOST_BITS_PER_LONG
;
2143 /* Make certain it isn't visible from any root. Notice that we
2144 do this check before sweep_pages merges save_in_use_p. */
2145 gcc_assert (!(pe
->in_use_p
[word
] & (1UL << bit
)));
2147 /* If the object comes from an outer context, then retain the
2148 free_object entry, so that we can verify that the address
2149 isn't live on the stack in some outer context. */
2150 if (pe
->context_depth
!= G
.context_depth
)
2152 f
->next
= still_free
;
2159 G
.free_object_list
= still_free
;
2162 #define validate_free_objects()
2165 /* Top level mark-and-sweep routine. */
2170 /* Avoid frequent unnecessary work by skipping collection if the
2171 total allocations haven't expanded much since the last
2173 float allocated_last_gc
=
2174 MAX (G
.allocated_last_gc
, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE
) * 1024);
2176 float min_expand
= allocated_last_gc
* PARAM_VALUE (GGC_MIN_EXPAND
) / 100;
2177 if (G
.allocated
< allocated_last_gc
+ min_expand
&& !ggc_force_collect
)
2180 timevar_push (TV_GC
);
2182 fprintf (stderr
, " {GC %luk -> ", (unsigned long) G
.allocated
/ 1024);
2183 if (GGC_DEBUG_LEVEL
>= 2)
2184 fprintf (G
.debug_file
, "BEGIN COLLECTING\n");
2186 /* Zero the total allocated bytes. This will be recalculated in the
2190 /* Release the pages we freed the last time we collected, but didn't
2191 reuse in the interim. */
2194 /* Indicate that we've seen collections at this context depth. */
2195 G
.context_depth_collections
= ((unsigned long)1 << (G
.context_depth
+ 1)) - 1;
2197 invoke_plugin_callbacks (PLUGIN_GGC_START
, NULL
);
2202 ggc_handle_finalizers ();
2204 if (GATHER_STATISTICS
)
2205 ggc_prune_overhead_list ();
2208 validate_free_objects ();
2212 G
.allocated_last_gc
= G
.allocated
;
2214 invoke_plugin_callbacks (PLUGIN_GGC_END
, NULL
);
2216 timevar_pop (TV_GC
);
2219 fprintf (stderr
, "%luk}", (unsigned long) G
.allocated
/ 1024);
2220 if (GGC_DEBUG_LEVEL
>= 2)
2221 fprintf (G
.debug_file
, "END COLLECTING\n");
2224 /* Assume that all GGC memory is reachable and grow the limits for next collection.
2225 With checking, trigger GGC so -Q compilation outputs how much of memory really is
2232 G
.allocated_last_gc
= MAX (G
.allocated_last_gc
,
2237 fprintf (stderr
, " {GC start %luk} ", (unsigned long) G
.allocated
/ 1024);
2240 /* Print allocation statistics. */
2241 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2243 : ((x) < 1024*1024*10 \
2245 : (x) / (1024*1024))))
2246 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
2249 ggc_print_statistics (void)
2251 struct ggc_statistics stats
;
2253 size_t total_overhead
= 0;
2255 /* Clear the statistics. */
2256 memset (&stats
, 0, sizeof (stats
));
2258 /* Make sure collection will really occur. */
2259 G
.allocated_last_gc
= 0;
2261 /* Collect and print the statistics common across collectors. */
2262 ggc_print_common_statistics (stderr
, &stats
);
2264 /* Release free pages so that we will not count the bytes allocated
2265 there as part of the total allocated memory. */
  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
	   "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-8s %10s %10s %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-8lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-8s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL (G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));
  if (GATHER_STATISTICS)
    {
      fprintf (stderr, "\nTotal allocations and overheads during "
	       "the compilation process\n");

      fprintf (stderr, "Total Overhead: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead);
      fprintf (stderr, "Total Allocated: %10"
	       HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_allocated);

      fprintf (stderr, "Total Overhead under 32B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under32);
      fprintf (stderr, "Total Allocated under 32B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under32);
      fprintf (stderr, "Total Overhead under 64B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under64);
      fprintf (stderr, "Total Allocated under 64B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under64);
      fprintf (stderr, "Total Overhead under 128B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under128);
      fprintf (stderr, "Total Allocated under 128B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under128);
      for (i = 0; i < NUM_ORDERS; i++)
	if (G.stats.total_allocated_per_order[i])
	  {
	    fprintf (stderr, "Total Overhead page size %9lu: %10"
		     HOST_LONG_LONG_FORMAT "d\n",
		     (unsigned long) OBJECT_SIZE (i),
		     G.stats.total_overhead_per_order[i]);
	    fprintf (stderr, "Total Allocated page size %9lu: %10"
		     HOST_LONG_LONG_FORMAT "d\n",
		     (unsigned long) OBJECT_SIZE (i),
		     G.stats.total_allocated_per_order[i]);
	  }
    }
}
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  uintptr_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}
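/* The hooks below are this collector's half of the PCH interface;
   roughly, the PCH writer (gt_pch_save in ggc-common.c) first calls
   ggc_pch_count_object for every live object, then ggc_pch_total_size
   and ggc_pch_this_base to lay out the image, ggc_pch_alloc_object to
   assign each object its relocated address, and finally the
   prepare_write/write_object/finish hooks to emit the data, while
   ggc_pch_read rebuilds the page tables when the PCH is mapped back in.  */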
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
  return a;
}
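/* For illustration, assuming a 4 kB page size: a PCH containing 1000
   16-byte objects and 500 32-byte objects needs PAGE_ALIGN (16000)
   + PAGE_ALIGN (16000) = 16384 + 16384 = 32768 bytes, since each
   object order is padded out to a whole number of pages.  */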
void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  uintptr_t a = (uintptr_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
    }
}
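/* For illustration, continuing the example above: the 16-byte order
   starts at BASE itself, the 32-byte order at BASE + 16384, and orders
   with no objects simply inherit the running offset.  */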
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}
void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
void
ggc_pch_write_object (struct ggc_pch_data *d,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
	 than most padding requests as the source for our null bytes.  This
	 permits us to do the padding with fwrite() rather than fseek(), and
	 limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
	{
	  if (fwrite (emptyBytes, 1, padding, f) != padding)
	    fatal_error (input_location, "can%'t write PCH file");
	}
      else
	{
	  /* Larger than our buffer?  Just default to fseek.  */
	  if (fseek (f, padding, SEEK_CUR) != 0)
	    fatal_error (input_location, "can%'t write PCH file");
	}
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error (input_location, "can%'t write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");
  free (d);
}
/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (unsigned i = G.by_depth_in_use; i--;)
    {
      page_entry *p = G.by_depth[i];
      p->index_by_depth = i;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
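/* For illustration: if by_depth held [O0 O1 O2 N0 N1], i.e. three old
   PTEs followed by two PCH PTEs just pushed at the end, the copies above
   produce [N0 N1 O0 O1 O2], so the context-0 PCH entries end up at the
   front and the old entries keep their relative order starting at index
   count_new_page_tables.  */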
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  /* Allocate space for the depth 1 finalizers.  */
  G.finalizers.safe_push (vNULL);
  G.vec_finalizers.safe_push (vNULL);
  gcc_assert (G.finalizers.length() == 2);
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }
  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error (input_location, "can%'t read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);
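      /* For illustration: the bitmap is sized for num_objs + 1 bits and
	 every one of them is set, with num_free_objects left at zero, so
	 the allocator never hands out space from a PCH page and, together
	 with the depth-0/depth-1 split set up above, objects restored
	 from the PCH are never freed.  Whole words are set to -1 (all
	 ones); the remainder is set bit by bit.  */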
      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays, later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }
  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}