1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999-2023 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "ggc-internal.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |    |      |
			 PAGE_L1_BITS   |
				 |      |
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
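/* Worked example (added for exposition; not part of the original source;
   the function name and the 4K page size are assumptions).  With
   lg_pagesize == 12 and PAGE_L1_BITS == 8 as set above, the address
   0xdeadb000 splits into L1 == 0xde, L2 == 0xadb, with the low 12 bits
   being the offset within the page.  */

static void
ggc_example_lookup_indices (void)
{
  const unsigned lg_pagesize = 12;		/* assumed 4K pages */
  const uintptr_t p = 0xdeadb000;
  size_t l1 = (p >> (32 - 8)) & 0xff;		/* as LOOKUP_L1 (p) */
  size_t l2 = (p >> lg_pagesize)
	      & ((1 << (32 - 8 - lg_pagesize)) - 1); /* as LOOKUP_L2 (p) */
  gcc_assert (l1 == 0xde && l2 == 0xadb);
}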
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
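/* Worked example of the reciprocal trick above (added for exposition; the
   constants are assumptions for a 32-bit computation with object size
   Z == 24, not values taken from inverse_table).  24 == 3 << 3, so the
   shift S is 3 and the multiplier M is the inverse of 3 mod 2^32, namely
   0xaaaaaaab.  For OFFSET == 48: 48 * 0xaaaaaaab == 16 (mod 2^32), and
   16 >> 3 == 2 == 48 / 24.  */

static void
ggc_example_offset_to_bit (void)
{
  const uint32_t mult = 0xaaaaaaab;	/* inverse of 3 mod 2^32 */
  const unsigned shift = 3;		/* 24 == 3 << 3 */
  uint32_t offset = 48;			/* any multiple of 24 works */
  gcc_assert ((uint32_t) (offset * mult) >> shift == offset / 24);
}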
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
};
/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Round X to next multiple of the page size.  */

#define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
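/* A concrete check of ROUND_UP_VALUE (added for exposition; the sketch
   function name is made up).  For x == 10, f == 8 the macro computes
   7 - ((7 + 10) % 8) == 6, and 10 + 6 == 16, the next multiple of 8.
   PAGE_ALIGN applies the same rounding with F == G.pagesize.  */

static void
ggc_example_round_up (void)
{
  gcc_assert (ROUND_UP_VALUE (10, 8) == 6);	/* 10 + 6 == 16 */
  gcc_assert (ROUND_UP_VALUE (16, 8) == 0);	/* already a multiple */
}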
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
};
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
};
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
class finalizer
{
public:
  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}

  void *addr () const { return m_addr; }

  void call () const { m_function (m_addr); }

private:
  void *m_addr;
  void (*m_function)(void *);
};
class vec_finalizer
{
public:
  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}

  void call () const
    {
      for (size_t i = 0; i < m_n_objects; i++)
	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
    }

  void *addr () const { return reinterpret_cast<void *> (m_addr); }

private:
  uintptr_t m_addr;
  void (*m_function)(void *);
  size_t m_object_size;
  size_t m_n_objects;
};
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif
/* The rest of the global variables.  */
static struct ggc_globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by that given depth we
     are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

  /* Finalizers for single objects.  The first index is collection_depth.  */
  vec<vec<finalizer> > finalizers;

  /* Finalizers for vectors of objects.  */
  vec<vec<vec_finalizer> > vec_finalizers;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;
/* True if a gc is currently taking place.  */

static bool in_gc = false;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
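/* For example (added for exposition; assumes 4K pages and 64-bit longs,
   neither of which is guaranteed here): a page of order 3 holds
   4096/8 == 512 eight-byte objects.  With the one-past-the-end sentinel
   bit that is 513 bits, so the bitmap needs CEIL (513, 64) == 9 longs.  */

static void
ggc_example_bitmap_size (void)
{
  if (HOST_BITS_PER_LONG == 64)
    gcc_assert (BITMAP_SIZE (512 + 1) == 9 * sizeof (long));
}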
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */

#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */

#define INITIAL_PTE_COUNT 128
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
/* Push an entry onto G.depth.  */

static inline void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}
/* Push an entry onto G.by_depth and G.save_in_use.  */

static inline void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
/* Traverse the page table and find the entry for a page.
   If the object wasn't allocated in GC return NULL.  */

static inline page_entry *
safe_lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return NULL;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);
  if (! base[L1])
    return NULL;

  return base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
	return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
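/* Sketch of the bookkeeping above (added for exposition; the function and
   the 4K page size are assumptions).  A page starting three pages past the
   group's allocation gets index 3, so its in-use flag is bit 3 of
   group->in_use -- which is why GGC_QUIRE_SIZE must not exceed
   HOST_BITS_PER_INT in the malloc-page-groups configuration.  */

static void
ggc_example_page_group_index (char *allocation)
{
  char *page = allocation + (3 << 12);	/* assumed 4K pages */
  gcc_assert ((size_t) (page - allocation) >> 12 == 3);
}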
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
	G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
	{
	  page = alloc_anon (NULL, G.pagesize, true);
	  entries = 1;
	}

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = entries - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1)
		       & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order),
	     (void *) page, (void *) (page + entry_size - 1));

  return entry;
}
/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     (void *) entry->page, (void *) (entry->page + entry->bytes - 1));

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages (void)
{
  size_t n1 = 0;
  size_t n2 = 0;
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger continuous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  if (!p->discarded)
	    mapped_len += p->bytes;
	  newprev = p;
	  p = p->next;
	}
      if (len >= free_unit)
	{
	  while (start_p != p)
	    {
	      next = start_p->next;
	      free (start_p);
	      start_p = next;
	    }
	  munmap (start, len);
	  if (prev)
	    prev->next = p;
	  else
	    G.free_pages = p;
	  G.bytes_mapped -= mapped_len;
	  n1 += len;
	  continue;
	}
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time.  */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
	{
	  p = p->next;
	  continue;
	}
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  p = p->next;
	}
      /* Give the page back to the kernel, but don't free the mapping.
	 This avoids fragmentation in the virtual memory map of the
	 process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped to not touch the garbage collector
	 unnecessarily.  */
      G.bytes_mapped -= len;
      n2 += len;
      while (start_p != p)
	{
	  start_p->discarded = true;
	  start_p = start_p->next;
	}
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      n1 += len;
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	n1 += g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif

  if (!quiet_flag && (n1 || n2))
    {
      fprintf (stderr, " {GC");
      if (n1)
	fprintf (stderr, " released " PRsa (0), SIZE_AMOUNT (n1));
      if (n2)
	fprintf (stderr, " madv_dontneed " PRsa (0), SIZE_AMOUNT (n2));
      fprintf (stderr, "}");
    }
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
			size_t *size_order,
			size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}
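/* Usage sketch (added for exposition; the function name is made up, and
   the expected values assume the default tables before init_ggc has
   re-pointed any size_lookup slots at the extra orders).  A 20-byte
   request looks up size_lookup[20] == 5 and is served from the 32-byte
   order; 16 bytes is already a power of two.  */

static void
ggc_example_round_alloc (void)
{
  gcc_assert (ggc_round_alloc_size (20) == 32);
  gcc_assert (ggc_round_alloc_size (16) == 16);
}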
/* Push a finalizer onto the appropriate vec.  */

static void
add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
{
  if (f == NULL)
    /* No finalizer.  */;
  else if (n == 1)
    {
      finalizer fin (result, f);
      G.finalizers[G.context_depth].safe_push (fin);
    }
  else
    {
      vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
      G.vec_finalizers[G.context_depth].safe_push (fin);
    }
}
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
		    MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;

#if GCC_VERSION >= 3004
	  bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
#endif

	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
  if (GATHER_STATISTICS)
    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
			 result FINAL_PASS_MEM_STAT);

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
						object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

  if (f)
    add_finalizer (result, f, s, n);

  if (GATHER_STATISTICS)
    {
      size_t overhead = object_size - size;

      G.stats.total_overhead += overhead;
      G.stats.total_allocated += object_size;
      G.stats.total_overhead_per_order[order] += overhead;
      G.stats.total_allocated_per_order[order] += object_size;

      if (size <= 32)
	{
	  G.stats.total_overhead_under32 += overhead;
	  G.stats.total_allocated_under32 += object_size;
	}
      if (size <= 64)
	{
	  G.stats.total_overhead_under64 += overhead;
	  G.stats.total_allocated_under64 += object_size;
	}
      if (size <= 128)
	{
	  G.stats.total_overhead_under128 += overhead;
	  G.stats.total_allocated_under128 += object_size;
	}
    }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}
/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is alloced.  If it was not
     GC allocated, gracefully bail out.  */
  entry = safe_lookup_page_table_entry (p);
  if (!entry)
    return;

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
	 of an allocated object.  We assume it points to the middle of
	 a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);
}
/* User-callable entry points for marking string X.  */

void
gt_ggc_mx (const char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
{
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

bool
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return true;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return false;
}
/* Return true if P has been marked, false otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

bool
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  if (in_gc)
    return;

  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GATHER_STATISTICS)
    ggc_free_overhead (p);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
	page_entry *p, *q;

	/* If the page is completely full, then it's supposed to
	   be after all pages that aren't.  Since we've freed one
	   object from a page that was full, we need to move the
	   page to the head of the list.

	   PE is the node we want to move.  Q is the previous node
	   and P is the next node in the list.  */
	q = pe->prev;
	if (q && q->num_free_objects == 0)
	  {
	    p = pe->next;

	    q->next = p;

	    /* If PE was at the end of the list, then Q becomes the
	       new end of the list.  If PE was not the end of the
	       list, then we need to update the PREV field for P.  */
	    if (!p)
	      G.page_tails[order] = q;
	    else
	      p->prev = q;

	    /* Move PE to the head of the list.  */
	    pe->next = G.pages[order];
	    pe->prev = NULL;
	    G.pages[order]->prev = pe;
	    G.pages[order] = pe;
	  }

	/* Reset the hint bit to point to the only free object.  */
	pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
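/* Standalone check of the Newton iteration above (added for exposition;
   the function is a sketch, not part of the collector).  Each step
   doubles the number of correct low-order bits, so for any odd Z the
   loop converges to the inverse of Z mod 2^N in O(log N) steps; exact
   division of any multiple of Z then needs only a multiply.  */

static void
ggc_example_exact_division (void)
{
  size_t z = 3, inv = z;		/* any odd divisor works */
  while (inv * z != 1)
    inv = inv * (2 - inv * z);		/* Newton step */
  gcc_assert ((size_t) 48 * inv == 48 / 3);
}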
/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  static bool init_p = false;
  unsigned order;

  if (init_p)
    return;
  init_p = true;

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, true);
    struct page_entry *e;
    if ((uintptr_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, true);
	gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
	continue;

      for (o = size_lookup[i]; o == size_lookup[i]; --i)
	size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  /* Allocate space for the depth 0 finalizers.  */
  G.finalizers.safe_push (vNULL);
  G.vec_finalizers.safe_push (vNULL);
  gcc_assert (G.finalizers.length() == 1);
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects = OBJECTS_IN_PAGE (p);
	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

	  /* The data should be page-aligned.  */
	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! save_in_use_p (p))
		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
/* Check if any blocks with a registered finalizer have become unmarked.  If
   so, run the finalizer and unregister it because the block is about to be
   freed.  Note that no guarantee is made about what order finalizers will
   run in, so touching other objects in GC memory is extremely unwise.  */

static void
ggc_handle_finalizers ()
{
  unsigned dlen = G.finalizers.length();
  for (unsigned d = G.context_depth; d < dlen; ++d)
    {
      vec<finalizer> &v = G.finalizers[d];
      unsigned length = v.length ();
      for (unsigned int i = 0; i < length;)
	{
	  finalizer &f = v[i];
	  if (!ggc_marked_p (f.addr ()))
	    {
	      f.call ();
	      v.unordered_remove (i);
	      length--;
	    }
	  else
	    i++;
	}
    }

  gcc_assert (dlen == G.vec_finalizers.length());
  for (unsigned d = G.context_depth; d < dlen; ++d)
    {
      vec<vec_finalizer> &vv = G.vec_finalizers[d];
      unsigned length = vv.length ();
      for (unsigned int i = 0; i < length;)
	{
	  vec_finalizer &f = vv[i];
	  if (!ggc_marked_p (f.addr ()))
	    {
	      f.call ();
	      vv.unordered_remove (i);
	      length--;
	    }
	  else
	    i++;
	}
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  num_objects = OBJECTS_IN_PAGE (p);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      /* If P was the first page in the list, then NEXT
		 becomes the new first page in the list, otherwise
		 splice P out of the forward pointers.  */
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Splice P out of the back pointers too.  */
	      if (next)
		next->prev = previous;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  p->prev = G.page_tails[order];
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;

		  /* And update the backpointer in NEXT if necessary.  */
		  if (next)
		    next->prev = previous;

		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;

	      /* Update the backchain in the next node if it exists.  */
	      if (p->next)
		p->next->prev = previous;

	      /* Move P to the head of the list.  */
	      p->next = G.pages[order];
	      p->prev = NULL;
	      G.pages[order]->prev = p;

	      /* Update the head pointer.  */
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects;
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  num_objects = OBJECTS_IN_PAGE (p);
	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		{
		  char *object = p->page + i * size;

		  /* Keep poison-by-write when we expect to use Valgrind,
		     so the exact same memory semantics is kept, in case
		     there are memory errors.  We override this request
		     below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
								 size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
		}
	    }
	}
    }
}
#else
#define poison_pages()
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
	 do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
	 free_object entry, so that we can verify that the address
	 isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
	{
	  f->next = still_free;
	  still_free = f;
	}
      else
	free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect (enum ggc_collect mode)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * ONE_K);

  /* It is also a good time to get the memory block pool into limits.  */
  memory_block_pool::trim ();

  float min_expand = allocated_last_gc * param_ggc_min_expand / 100;
  if (mode == GGC_COLLECT_HEURISTIC
      && G.allocated < allocated_last_gc + min_expand)
    return;

  timevar_push (TV_GC);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  size_t allocated = G.allocated;
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Output this later so we do not interfere with release_pages.  */
  if (!quiet_flag)
    fprintf (stderr, " {GC " PRsa (0) " -> ", SIZE_AMOUNT (allocated));

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  in_gc = true;
  clear_marks ();
  ggc_mark_roots ();
  ggc_handle_finalizers ();

  if (GATHER_STATISTICS)
    ggc_prune_overhead_list ();

  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  in_gc = false;
  G.allocated_last_gc = G.allocated;

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, PRsa (0) "}", SIZE_AMOUNT (G.allocated));
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}

/* Return free pages to the system.  */

void
ggc_trim ()
{
  timevar_push (TV_GC);
  G.allocated_last_gc = 0;
  ggc_collect (GGC_COLLECT_FORCE);
  if (!quiet_flag)
    fprintf (stderr, " {GC trimmed to " PRsa (0) ", " PRsa (0) " mapped}",
	     SIZE_AMOUNT (G.allocated), SIZE_AMOUNT (G.bytes_mapped));
  timevar_pop (TV_GC);
}

/* Assume that all GGC memory is reachable and grow the limits for next
   collection.  With checking, trigger GGC so -Q compilation outputs how much
   of memory really is reachable.  */

void
ggc_grow (void)
{
  if (!flag_checking)
    G.allocated_last_gc = MAX (G.allocated_last_gc,
			       G.allocated);
  else
    ggc_collect ();
  if (!quiet_flag)
    fprintf (stderr, " {GC " PRsa (0) "} ", SIZE_AMOUNT (G.allocated));
}
/* Print per-order statistics about the memory still in use.  */

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-8s %10s %10s %10s\n",
           "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
        }
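      /* The overhead term is the out-of-band bookkeeping per page: the
         page_entry itself plus its in-use bitmap.  Illustrative, with
         4 KiB pages and 64-bit longs: a page of 32-byte objects holds
         128 objects, so BITMAP_SIZE (129) is 3 longs (24 bytes).  */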
      fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " "
               PRsa (10) "\n",
               (uint64_t)OBJECT_SIZE (i),
               SIZE_AMOUNT (allocated),
               SIZE_AMOUNT (in_use),
               SIZE_AMOUNT (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n",
           "Total",
           SIZE_AMOUNT (G.bytes_mapped),
           SIZE_AMOUNT (G.allocated),
           SIZE_AMOUNT (total_overhead));
  if (GATHER_STATISTICS)
    {
      fprintf (stderr, "\nTotal allocations and overheads during "
               "the compilation process\n");

      fprintf (stderr, "Total Overhead: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_overhead));
      fprintf (stderr, "Total Allocated: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_allocated));

      fprintf (stderr, "Total Overhead under 32B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_overhead_under32));
      fprintf (stderr, "Total Allocated under 32B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_allocated_under32));
      fprintf (stderr, "Total Overhead under 64B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_overhead_under64));
      fprintf (stderr, "Total Allocated under 64B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_allocated_under64));
      fprintf (stderr, "Total Overhead under 128B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_overhead_under128));
      fprintf (stderr, "Total Allocated under 128B: "
               PRsa (9) "\n",
               SIZE_AMOUNT (G.stats.total_allocated_under128));
      for (i = 0; i < NUM_ORDERS; i++)
        if (G.stats.total_allocated_per_order[i])
          {
            fprintf (stderr, "Total Overhead page size %9" PRIu64 ": "
                     PRsa (9) "\n",
                     (uint64_t)OBJECT_SIZE (i),
                     SIZE_AMOUNT (G.stats.total_overhead_per_order[i]));
            fprintf (stderr, "Total Allocated page size %9" PRIu64 ": "
                     PRsa (9) "\n",
                     (uint64_t)OBJECT_SIZE (i),
                     SIZE_AMOUNT (G.stats.total_allocated_per_order[i]));
          }
    }
}
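/* Precompiled-header (PCH) support.  The PCH writer (gt_pch_save in
   ggc-common.cc) drives the hooks below in a fixed order:
   ggc_pch_count_object tallies every object by size order;
   ggc_pch_total_size and ggc_pch_this_base then lay the orders out
   contiguously; ggc_pch_alloc_object assigns each object its slot;
   and ggc_pch_write_object / ggc_pch_finish emit the bytes.  */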
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  uintptr_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  d->d.totals[order]++;
}
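/* For example, a request larger than NUM_SIZE_LOOKUP bytes takes the
   loop above: starting at order 10, it stops at the first order whose
   OBJECT_SIZE can hold the object, and the object is then tallied,
   laid out and padded as one full slot of that size throughout the
   PCH code below.  */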
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
  return a;
}
void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  uintptr_t a = (uintptr_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
    }
}
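/* Illustrative layout: with a 4 KiB page size, 100 objects of an
   8-byte order occupy PAGE_ALIGN (800) == 4096 bytes, so that order's
   region is [base, base + 4096) and the next non-empty order starts
   on the following page boundary.  */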
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}
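/* Note the bump-pointer discipline: each call advances d->base[order]
   by a full OBJECT_SIZE slot, so the addresses handed out here line up
   exactly with the file offsets that ggc_pch_write_object produces.  */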
void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
void
ggc_pch_write_object (struct ggc_pch_data *d,
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
                      size_t size)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error (input_location, "cannot write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error (input_location, "cannot write PCH file");
        }
    }
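  /* E.g. a 13-byte string tallied into a 16-byte slot needs 3 bytes of
     padding; 3 <= sizeof (emptyBytes), so it is written from the zeroed
     buffer.  Only pads larger than 256 bytes take the fseek path.  */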
  d->written[order]++;
  /* Once the last object of an order has been written, skip ahead so
     the next order starts page-aligned, matching the PAGE_ALIGN layout
     computed by ggc_pch_total_size.  */
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
                                   G.pagesize),
                SEEK_CUR) != 0)
    fatal_error (input_location, "cannot write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  free (d);
}
/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
  memcpy (&new_by_depth[0],
          &G.by_depth[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
          &G.by_depth[0],
          count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
          &G.save_in_use[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
          &G.save_in_use[0],
          count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;
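  /* In effect the arrays are rotated: with three old and two new
     entries, [o0 o1 o2 n0 n1] becomes [n0 n1 o0 o1 o2], putting the
     depth-0 PCH entries first as the depth ordering requires.  */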
  /* Now update all the index_by_depth fields.  */
  for (unsigned i = G.by_depth_in_use; i--;)
    {
      page_entry *p = G.by_depth[i];
      p->index_by_depth = i;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error (input_location, "cannot read PCH file: %m");

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();
  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  /* Allocate space for the depth 1 finalizers.  */
  G.finalizers.safe_push (vNULL);
  G.vec_finalizers.safe_push (vNULL);
  gcc_assert (G.finalizers.length() == 2);
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
        p->context_depth = G.context_depth;
    }
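  /* Pages created below for the PCH contents will get depth 0, below
     the depth-1 pages just adjusted, which is what keeps PCH objects
     exempt from collection.  */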
  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
        continue;

      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
                                            - sizeof (long)
                                            + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;
      for (j = 0;
           j + HOST_BITS_PER_LONG <= num_objs + 1;
           j += HOST_BITS_PER_LONG)
        entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
        entry->in_use_p[j / HOST_BITS_PER_LONG]
          |= 1L << (j % HOST_BITS_PER_LONG);
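      /* All num_objs objects, plus the extra bit one past the last
         object, are now marked in use.  E.g. with num_objs == 100 and
         64-bit longs, the first loop fills word 0 (bits 0-63) and the
         second sets bits 64-100 of word 1 individually.  */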
      for (pte = entry->page;
           pte < entry->page + entry->bytes;
           pte += G.pagesize)
        set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
        G.page_tails[i]->next = entry;
      else
        G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
         end of the varrays, later, we will move the new information
         to the front of the varrays, as the PCH page tables are at
         context 0.  */
      push_by_depth (entry, 0);
    }
  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}