/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999-2015 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "ggc-internal.h"
#include "plugin-api.h"
#include "hard-reg-set.h"
#include "basic-block.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
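
/* Illustrative sketch, not part of the allocator: how a request size is
   rounded to an order under the scheme described above, ignoring the
   extra non-power-of-two orders defined later in this file.  The helper
   name is hypothetical and the block is kept under #if 0 so it is never
   compiled.  */
#if 0
#include <stddef.h>

/* Return the smallest N such that (1 << N) >= SIZE; a request of SIZE
   bytes would be served from the order-N page list.  */
static unsigned int
example_order_for_size (size_t size)
{
  unsigned int order = 0;
  while (((size_t) 1 << order) < size)
    order++;
  return order;
}

/* example_order_for_size (24) == 5, so a 24-byte request is rounded up
   to a 32-byte object and served from the order-5 pages.  */
#endif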
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |	 |      |
			 PAGE_L1_BITS	|
				 |	|
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
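
/* Illustrative sketch, not part of the allocator: splitting a 32-bit
   address into the two lookup indices and the in-page offset, assuming
   4K pages (lg_pagesize == 12) and the PAGE_L1_BITS value above.  The
   real code uses LOOKUP_L1/LOOKUP_L2, which read the page size from G;
   the function name here is hypothetical.  */
#if 0
#include <stdint.h>
#include <stdio.h>

static void
example_split_address (uint32_t p)
{
  const unsigned int lg_pagesize = 12;	/* assumed 4K pages */
  const unsigned int l1_bits = 8;	/* PAGE_L1_BITS */
  const unsigned int l2_bits = 32 - l1_bits - lg_pagesize;

  unsigned int l1 = (p >> (32 - l1_bits)) & ((1u << l1_bits) - 1);
  unsigned int l2 = (p >> lg_pagesize) & ((1u << l2_bits) - 1);
  unsigned int off = p & ((1u << lg_pagesize) - 1);

  /* For p == 0x12345678: l1 == 0x12, l2 == 0x345, off == 0x678.  */
  printf ("L1=%#x L2=%#x offset=%#x\n", l1, l2, off);
}
#endif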
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
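
/* Illustrative check, not part of the allocator: the multiply-and-shift
   trick behind OFFSET_TO_BIT, verified by brute force for one object
   size.  The MULT and SHIFT constants below are worked out by hand for
   a 24-byte object; the real values come from compute_inverse, further
   down in this file.  */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static void
example_check_offset_to_bit (void)
{
  /* 24 == 3 * 2^3, so SHIFT is 3 and MULT is the inverse of 3 modulo
     2^32, which is 0xaaaaaaab.  Then (O * MULT) >> SHIFT == O / 24 for
     every in-page offset O that is a multiple of 24.  */
  const size_t object_size = 24;
  const uint32_t mult = 0xaaaaaaabU;
  const unsigned int shift = 3;

  for (size_t o = 0; o < 4096; o += object_size)
    assert ((size_t) (((uint32_t) o * mult) >> shift) == o / object_size);
}
#endif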
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* The number of extra orders, not corresponding to power-of-two sized
   objects, for which we create new free lists.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};
/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* Round X to the next multiple of the page size.  */

#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
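
/* Illustrative checks, not part of the allocator, of the rounding
   macros above, assuming a 4K page size; the numbers are spelled out
   rather than using the macros so the block stands alone.  */
#if 0
#include <assert.h>
#include <stddef.h>

static void
example_rounding (void)
{
  const size_t pagesize = 4096;		/* assumed page size */

  /* ROUND_UP_VALUE (21, 8): smallest value to add to 21 to reach a
     multiple of 8.  */
  assert (8 - 1 - ((8 - 1 + 21) % 8) == 3);

  /* ROUND_UP (21, 8): smallest multiple of 8 that is >= 21.  */
  assert ((21 + 8 - 1) / 8 * 8 == 24);

  /* PAGE_ALIGN (5000): a 5000-byte request is padded out to two whole
     pages.  */
  assert (((5000 + pagesize - 1) & ~(pagesize - 1)) == 8192);
}
#endif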
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
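
/* Illustrative sketch, not part of the allocator: how big a page_entry
   really is once the trailing in_use_p bitmap has been sized for a
   page's objects.  This mirrors the computation in alloc_page below;
   the helper name is hypothetical and the worked example assumes
   64-bit longs and a 4K page of 32-byte objects.  */
#if 0
#include <stddef.h>

static size_t
example_page_entry_size (size_t num_objects)
{
  /* One bit per object plus the one-past-the-end sentinel bit, rounded
     up to whole longs; the struct declaration already contains one
     long, so subtract it back out.  */
  size_t bits_per_long = 8 * sizeof (long);
  size_t bitmap_longs = (num_objects + 1 + bits_per_long - 1) / bits_per_long;
  return sizeof (struct page_entry) - sizeof (long)
	 + bitmap_longs * sizeof (long);
}

/* A 4K page of 32-byte objects holds 128 objects; 129 bits round up to
   three 64-bit longs, so the entry needs sizeof (page_entry) plus 16
   extra bytes.  */
#endif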
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* A finalizer to be run on a single object when it is collected.  */
class finalizer
{
public:
  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}

  void *addr () const { return m_addr; }

  void call () const { m_function (m_addr); }

private:
  /* The address of the object to be finalized.  */
  void *m_addr;

  /* The function to call on the object.  */
  void (*m_function)(void *);
};

/* A finalizer to be run on each element of a vector of objects.  */
class vec_finalizer
{
public:
  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}

  void call () const
    {
      for (size_t i = 0; i < m_n_objects; i++)
	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
    }

  void *addr () const { return reinterpret_cast<void *> (m_addr); }

private:
  /* The base address of the vector of objects to be finalized.  */
  uintptr_t m_addr;

  /* The function to call on each object.  */
  void (*m_function)(void *);

  /* The size of each object in the vector.  */
  size_t m_object_size;

  /* The number of objects in the vector.  */
  size_t m_n_objects;
};
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif
/* The rest of the global variables.  */
static struct ggc_globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by that given depth we
     are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

  /* Finalizers for single objects.  */
  vec<finalizer> finalizers;

  /* Finalizers for vectors of objects.  */
  vec<vec_finalizer> vec_finalizers;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;

/* True if a gc is currently taking place.  */

static bool in_gc = false;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
static char *alloc_anon (char *, size_t, bool check);
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
/* Push an entry onto G.depth.  */

static inline void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}
/* Push an entry onto G.by_depth and G.save_in_use.  */

static inline void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
	return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
772 /* Allocate a new page for allocating objects of size 2^ORDER,
773 and return an entry for it. The entry is not added to the
774 appropriate page_table list. */
776 static inline struct page_entry
*
777 alloc_page (unsigned order
)
779 struct page_entry
*entry
, *p
, **pp
;
783 size_t page_entry_size
;
785 #ifdef USING_MALLOC_PAGE_GROUPS
789 num_objects
= OBJECTS_PER_PAGE (order
);
790 bitmap_size
= BITMAP_SIZE (num_objects
+ 1);
791 page_entry_size
= sizeof (page_entry
) - sizeof (long) + bitmap_size
;
792 entry_size
= num_objects
* OBJECT_SIZE (order
);
793 if (entry_size
< G
.pagesize
)
794 entry_size
= G
.pagesize
;
795 entry_size
= PAGE_ALIGN (entry_size
);
800 /* Check the list of free pages for one we can use. */
801 for (pp
= &G
.free_pages
, p
= *pp
; p
; pp
= &p
->next
, p
= *pp
)
802 if (p
->bytes
== entry_size
)
808 G
.bytes_mapped
+= p
->bytes
;
809 p
->discarded
= false;
811 /* Recycle the allocated memory from this page ... */
815 #ifdef USING_MALLOC_PAGE_GROUPS
819 /* ... and, if possible, the page entry itself. */
820 if (p
->order
== order
)
823 memset (entry
, 0, page_entry_size
);
829 else if (entry_size
== G
.pagesize
)
831 /* We want just one page. Allocate a bunch of them and put the
832 extras on the freelist. (Can only do this optimization with
833 mmap for backing store.) */
834 struct page_entry
*e
, *f
= G
.free_pages
;
835 int i
, entries
= GGC_QUIRE_SIZE
;
837 page
= alloc_anon (NULL
, G
.pagesize
* GGC_QUIRE_SIZE
, false);
840 page
= alloc_anon (NULL
, G
.pagesize
, true);
844 /* This loop counts down so that the chain will be in ascending
846 for (i
= entries
- 1; i
>= 1; i
--)
848 e
= XCNEWVAR (struct page_entry
, page_entry_size
);
850 e
->bytes
= G
.pagesize
;
851 e
->page
= page
+ (i
<< G
.lg_pagesize
);
859 page
= alloc_anon (NULL
, entry_size
, true);
861 #ifdef USING_MALLOC_PAGE_GROUPS
864 /* Allocate a large block of memory and serve out the aligned
865 pages therein. This results in much less memory wastage
866 than the traditional implementation of valloc. */
868 char *allocation
, *a
, *enda
;
869 size_t alloc_size
, head_slop
, tail_slop
;
870 int multiple_pages
= (entry_size
== G
.pagesize
);
873 alloc_size
= GGC_QUIRE_SIZE
* G
.pagesize
;
875 alloc_size
= entry_size
+ G
.pagesize
- 1;
876 allocation
= XNEWVEC (char, alloc_size
);
878 page
= (char *) (((uintptr_t) allocation
+ G
.pagesize
- 1) & -G
.pagesize
);
879 head_slop
= page
- allocation
;
881 tail_slop
= ((size_t) allocation
+ alloc_size
) & (G
.pagesize
- 1);
883 tail_slop
= alloc_size
- entry_size
- head_slop
;
884 enda
= allocation
+ alloc_size
- tail_slop
;
886 /* We allocated N pages, which are likely not aligned, leaving
887 us with N-1 usable pages. We plan to place the page_group
888 structure somewhere in the slop. */
889 if (head_slop
>= sizeof (page_group
))
890 group
= (page_group
*)page
- 1;
893 /* We magically got an aligned allocation. Too bad, we have
894 to waste a page anyway. */
898 tail_slop
+= G
.pagesize
;
900 gcc_assert (tail_slop
>= sizeof (page_group
));
901 group
= (page_group
*)enda
;
902 tail_slop
-= sizeof (page_group
);
905 /* Remember that we allocated this memory. */
906 group
->next
= G
.page_groups
;
907 group
->allocation
= allocation
;
908 group
->alloc_size
= alloc_size
;
910 G
.page_groups
= group
;
911 G
.bytes_mapped
+= alloc_size
;
913 /* If we allocated multiple pages, put the rest on the free list. */
916 struct page_entry
*e
, *f
= G
.free_pages
;
917 for (a
= enda
- G
.pagesize
; a
!= page
; a
-= G
.pagesize
)
919 e
= XCNEWVAR (struct page_entry
, page_entry_size
);
921 e
->bytes
= G
.pagesize
;
933 entry
= XCNEWVAR (struct page_entry
, page_entry_size
);
935 entry
->bytes
= entry_size
;
937 entry
->context_depth
= G
.context_depth
;
938 entry
->order
= order
;
939 entry
->num_free_objects
= num_objects
;
940 entry
->next_bit_hint
= 1;
942 G
.context_depth_allocations
|= (unsigned long)1 << G
.context_depth
;
944 #ifdef USING_MALLOC_PAGE_GROUPS
945 entry
->group
= group
;
946 set_page_group_in_use (group
, page
);
949 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
950 increment the hint. */
951 entry
->in_use_p
[num_objects
/ HOST_BITS_PER_LONG
]
952 = (unsigned long) 1 << (num_objects
% HOST_BITS_PER_LONG
);
954 set_page_table_entry (page
, entry
);
956 if (GGC_DEBUG_LEVEL
>= 2)
957 fprintf (G
.debug_file
,
958 "Allocating page at %p, object size=%lu, data %p-%p\n",
959 (void *) entry
, (unsigned long) OBJECT_SIZE (order
), page
,
960 page
+ entry_size
- 1);
965 /* Adjust the size of G.depth so that no index greater than the one
966 used by the top of the G.by_depth is used. */
973 if (G
.by_depth_in_use
)
975 top
= G
.by_depth
[G
.by_depth_in_use
-1];
977 /* Peel back indices in depth that index into by_depth, so that
978 as new elements are added to by_depth, we note the indices
979 of those elements, if they are for new context depths. */
980 while (G
.depth_in_use
> (size_t)top
->context_depth
+1)
985 /* For a page that is no longer needed, put it on the free page list. */
988 free_page (page_entry
*entry
)
990 if (GGC_DEBUG_LEVEL
>= 2)
991 fprintf (G
.debug_file
,
992 "Deallocating page at %p, data %p-%p\n", (void *) entry
,
993 entry
->page
, entry
->page
+ entry
->bytes
- 1);
995 /* Mark the page as inaccessible. Discard the handle to avoid handle
997 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry
->page
, entry
->bytes
));
999 set_page_table_entry (entry
->page
, NULL
);
1001 #ifdef USING_MALLOC_PAGE_GROUPS
1002 clear_page_group_in_use (entry
->group
, entry
->page
);
1005 if (G
.by_depth_in_use
> 1)
1007 page_entry
*top
= G
.by_depth
[G
.by_depth_in_use
-1];
1008 int i
= entry
->index_by_depth
;
1010 /* We cannot free a page from a context deeper than the current
1012 gcc_assert (entry
->context_depth
== top
->context_depth
);
1014 /* Put top element into freed slot. */
1015 G
.by_depth
[i
] = top
;
1016 G
.save_in_use
[i
] = G
.save_in_use
[G
.by_depth_in_use
-1];
1017 top
->index_by_depth
= i
;
1019 --G
.by_depth_in_use
;
1023 entry
->next
= G
.free_pages
;
1024 G
.free_pages
= entry
;
1027 /* Release the free page cache to the system. */
1030 release_pages (void)
1032 #ifdef USING_MADVISE
1033 page_entry
*p
, *start_p
;
1037 page_entry
*next
, *prev
, *newprev
;
1038 size_t free_unit
= (GGC_QUIRE_SIZE
/2) * G
.pagesize
;
1040 /* First free larger continuous areas to the OS.
1041 This allows other allocators to grab these areas if needed.
1042 This is only done on larger chunks to avoid fragmentation.
1043 This does not always work because the free_pages list is only
1044 approximately sorted. */
1055 while (p
&& p
->page
== start
+ len
)
1059 mapped_len
+= p
->bytes
;
1063 if (len
>= free_unit
)
1065 while (start_p
!= p
)
1067 next
= start_p
->next
;
1071 munmap (start
, len
);
1076 G
.bytes_mapped
-= mapped_len
;
1082 /* Now give back the fragmented pages to the OS, but keep the address
1083 space to reuse it next time. */
1085 for (p
= G
.free_pages
; p
; )
1096 while (p
&& p
->page
== start
+ len
)
1101 /* Give the page back to the kernel, but don't free the mapping.
1102 This avoids fragmentation in the virtual memory map of the
1103 process. Next time we can reuse it by just touching it. */
1104 madvise (start
, len
, MADV_DONTNEED
);
1105 /* Don't count those pages as mapped to not touch the garbage collector
1107 G
.bytes_mapped
-= len
;
1108 while (start_p
!= p
)
1110 start_p
->discarded
= true;
1111 start_p
= start_p
->next
;
1115 #if defined(USING_MMAP) && !defined(USING_MADVISE)
1116 page_entry
*p
, *next
;
1120 /* Gather up adjacent pages so they are unmapped together. */
1131 while (p
&& p
->page
== start
+ len
)
1139 munmap (start
, len
);
1140 G
.bytes_mapped
-= len
;
1143 G
.free_pages
= NULL
;
1145 #ifdef USING_MALLOC_PAGE_GROUPS
1146 page_entry
**pp
, *p
;
1147 page_group
**gp
, *g
;
1149 /* Remove all pages from free page groups from the list. */
1151 while ((p
= *pp
) != NULL
)
1152 if (p
->group
->in_use
== 0)
1160 /* Remove all free page groups, and release the storage. */
1161 gp
= &G
.page_groups
;
1162 while ((g
= *gp
) != NULL
)
1166 G
.bytes_mapped
-= g
->alloc_size
;
1167 free (g
->allocation
);
1174 /* This table provides a fast way to determine ceil(log_2(size)) for
1175 allocation requests. The minimum allocation size is eight bytes. */
1176 #define NUM_SIZE_LOOKUP 512
1177 static unsigned char size_lookup
[NUM_SIZE_LOOKUP
] =
1179 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1180 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1181 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1182 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1183 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1184 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1185 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1186 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1187 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1188 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1189 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1190 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1191 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1192 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1193 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1194 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1195 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1196 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1197 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1198 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1199 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1200 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1201 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1202 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1203 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1204 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1205 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1206 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1207 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1208 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1209 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1210 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
			size_t *size_order,
			size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}
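
/* Illustrative sketch, not part of the allocator: a self-contained
   model of the "large request" path of ggc_round_alloc_size_1, which
   scans orders upward until the object size is big enough.  It assumes
   the orders past the lookup table are plain powers of two, so the
   extra non-power-of-two orders are ignored; the function names are
   hypothetical.  */
#if 0
#include <assert.h>
#include <stddef.h>

static size_t
example_round_large (size_t requested_size)
{
  size_t order = 10;			/* first order past size_lookup */
  while (requested_size > ((size_t) 1 << order))
    order++;
  return (size_t) 1 << order;
}

static void
example_round_checks (void)
{
  /* Small requests index size_lookup directly: size_lookup[24] == 5,
     so a 24-byte request becomes a 32-byte object (before init_ggc
     rewrites parts of the table for the extra orders).  */
  assert (example_round_large (600) == 1024);
  assert (example_round_large (5000) == 8192);
}
#endif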
1254 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1257 ggc_internal_alloc (size_t size
, void (*f
)(void *), size_t s
, size_t n
1260 size_t order
, word
, bit
, object_offset
, object_size
;
1261 struct page_entry
*entry
;
1264 ggc_round_alloc_size_1 (size
, &order
, &object_size
);
1266 /* If there are non-full pages for this size allocation, they are at
1267 the head of the list. */
1268 entry
= G
.pages
[order
];
1270 /* If there is no page for this object size, or all pages in this
1271 context are full, allocate a new page. */
1272 if (entry
== NULL
|| entry
->num_free_objects
== 0)
1274 struct page_entry
*new_entry
;
1275 new_entry
= alloc_page (order
);
1277 new_entry
->index_by_depth
= G
.by_depth_in_use
;
1278 push_by_depth (new_entry
, 0);
1280 /* We can skip context depths, if we do, make sure we go all the
1281 way to the new depth. */
1282 while (new_entry
->context_depth
>= G
.depth_in_use
)
1283 push_depth (G
.by_depth_in_use
-1);
1285 /* If this is the only entry, it's also the tail. If it is not
1286 the only entry, then we must update the PREV pointer of the
1287 ENTRY (G.pages[order]) to point to our new page entry. */
1289 G
.page_tails
[order
] = new_entry
;
1291 entry
->prev
= new_entry
;
1293 /* Put new pages at the head of the page list. By definition the
1294 entry at the head of the list always has a NULL pointer. */
1295 new_entry
->next
= entry
;
1296 new_entry
->prev
= NULL
;
1298 G
.pages
[order
] = new_entry
;
1300 /* For a new page, we know the word and bit positions (in the
1301 in_use bitmap) of the first available object -- they're zero. */
1302 new_entry
->next_bit_hint
= 1;
1309 /* First try to use the hint left from the previous allocation
1310 to locate a clear bit in the in-use bitmap. We've made sure
1311 that the one-past-the-end bit is always set, so if the hint
1312 has run over, this test will fail. */
1313 unsigned hint
= entry
->next_bit_hint
;
1314 word
= hint
/ HOST_BITS_PER_LONG
;
1315 bit
= hint
% HOST_BITS_PER_LONG
;
1317 /* If the hint didn't work, scan the bitmap from the beginning. */
1318 if ((entry
->in_use_p
[word
] >> bit
) & 1)
1321 while (~entry
->in_use_p
[word
] == 0)
1324 #if GCC_VERSION >= 3004
1325 bit
= __builtin_ctzl (~entry
->in_use_p
[word
]);
1327 while ((entry
->in_use_p
[word
] >> bit
) & 1)
1331 hint
= word
* HOST_BITS_PER_LONG
+ bit
;
1334 /* Next time, try the next bit. */
1335 entry
->next_bit_hint
= hint
+ 1;
1337 object_offset
= hint
* object_size
;
1340 /* Set the in-use bit. */
1341 entry
->in_use_p
[word
] |= ((unsigned long) 1 << bit
);
1343 /* Keep a running total of the number of free objects. If this page
1344 fills up, we may have to move it to the end of the list if the
1345 next page isn't full. If the next page is full, all subsequent
1346 pages are full, so there's no need to move it. */
1347 if (--entry
->num_free_objects
== 0
1348 && entry
->next
!= NULL
1349 && entry
->next
->num_free_objects
> 0)
1351 /* We have a new head for the list. */
1352 G
.pages
[order
] = entry
->next
;
1354 /* We are moving ENTRY to the end of the page table list.
1355 The new page at the head of the list will have NULL in
1356 its PREV field and ENTRY will have NULL in its NEXT field. */
1357 entry
->next
->prev
= NULL
;
1360 /* Append ENTRY to the tail of the list. */
1361 entry
->prev
= G
.page_tails
[order
];
1362 G
.page_tails
[order
]->next
= entry
;
1363 G
.page_tails
[order
] = entry
;
1366 /* Calculate the object's address. */
1367 result
= entry
->page
+ object_offset
;
1368 if (GATHER_STATISTICS
)
1369 ggc_record_overhead (OBJECT_SIZE (order
), OBJECT_SIZE (order
) - size
,
1370 result FINAL_PASS_MEM_STAT
);
1372 #ifdef ENABLE_GC_CHECKING
1373 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1374 exact same semantics in presence of memory bugs, regardless of
1375 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1376 handle to avoid handle leak. */
1377 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result
, object_size
));
1379 /* `Poison' the entire allocated object, including any padding at
1381 memset (result
, 0xaf, object_size
);
1383 /* Make the bytes after the end of the object unaccessible. Discard the
1384 handle to avoid handle leak. */
1385 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result
+ size
,
1386 object_size
- size
));
1389 /* Tell Valgrind that the memory is there, but its content isn't
1390 defined. The bytes at the end of the object are still marked
1392 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result
, size
));
1394 /* Keep track of how many bytes are being allocated. This
1395 information is used in deciding when to collect. */
1396 G
.allocated
+= object_size
;
1398 /* For timevar statistics. */
1399 timevar_ggc_mem_total
+= object_size
;
1402 G
.finalizers
.safe_push (finalizer (result
, f
));
1404 G
.vec_finalizers
.safe_push
1405 (vec_finalizer (reinterpret_cast<uintptr_t> (result
), f
, s
, n
));
1407 if (GATHER_STATISTICS
)
1409 size_t overhead
= object_size
- size
;
1411 G
.stats
.total_overhead
+= overhead
;
1412 G
.stats
.total_allocated
+= object_size
;
1413 G
.stats
.total_overhead_per_order
[order
] += overhead
;
1414 G
.stats
.total_allocated_per_order
[order
] += object_size
;
1418 G
.stats
.total_overhead_under32
+= overhead
;
1419 G
.stats
.total_allocated_under32
+= object_size
;
1423 G
.stats
.total_overhead_under64
+= overhead
;
1424 G
.stats
.total_allocated_under64
+= object_size
;
1428 G
.stats
.total_overhead_under128
+= overhead
;
1429 G
.stats
.total_allocated_under128
+= object_size
;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}
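
/* Illustrative sketch, not part of the allocator: how the F, S and N
   arguments of ggc_internal_alloc translate into finalizer
   registrations handled by ggc_handle_finalizers below.  The struct
   and callback are hypothetical; only the ggc_internal_alloc signature
   used here is the one defined above.  */
#if 0
struct example_obj { int refcount; };

static void
example_finalize (void *p)
{
  /* Called during collection once the object is no longer marked.  */
  ((struct example_obj *) p)->refcount = 0;
}

static void
example_register_finalizers (void)
{
  /* A single object with a finalizer: F is the callback and N is 1,
     so it lands in G.finalizers.  */
  void *one = ggc_internal_alloc (sizeof (struct example_obj),
				  example_finalize, 0, 1);

  /* A vector of 8 objects: each S-byte element is finalized
     separately, via G.vec_finalizers.  */
  void *many = ggc_internal_alloc (8 * sizeof (struct example_obj),
				   example_finalize,
				   sizeof (struct example_obj), 8);
  (void) one;
  (void) many;
}
#endif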
1442 /* Mark function for strings. */
1445 gt_ggc_m_S (const void *p
)
1450 unsigned long offset
;
1452 if (!p
|| !ggc_allocated_p (p
))
  /* Look up the page on which the object is alloced.  */
1456 entry
= lookup_page_table_entry (p
);
1459 /* Calculate the index of the object on the page; this is its bit
1460 position in the in_use_p bitmap. Note that because a char* might
1461 point to the middle of an object, we need special code here to
1462 make sure P points to the start of an object. */
1463 offset
= ((const char *) p
- entry
->page
) % object_size_table
[entry
->order
];
1466 /* Here we've seen a char* which does not point to the beginning
1467 of an allocated object. We assume it points to the middle of
1469 gcc_assert (offset
== offsetof (struct tree_string
, str
));
1470 p
= ((const char *) p
) - offset
;
1471 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p
));
1475 bit
= OFFSET_TO_BIT (((const char *) p
) - entry
->page
, entry
->order
);
1476 word
= bit
/ HOST_BITS_PER_LONG
;
1477 mask
= (unsigned long) 1 << (bit
% HOST_BITS_PER_LONG
);
1479 /* If the bit was previously set, skip it. */
1480 if (entry
->in_use_p
[word
] & mask
)
1483 /* Otherwise set it, and decrement the free object count. */
1484 entry
->in_use_p
[word
] |= mask
;
1485 entry
->num_free_objects
-= 1;
1487 if (GGC_DEBUG_LEVEL
>= 4)
1488 fprintf (G
.debug_file
, "Marking %p\n", p
);
1494 /* User-callable entry points for marking string X. */
1497 gt_ggc_mx (const char *& x
)
1503 gt_ggc_mx (unsigned char *& x
)
1509 gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED
)
1513 /* If P is not marked, marks it and return false. Otherwise return true.
1514 P must have been allocated by the GC allocator; it mustn't point to
1515 static objects, stack variables, or memory allocated with malloc. */
1518 ggc_set_mark (const void *p
)
1524 /* Look up the page on which the object is alloced. If the object
1525 wasn't allocated by the collector, we'll probably die. */
1526 entry
= lookup_page_table_entry (p
);
1529 /* Calculate the index of the object on the page; this is its bit
1530 position in the in_use_p bitmap. */
1531 bit
= OFFSET_TO_BIT (((const char *) p
) - entry
->page
, entry
->order
);
1532 word
= bit
/ HOST_BITS_PER_LONG
;
1533 mask
= (unsigned long) 1 << (bit
% HOST_BITS_PER_LONG
);
1535 /* If the bit was previously set, skip it. */
1536 if (entry
->in_use_p
[word
] & mask
)
1539 /* Otherwise set it, and decrement the free object count. */
1540 entry
->in_use_p
[word
] |= mask
;
1541 entry
->num_free_objects
-= 1;
1543 if (GGC_DEBUG_LEVEL
>= 4)
1544 fprintf (G
.debug_file
, "Marking %p\n", p
);
1549 /* Return 1 if P has been marked, zero otherwise.
1550 P must have been allocated by the GC allocator; it mustn't point to
1551 static objects, stack variables, or memory allocated with malloc. */
1554 ggc_marked_p (const void *p
)
1560 /* Look up the page on which the object is alloced. If the object
1561 wasn't allocated by the collector, we'll probably die. */
1562 entry
= lookup_page_table_entry (p
);
1565 /* Calculate the index of the object on the page; this is its bit
1566 position in the in_use_p bitmap. */
1567 bit
= OFFSET_TO_BIT (((const char *) p
) - entry
->page
, entry
->order
);
1568 word
= bit
/ HOST_BITS_PER_LONG
;
1569 mask
= (unsigned long) 1 << (bit
% HOST_BITS_PER_LONG
);
1571 return (entry
->in_use_p
[word
] & mask
) != 0;
1574 /* Return the size of the gc-able object P. */
1577 ggc_get_size (const void *p
)
1579 page_entry
*pe
= lookup_page_table_entry (p
);
1580 return OBJECT_SIZE (pe
->order
);
1583 /* Release the memory for object P. */
1591 page_entry
*pe
= lookup_page_table_entry (p
);
1592 size_t order
= pe
->order
;
1593 size_t size
= OBJECT_SIZE (order
);
1595 if (GATHER_STATISTICS
)
1596 ggc_free_overhead (p
);
1598 if (GGC_DEBUG_LEVEL
>= 3)
1599 fprintf (G
.debug_file
,
1600 "Freeing object, actual size=%lu, at %p on %p\n",
1601 (unsigned long) size
, p
, (void *) pe
);
1603 #ifdef ENABLE_GC_CHECKING
1604 /* Poison the data, to indicate the data is garbage. */
1605 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p
, size
));
1606 memset (p
, 0xa5, size
);
1608 /* Let valgrind know the object is free. */
1609 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p
, size
));
1611 #ifdef ENABLE_GC_ALWAYS_COLLECT
1612 /* In the completely-anal-checking mode, we do *not* immediately free
1613 the data, but instead verify that the data is *actually* not
1614 reachable the next time we collect. */
1616 struct free_object
*fo
= XNEW (struct free_object
);
1618 fo
->next
= G
.free_object_list
;
1619 G
.free_object_list
= fo
;
1623 unsigned int bit_offset
, word
, bit
;
1625 G
.allocated
-= size
;
1627 /* Mark the object not-in-use. */
1628 bit_offset
= OFFSET_TO_BIT (((const char *) p
) - pe
->page
, order
);
1629 word
= bit_offset
/ HOST_BITS_PER_LONG
;
1630 bit
= bit_offset
% HOST_BITS_PER_LONG
;
1631 pe
->in_use_p
[word
] &= ~(1UL << bit
);
1633 if (pe
->num_free_objects
++ == 0)
1637 /* If the page is completely full, then it's supposed to
1638 be after all pages that aren't. Since we've freed one
1639 object from a page that was full, we need to move the
1640 page to the head of the list.
1642 PE is the node we want to move. Q is the previous node
1643 and P is the next node in the list. */
1645 if (q
&& q
->num_free_objects
== 0)
1651 /* If PE was at the end of the list, then Q becomes the
1652 new end of the list. If PE was not the end of the
1653 list, then we need to update the PREV field for P. */
1655 G
.page_tails
[order
] = q
;
1659 /* Move PE to the head of the list. */
1660 pe
->next
= G
.pages
[order
];
1662 G
.pages
[order
]->prev
= pe
;
1663 G
.pages
[order
] = pe
;
1666 /* Reset the hint bit to point to the only free object. */
1667 pe
->next_bit_hint
= bit_offset
;
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
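
/* Illustrative check, not part of the allocator: recompute the
   exact-division pair for one object size the same way as above and
   verify it by brute force.  The function name is hypothetical.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
example_check_inverse (uint32_t object_size)
{
  uint32_t size = object_size, inv;
  unsigned int e = 0;

  /* Strip the power-of-two factor into E...  */
  while (size % 2 == 0)
    {
      e++;
      size /= 2;
    }

  /* ... then Newton-iterate to the odd part's inverse mod 2^32; each
     step doubles the number of correct low-order bits.  */
  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv * size);

  /* Multiply-and-shift now equals true division for every offset that
     is a multiple of OBJECT_SIZE.  */
  for (uint32_t o = 0; o < (1u << 16); o += object_size)
    assert (((o * inv) >> e) == o / object_size);
}

/* example_check_inverse (24) exercises mult == 0xaaaaaaab, shift == 3.  */
#endif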
1703 /* Initialize the ggc-mmap allocator. */
1707 static bool init_p
= false;
1714 G
.pagesize
= getpagesize ();
1715 G
.lg_pagesize
= exact_log2 (G
.pagesize
);
1717 #ifdef HAVE_MMAP_DEV_ZERO
1718 G
.dev_zero_fd
= open ("/dev/zero", O_RDONLY
);
1719 if (G
.dev_zero_fd
== -1)
1720 internal_error ("open /dev/zero: %m");
1724 G
.debug_file
= fopen ("ggc-mmap.debug", "w");
1726 G
.debug_file
= stdout
;
1730 /* StunOS has an amazing off-by-one error for the first mmap allocation
1731 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1732 believe, is an unaligned page allocation, which would cause us to
1733 hork badly if we tried to use it. */
1735 char *p
= alloc_anon (NULL
, G
.pagesize
, true);
1736 struct page_entry
*e
;
1737 if ((uintptr_t)p
& (G
.pagesize
- 1))
1739 /* How losing. Discard this one and try another. If we still
1740 can't get something useful, give up. */
1742 p
= alloc_anon (NULL
, G
.pagesize
, true);
1743 gcc_assert (!((uintptr_t)p
& (G
.pagesize
- 1)));
1746 /* We have a good page, might as well hold onto it... */
1747 e
= XCNEW (struct page_entry
);
1748 e
->bytes
= G
.pagesize
;
1750 e
->next
= G
.free_pages
;
1755 /* Initialize the object size table. */
1756 for (order
= 0; order
< HOST_BITS_PER_PTR
; ++order
)
1757 object_size_table
[order
] = (size_t) 1 << order
;
1758 for (order
= HOST_BITS_PER_PTR
; order
< NUM_ORDERS
; ++order
)
1760 size_t s
= extra_order_size_table
[order
- HOST_BITS_PER_PTR
];
1762 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1763 so that we're sure of getting aligned memory. */
1764 s
= ROUND_UP (s
, MAX_ALIGNMENT
);
1765 object_size_table
[order
] = s
;
1768 /* Initialize the objects-per-page and inverse tables. */
1769 for (order
= 0; order
< NUM_ORDERS
; ++order
)
1771 objects_per_page_table
[order
] = G
.pagesize
/ OBJECT_SIZE (order
);
1772 if (objects_per_page_table
[order
] == 0)
1773 objects_per_page_table
[order
] = 1;
1774 compute_inverse (order
);
1777 /* Reset the size_lookup array to put appropriately sized objects in
1778 the special orders. All objects bigger than the previous power
1779 of two, but no greater than the special size, should go in the
1781 for (order
= HOST_BITS_PER_PTR
; order
< NUM_ORDERS
; ++order
)
1786 i
= OBJECT_SIZE (order
);
1787 if (i
>= NUM_SIZE_LOOKUP
)
1790 for (o
= size_lookup
[i
]; o
== size_lookup
[i
]; --i
)
1791 size_lookup
[i
] = order
;
1796 G
.depth
= XNEWVEC (unsigned int, G
.depth_max
);
1798 G
.by_depth_in_use
= 0;
1799 G
.by_depth_max
= INITIAL_PTE_COUNT
;
1800 G
.by_depth
= XNEWVEC (page_entry
*, G
.by_depth_max
);
1801 G
.save_in_use
= XNEWVEC (unsigned long *, G
.by_depth_max
);
1804 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1805 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1808 ggc_recalculate_in_use_p (page_entry
*p
)
1813 /* Because the past-the-end bit in in_use_p is always set, we
1814 pretend there is one additional object. */
1815 num_objects
= OBJECTS_IN_PAGE (p
) + 1;
1817 /* Reset the free object count. */
1818 p
->num_free_objects
= num_objects
;
1820 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1822 i
< CEIL (BITMAP_SIZE (num_objects
),
1823 sizeof (*p
->in_use_p
));
1828 /* Something is in use if it is marked, or if it was in use in a
1829 context further down the context stack. */
1830 p
->in_use_p
[i
] |= save_in_use_p (p
)[i
];
1832 /* Decrement the free object count for every object allocated. */
1833 for (j
= p
->in_use_p
[i
]; j
; j
>>= 1)
1834 p
->num_free_objects
-= (j
& 1);
1837 gcc_assert (p
->num_free_objects
< num_objects
);
1840 /* Unmark all objects. */
1847 for (order
= 2; order
< NUM_ORDERS
; order
++)
1851 for (p
= G
.pages
[order
]; p
!= NULL
; p
= p
->next
)
1853 size_t num_objects
= OBJECTS_IN_PAGE (p
);
1854 size_t bitmap_size
= BITMAP_SIZE (num_objects
+ 1);
1856 /* The data should be page-aligned. */
1857 gcc_assert (!((uintptr_t) p
->page
& (G
.pagesize
- 1)));
1859 /* Pages that aren't in the topmost context are not collected;
1860 nevertheless, we need their in-use bit vectors to store GC
1861 marks. So, back them up first. */
1862 if (p
->context_depth
< G
.context_depth
)
1864 if (! save_in_use_p (p
))
1865 save_in_use_p (p
) = XNEWVAR (unsigned long, bitmap_size
);
1866 memcpy (save_in_use_p (p
), p
->in_use_p
, bitmap_size
);
      /* Reset the number of free objects and clear the
	 in-use bits.  These will be adjusted by mark_obj.  */
1871 p
->num_free_objects
= num_objects
;
1872 memset (p
->in_use_p
, 0, bitmap_size
);
1874 /* Make sure the one-past-the-end bit is always set. */
1875 p
->in_use_p
[num_objects
/ HOST_BITS_PER_LONG
]
1876 = ((unsigned long) 1 << (num_objects
% HOST_BITS_PER_LONG
));
/* Check if any blocks with a registered finalizer have become unmarked.  If
   so run the finalizer and unregister it because the block is about to be
   freed.  Note that no guarantee is made about what order finalizers will
   run in so touching other objects in gc memory is extremely unwise.  */
1887 ggc_handle_finalizers ()
1889 if (G
.context_depth
!= 0)
1892 unsigned length
= G
.finalizers
.length ();
1893 for (unsigned int i
= 0; i
< length
;)
1895 finalizer
&f
= G
.finalizers
[i
];
1896 if (!ggc_marked_p (f
.addr ()))
1899 G
.finalizers
.unordered_remove (i
);
1907 length
= G
.vec_finalizers
.length ();
1908 for (unsigned int i
= 0; i
< length
;)
1910 vec_finalizer
&f
= G
.vec_finalizers
[i
];
1911 if (!ggc_marked_p (f
.addr ()))
1914 G
.vec_finalizers
.unordered_remove (i
);
1922 /* Free all empty pages. Partially empty pages need no attention
1923 because the `mark' bit doubles as an `unused' bit. */
1930 for (order
= 2; order
< NUM_ORDERS
; order
++)
1932 /* The last page-entry to consider, regardless of entries
1933 placed at the end of the list. */
1934 page_entry
* const last
= G
.page_tails
[order
];
1937 size_t live_objects
;
1938 page_entry
*p
, *previous
;
1948 page_entry
*next
= p
->next
;
1950 /* Loop until all entries have been examined. */
1953 num_objects
= OBJECTS_IN_PAGE (p
);
1955 /* Add all live objects on this page to the count of
1956 allocated memory. */
1957 live_objects
= num_objects
- p
->num_free_objects
;
1959 G
.allocated
+= OBJECT_SIZE (order
) * live_objects
;
1961 /* Only objects on pages in the topmost context should get
1963 if (p
->context_depth
< G
.context_depth
)
1966 /* Remove the page if it's empty. */
1967 else if (live_objects
== 0)
1969 /* If P was the first page in the list, then NEXT
1970 becomes the new first page in the list, otherwise
1971 splice P out of the forward pointers. */
1973 G
.pages
[order
] = next
;
1975 previous
->next
= next
;
1977 /* Splice P out of the back pointers too. */
1979 next
->prev
= previous
;
1981 /* Are we removing the last element? */
1982 if (p
== G
.page_tails
[order
])
1983 G
.page_tails
[order
] = previous
;
1988 /* If the page is full, move it to the end. */
1989 else if (p
->num_free_objects
== 0)
1991 /* Don't move it if it's already at the end. */
1992 if (p
!= G
.page_tails
[order
])
1994 /* Move p to the end of the list. */
1996 p
->prev
= G
.page_tails
[order
];
1997 G
.page_tails
[order
]->next
= p
;
1999 /* Update the tail pointer... */
2000 G
.page_tails
[order
] = p
;
2002 /* ... and the head pointer, if necessary. */
2004 G
.pages
[order
] = next
;
2006 previous
->next
= next
;
2008 /* And update the backpointer in NEXT if necessary. */
2010 next
->prev
= previous
;
2016 /* If we've fallen through to here, it's a page in the
2017 topmost context that is neither full nor empty. Such a
2018 page must precede pages at lesser context depth in the
2019 list, so move it to the head. */
2020 else if (p
!= G
.pages
[order
])
2022 previous
->next
= p
->next
;
2024 /* Update the backchain in the next node if it exists. */
2026 p
->next
->prev
= previous
;
2028 /* Move P to the head of the list. */
2029 p
->next
= G
.pages
[order
];
2031 G
.pages
[order
]->prev
= p
;
2033 /* Update the head pointer. */
2036 /* Are we moving the last element? */
2037 if (G
.page_tails
[order
] == p
)
2038 G
.page_tails
[order
] = previous
;
2047 /* Now, restore the in_use_p vectors for any pages from contexts
2048 other than the current one. */
2049 for (p
= G
.pages
[order
]; p
; p
= p
->next
)
2050 if (p
->context_depth
!= G
.context_depth
)
2051 ggc_recalculate_in_use_p (p
);
2055 #ifdef ENABLE_GC_CHECKING
2056 /* Clobber all free objects. */
2063 for (order
= 2; order
< NUM_ORDERS
; order
++)
2065 size_t size
= OBJECT_SIZE (order
);
2068 for (p
= G
.pages
[order
]; p
!= NULL
; p
= p
->next
)
2073 if (p
->context_depth
!= G
.context_depth
)
2074 /* Since we don't do any collection for pages in pushed
2075 contexts, there's no need to do any poisoning. And
2076 besides, the IN_USE_P array isn't valid until we pop
2080 num_objects
= OBJECTS_IN_PAGE (p
);
2081 for (i
= 0; i
< num_objects
; i
++)
2084 word
= i
/ HOST_BITS_PER_LONG
;
2085 bit
= i
% HOST_BITS_PER_LONG
;
2086 if (((p
->in_use_p
[word
] >> bit
) & 1) == 0)
2088 char *object
= p
->page
+ i
* size
;
2090 /* Keep poison-by-write when we expect to use Valgrind,
2091 so the exact same memory semantics is kept, in case
2092 there are memory errors. We override this request
2094 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object
,
2096 memset (object
, 0xa5, size
);
2098 /* Drop the handle to avoid handle leak. */
2099 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object
, size
));
2106 #define poison_pages()
2109 #ifdef ENABLE_GC_ALWAYS_COLLECT
2110 /* Validate that the reportedly free objects actually are. */
2113 validate_free_objects (void)
2115 struct free_object
*f
, *next
, *still_free
= NULL
;
2117 for (f
= G
.free_object_list
; f
; f
= next
)
2119 page_entry
*pe
= lookup_page_table_entry (f
->object
);
2122 bit
= OFFSET_TO_BIT ((char *)f
->object
- pe
->page
, pe
->order
);
2123 word
= bit
/ HOST_BITS_PER_LONG
;
2124 bit
= bit
% HOST_BITS_PER_LONG
;
2127 /* Make certain it isn't visible from any root. Notice that we
2128 do this check before sweep_pages merges save_in_use_p. */
2129 gcc_assert (!(pe
->in_use_p
[word
] & (1UL << bit
)));
2131 /* If the object comes from an outer context, then retain the
2132 free_object entry, so that we can verify that the address
2133 isn't live on the stack in some outer context. */
2134 if (pe
->context_depth
!= G
.context_depth
)
2136 f
->next
= still_free
;
2143 G
.free_object_list
= still_free
;
2146 #define validate_free_objects()
2149 /* Top level mark-and-sweep routine. */
2154 /* Avoid frequent unnecessary work by skipping collection if the
2155 total allocations haven't expanded much since the last
2157 float allocated_last_gc
=
2158 MAX (G
.allocated_last_gc
, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE
) * 1024);
2160 float min_expand
= allocated_last_gc
* PARAM_VALUE (GGC_MIN_EXPAND
) / 100;
2161 if (G
.allocated
< allocated_last_gc
+ min_expand
&& !ggc_force_collect
)
2164 timevar_push (TV_GC
);
2166 fprintf (stderr
, " {GC %luk -> ", (unsigned long) G
.allocated
/ 1024);
2167 if (GGC_DEBUG_LEVEL
>= 2)
2168 fprintf (G
.debug_file
, "BEGIN COLLECTING\n");
2170 /* Zero the total allocated bytes. This will be recalculated in the
2174 /* Release the pages we freed the last time we collected, but didn't
2175 reuse in the interim. */
2178 /* Indicate that we've seen collections at this context depth. */
2179 G
.context_depth_collections
= ((unsigned long)1 << (G
.context_depth
+ 1)) - 1;
2181 invoke_plugin_callbacks (PLUGIN_GGC_START
, NULL
);
2186 ggc_handle_finalizers ();
2188 if (GATHER_STATISTICS
)
2189 ggc_prune_overhead_list ();
2192 validate_free_objects ();
2196 G
.allocated_last_gc
= G
.allocated
;
2198 invoke_plugin_callbacks (PLUGIN_GGC_END
, NULL
);
2200 timevar_pop (TV_GC
);
2203 fprintf (stderr
, "%luk}", (unsigned long) G
.allocated
/ 1024);
2204 if (GGC_DEBUG_LEVEL
>= 2)
2205 fprintf (G
.debug_file
, "END COLLECTING\n");
/* Assume that all GGC memory is reachable and grow the limits for next
   collection.  With checking, trigger GGC so -Q compilation outputs how much
   of memory really is reachable.  */
2216 G
.allocated_last_gc
= MAX (G
.allocated_last_gc
,
2222 fprintf (stderr
, " {GC start %luk} ", (unsigned long) G
.allocated
/ 1024);
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
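
/* Illustrative checks, not part of the statistics code: how SCALE and
   STAT_LABEL pair up.  Values under 10K print unscaled with a blank
   label, values under 10M in kilobytes, anything larger in
   megabytes.  */
#if 0
#include <assert.h>

static void
example_scale_checks (void)
{
  assert (SCALE (8000) == 8000 && STAT_LABEL (8000) == ' ');
  assert (SCALE (2000000) == 1953 && STAT_LABEL (2000000) == 'k');
  assert (SCALE (3000000000ul) == 2861 && STAT_LABEL (3000000000ul) == 'M');
}
#endif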
2234 ggc_print_statistics (void)
2236 struct ggc_statistics stats
;
2238 size_t total_overhead
= 0;
2240 /* Clear the statistics. */
2241 memset (&stats
, 0, sizeof (stats
));
2243 /* Make sure collection will really occur. */
2244 G
.allocated_last_gc
= 0;
2246 /* Collect and print the statistics common across collectors. */
2247 ggc_print_common_statistics (stderr
, &stats
);
2249 /* Release free pages so that we will not count the bytes allocated
2250 there as part of the total allocated memory. */
2253 /* Collect some information about the various sizes of
2256 "Memory still allocated at the end of the compilation process\n");
2257 fprintf (stderr
, "%-8s %10s %10s %10s\n",
2258 "Size", "Allocated", "Used", "Overhead");
2259 for (i
= 0; i
< NUM_ORDERS
; ++i
)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-8lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-8s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL (G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));
  if (GATHER_STATISTICS)
    {
      fprintf (stderr, "\nTotal allocations and overheads during "
	       "the compilation process\n");

      fprintf (stderr, "Total Overhead: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead);
      fprintf (stderr, "Total Allocated: %10"
	       HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_allocated);

      fprintf (stderr, "Total Overhead under 32B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under32);
      fprintf (stderr, "Total Allocated under 32B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under32);
      fprintf (stderr, "Total Overhead under 64B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under64);
      fprintf (stderr, "Total Allocated under 64B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under64);
      fprintf (stderr, "Total Overhead under 128B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under128);
      fprintf (stderr, "Total Allocated under 128B: %10"
	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under128);
      for (i = 0; i < NUM_ORDERS; i++)
	if (G.stats.total_allocated_per_order[i])
	  {
	    fprintf (stderr, "Total Overhead page size %9lu: %10"
		     HOST_LONG_LONG_FORMAT "d\n",
		     (unsigned long) OBJECT_SIZE (i),
		     G.stats.total_overhead_per_order[i]);
	    fprintf (stderr, "Total Allocated page size %9lu: %10"
		     HOST_LONG_LONG_FORMAT "d\n",
		     (unsigned long) OBJECT_SIZE (i),
		     G.stats.total_allocated_per_order[i]);
	  }
    }
}
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  uintptr_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};
/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}
/* Record that an object of SIZE will be written to the PCH, bucketed by
   its allocation order.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
  return a;
}
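
/* Because each order's contribution is rounded up with PAGE_ALIGN, every
   order's region in the PCH image starts on a host-page boundary; the
   per-object tallies alone would not guarantee that.  */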
void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  uintptr_t a = (uintptr_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
    }
}
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}
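
/* Allocation is a simple bump of the per-order base pointer.  For
   instance, two consecutive requests that both map to a 32-byte order
   are placed exactly 32 bytes apart, regardless of their requested
   sizes.  */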
void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
void
ggc_pch_write_object (struct ggc_pch_data *d,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
	 than most padding requests as the source for our null bytes.  This
	 permits us to do the padding with fwrite() rather than fseek(), and
	 limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
	{
	  if (fwrite (emptyBytes, 1, padding, f) != padding)
	    fatal_error (input_location, "can%'t write PCH file");
	}
      else
	{
	  /* Larger than our buffer?  Just default to fseek.  */
	  if (fseek (f, padding, SEEK_CUR) != 0)
	    fatal_error (input_location, "can%'t write PCH file");
	}
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error (input_location, "can%'t write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the per-order object counts so that ggc_pch_read can
     rebuild the matching page-table entries.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");
  free (d);
}
/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;
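
  /* Sketch of the reshuffle (illustrative): with old entries [A, B] and
     newly pushed PCH entries [C, D, E], by_depth changes from
     [A, B, C, D, E] to [C, D, E, A, B], save_in_use is permuted the same
     way, and the old entries then begin at index count_new_page_tables
     (3 in this example), which is what the final push_depth records.  */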
  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();
  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }
  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error (input_location, "can%'t read PCH file: %m");
  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);
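
      /* Mechanically, with HOST_BITS_PER_LONG == 64 and num_objs == 70,
	 the first loop fills word 0 (bits 0-63) and the second sets bits
	 64-70 of word 1; all num_objs + 1 bits end up set, so every object
	 read from the PCH is treated as in use and is never swept.  */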
      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays; later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }
  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}