1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
22 #include "coretypes.h"
27 #include "diagnostic-core.h"
30 #include "ggc-internal.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
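/* Illustrative sketch (not part of the allocator): rounding a request
   up to its power-of-two `order' as described above.  The helper and
   its simple loop are hypothetical; the real code uses the size_lookup
   table and the extra orders defined further down.  */
#if 0
static unsigned
example_order_for_size (size_t size)
{
  unsigned order = 0;
  while (((size_t) 1 << order) < size)
    ++order;
  return order;   /* e.g. a 20-byte request yields order 5 (32-byte objects).  */
}
#endif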
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
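/* Illustrative worked example (not part of the allocator), assuming a
   4K page size (G.lg_pagesize == 12, hence PAGE_L2_BITS == 12): the
   32-bit address 0x12345678 decomposes as
     LOOKUP_L1 (p) == 0x12   (top PAGE_L1_BITS == 8 bits below bit 32),
     LOOKUP_L2 (p) == 0x345  (next PAGE_L2_BITS == 12 bits),
   while the low 12 bits (0x678) are the ignored in-page offset.  */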
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
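/* Illustrative worked example (not part of the allocator): for a
   power-of-two object size such as 32 bytes (order 5), compute_inverse
   below produces DIV_MULT == 1 and DIV_SHIFT == 5, so
   OFFSET_TO_BIT (96, 5) == 96 >> 5 == 3, i.e. a 96-byte offset is the
   fourth object on the page.  Non-power-of-two sizes use a
   multiplicative inverse instead; see the worked example following
   compute_inverse.  */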
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};
/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* Round X to next multiple of the page size.  */

#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
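/* Illustrative worked example (not part of the allocator):
   ROUND_UP_VALUE (5, 8) == 3, ROUND_UP (5, 8) == 8, and with a 4K page
   size PAGE_ALIGN (4097) == 8192.  PAGE_ALIGN relies on G.pagesize
   being a power of two; ROUND_UP and ROUND_UP_VALUE work for any
   factor F.  */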
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by that given depth we
     are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
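/* Illustrative worked example (not part of the allocator): on a host
   with 64-bit longs, a page holding 128 objects needs
   BITMAP_SIZE (128 + 1) == CEIL (129, 64) * 8 == 24 bytes, the "+ 1"
   covering the one-past-the-end sentry bit set in alloc_page.  */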
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */

#define INITIAL_PTE_COUNT 128
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
static char *alloc_anon (char *, size_t, bool check);
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);
/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}
/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
	return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
	G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
	{
	  page = alloc_anon (NULL, G.pagesize, true);
	  entries = 1;
	}

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = entries - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}
/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger continuous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  if (!p->discarded)
	    mapped_len += p->bytes;
	  newprev = p;
	  p = p->next;
	}
      if (len >= free_unit)
	{
	  while (start_p != p)
	    {
	      next = start_p->next;
	      free (start_p);
	      start_p = next;
	    }
	  munmap (start, len);
	  if (prev)
	    prev->next = p;
	  else
	    G.free_pages = p;
	  G.bytes_mapped -= mapped_len;
	  continue;
	}
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time.  */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
	{
	  p = p->next;
	  continue;
	}
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  p = p->next;
	}
      /* Give the page back to the kernel, but don't free the mapping.
	 This avoids fragmentation in the virtual memory map of the
	 process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped to not touch the garbage collector
	 unnecessarily.  */
      G.bytes_mapped -= len;
      while (start_p != p)
	{
	  start_p->discarded = true;
	  start_p = start_p->next;
	}
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
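/* Illustrative note (not part of the allocator): the table gives the
   order directly for small requests, e.g. size_lookup[24] == 5, so a
   24-byte request is served from 32-byte objects; requests of
   NUM_SIZE_LOOKUP bytes or more fall back to a linear search over
   OBJECT_SIZE, as ggc_round_alloc_size_1 does below.  */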
/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
			size_t *size_order,
			size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}
/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;

#if GCC_VERSION >= 3004
	  bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
#endif

	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
  if (GATHER_STATISTICS)
    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
			 result FINAL_PASS_MEM_STAT);

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object unaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
						object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     unaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

  if (GATHER_STATISTICS)
    {
      size_t overhead = object_size - size;

      G.stats.total_overhead += overhead;
      G.stats.total_allocated += object_size;
      G.stats.total_overhead_per_order[order] += overhead;
      G.stats.total_allocated_per_order[order] += object_size;

      if (size <= 32)
	{
	  G.stats.total_overhead_under32 += overhead;
	  G.stats.total_allocated_under32 += object_size;
	}
      if (size <= 64)
	{
	  G.stats.total_overhead_under64 += overhead;
	  G.stats.total_allocated_under64 += object_size;
	}
      if (size <= 128)
	{
	  G.stats.total_overhead_under128 += overhead;
	  G.stats.total_allocated_under128 += object_size;
	}
    }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}
/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p || !ggc_allocated_p (p))
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
	 of an allocated object.  We assume it points to the middle of
	 a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}


/* User-callable entry points for marking string X.  */

/* If X is a string, mark it.  Otherwise, do nothing.  */

void
gt_ggc_mx (const char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
{
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GATHER_STATISTICS)
    ggc_free_overhead (p);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
	page_entry *p, *q;

	/* If the page is completely full, then it's supposed to
	   be after all pages that aren't.  Since we've freed one
	   object from a page that was full, we need to move the
	   page to the head of the list.

	   PE is the node we want to move.  Q is the previous node
	   and P is the next node in the list.  */
	q = pe->prev;
	if (q && q->num_free_objects == 0)
	  {
	    p = pe->next;

	    q->next = p;

	    /* If PE was at the end of the list, then Q becomes the
	       new end of the list.  If PE was not the end of the
	       list, then we need to update the PREV field for P.  */
	    if (!p)
	      G.page_tails[order] = q;
	    else
	      p->prev = q;

	    /* Move PE to the head of the list.  */
	    pe->next = G.pages[order];
	    pe->prev = NULL;
	    G.pages[order]->prev = pe;
	    G.pages[order] = pe;
	  }

	/* Reset the hint bit to point to the only free object.  */
	pe->next_bit_hint = bit_offset;
      }
  }
#endif
}
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
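/* Illustrative worked example (not part of the allocator), assuming a
   hypothetical 40-byte object size and a 32-bit size_t: stripping
   factors of two gives e == 3 and an odd part of 5; the iteration
   converges on inv == 0xCCCCCCCD, since 5 * 0xCCCCCCCD == 1 (mod 2^32).
   Then OFFSET_TO_BIT (120, order)
   == ((120 * 0xCCCCCCCD) mod 2^32) >> 3 == 24 >> 3 == 3 == 120 / 40.  */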
/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, true);
    struct page_entry *e;
    if ((uintptr_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, true);
	gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
	 so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
	continue;

      for (o = size_lookup[i]; o == size_lookup [i]; --i)
	size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects = OBJECTS_IN_PAGE (p);
	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

	  /* The data should be page-aligned.  */
	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! save_in_use_p (p))
		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  num_objects = OBJECTS_IN_PAGE (p);

	  /* Add all live objects on this page to the count of
	     allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      /* If P was the first page in the list, then NEXT
		 becomes the new first page in the list, otherwise
		 splice P out of the forward pointers.  */
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Splice P out of the back pointers too.  */
	      if (next)
		next->prev = previous;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  p->prev = G.page_tails[order];
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;

		  /* And update the backpointer in NEXT if necessary.  */
		  if (next)
		    next->prev = previous;

		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;
	      p->prev = previous;

	      /* Update the backchain in the next node if it exists.  */
	      if (p->next)
		p->next->prev = previous;

	      /* Move P to the head of the list.  */
	      p->next = G.pages[order];
	      p->prev = NULL;
	      G.pages[order]->prev = p;

	      /* Update the head pointer.  */
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects;
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  num_objects = OBJECTS_IN_PAGE (p);
	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		{
		  char *object = p->page + i * size;

		  /* Keep poison-by-write when we expect to use Valgrind,
		     so the exact same memory semantics is kept, in case
		     there are memory errors.  We override this request
		     below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
								 size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
		}
	    }
	}
    }
}
#else
#define poison_pages()
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
	 do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
	 free_object entry, so that we can verify that the address
	 isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
	{
	  f->next = still_free;
	  still_free = f;
	}
      else
	free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  clear_marks ();
  ggc_mark_roots ();

  if (GATHER_STATISTICS)
    ggc_prune_overhead_list ();

  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
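/* Illustrative worked example (not part of the allocator): a value of
   5,000 prints as "5000 " (raw bytes), 500,000 prints as "488k", and
   50,000,000 prints as "47M", since SCALE switches units at 10K and
   10M and STAT_LABEL supplies the matching suffix.  */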
void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
	   "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s %10s %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL (G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));

  if (GATHER_STATISTICS)
    {
      fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

      fprintf (stderr, "Total Overhead: %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_overhead);
      fprintf (stderr, "Total Allocated: %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_allocated);

      fprintf (stderr, "Total Overhead under 32B: %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_overhead_under32);
      fprintf (stderr, "Total Allocated under 32B: %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_allocated_under32);
      fprintf (stderr, "Total Overhead under 64B: %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_overhead_under64);
      fprintf (stderr, "Total Allocated under 64B: %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_allocated_under64);
      fprintf (stderr, "Total Overhead under 128B: %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_overhead_under128);
      fprintf (stderr, "Total Allocated under 128B: %10" HOST_LONG_LONG_FORMAT "d\n",
	       G.stats.total_allocated_under128);

      for (i = 0; i < NUM_ORDERS; i++)
	if (G.stats.total_allocated_per_order[i])
	  {
	    fprintf (stderr, "Total Overhead page size %7lu: %10" HOST_LONG_LONG_FORMAT "d\n",
		     (unsigned long) OBJECT_SIZE (i),
		     G.stats.total_overhead_per_order[i]);
	    fprintf (stderr, "Total Allocated page size %7lu: %10" HOST_LONG_LONG_FORMAT "d\n",
		     (unsigned long) OBJECT_SIZE (i),
		     G.stats.total_allocated_per_order[i]);
	  }
    }
}
struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  uintptr_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
  return a;
}

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  uintptr_t a = (uintptr_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
    }
}

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

void
ggc_pch_write_object (struct ggc_pch_data *d,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
	 than most padding requests as the source for our null bytes.  This
	 permits us to do the padding with fwrite() rather than fseek(), and
	 limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
	{
	  if (fwrite (emptyBytes, 1, padding, f) != padding)
	    fatal_error ("can%'t write PCH file");
	}
      else
	{
	  /* Larger than our buffer?  Just default to fseek.  */
	  if (fseek (f, padding, SEEK_CUR) != 0)
	    fatal_error ("can%'t write PCH file");
	}
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can%'t write PCH file: %m");
}

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");
  free (d);
}
/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays, later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}