1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
3 2010 Free Software Foundation, Inc.
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
7 <dan@codesourcery.com>.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
27 #include "coretypes.h"
32 #include "diagnostic-core.h"
35 #include "ggc-internal.h"
41 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
42 file open. Prefer either to valloc. */
44 # undef HAVE_MMAP_DEV_ZERO
48 #ifdef HAVE_MMAP_DEV_ZERO
53 #error Zone collector requires mmap
56 #if (GCC_VERSION < 3001)
57 #define prefetch(X) ((void) X)
58 #define prefetchw(X) ((void) X)
60 #define prefetch(X) __builtin_prefetch (X)
61 #define prefetchw(X) __builtin_prefetch (X, 1, 3)
66 If we track inter-zone pointers, we can mark single zones at a
69 If we have a zone where we guarantee no inter-zone pointers, we
70 could mark that zone separately.
72 The garbage zone should not be marked, and we should return 1 in
73 ggc_set_mark for any object in the garbage zone, which cuts off
78 This garbage-collecting allocator segregates objects into zones.
79 It also segregates objects into "large" and "small" bins. Large
80 objects are greater than page size.
82 Pages for small objects are broken up into chunks. The page has
83 a bitmap which marks the start position of each chunk (whether
84 allocated or free). Free chunks are on one of the zone's free
85 lists and contain a pointer to the next free chunk. Chunks in
86 most of the free lists have a fixed size determined by the
87 free list. Chunks in the "other" sized free list have their size
88 stored right after their chain pointer.
90 Empty pages (of all sizes) are kept on a single page cache list,
91 and are considered first when new pages are required; they are
92 deallocated at the start of the next collection if they haven't
93 been recycled by then. The free page list is currently per-zone. */
95 /* Define GGC_DEBUG_LEVEL to print debugging information.
96 0: No debugging output.
97 1: GC statistics only.
98 2: Page-entry allocations/deallocations as well.
99 3: Object allocations as well.
100 4: Object marks as well. */
101 #define GGC_DEBUG_LEVEL (0)
103 #ifndef HOST_BITS_PER_PTR
104 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
107 /* This structure manages small free chunks. The SIZE field is only
108 initialized if the chunk is in the "other" sized free list. Large
109 chunks are allocated one at a time to their own page, and so don't
113 struct alloc_chunk
*next_free
;
117 /* The size of the fixed-size portion of a small page descriptor. */
118 #define PAGE_OVERHEAD (offsetof (struct small_page_entry, alloc_bits))
120 /* The collector's idea of the page size. This must be a power of two
121 no larger than the system page size, because pages must be aligned
122 to this amount and are tracked at this granularity in the page
123 table. We choose a size at compile time for efficiency.
125 We could make a better guess at compile time if PAGE_SIZE is a
126 constant in system headers, and PAGE_SHIFT is defined... */
127 #define GGC_PAGE_SIZE 4096
128 #define GGC_PAGE_MASK (GGC_PAGE_SIZE - 1)
129 #define GGC_PAGE_SHIFT 12
132 /* Alternative definitions which use the runtime page size. */
133 #define GGC_PAGE_SIZE G.pagesize
134 #define GGC_PAGE_MASK G.page_mask
135 #define GGC_PAGE_SHIFT G.lg_pagesize
138 /* The size of a small page managed by the garbage collector. This
139 must currently be GGC_PAGE_SIZE, but with a few changes could
140 be any multiple of it to reduce certain kinds of overhead. */
141 #define SMALL_PAGE_SIZE GGC_PAGE_SIZE
143 /* Free bin information. These numbers may be in need of re-tuning.
144 In general, decreasing the number of free bins would seem to
145 increase the time it takes to allocate... */
147 /* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
150 #define NUM_FREE_BINS 64
151 #define FREE_BIN_DELTA MAX_ALIGNMENT
152 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
154 /* Allocation and marking parameters. */
156 /* The smallest allocatable unit to keep track of. */
157 #define BYTES_PER_ALLOC_BIT MAX_ALIGNMENT
159 /* The smallest markable unit. If we require each allocated object
160 to contain at least two allocatable units, we can use half as many
161 bits for the mark bitmap. But this adds considerable complexity
163 #define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT
165 #define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
167 /* We use this structure to determine the alignment required for
170 There are several things wrong with this estimation of alignment.
172 The maximum alignment for a structure is often less than the
173 maximum alignment for a basic data type; for instance, on some
174 targets long long must be aligned to sizeof (int) in a structure
175 and sizeof (long long) in a variable. i386-linux is one example;
176 Darwin is another (sometimes, depending on the compiler in use).
178 Also, long double is not included. Nothing in GCC uses long
179 double, so we assume that this is OK. On powerpc-darwin, adding
180 long double would bring the maximum alignment up to 16 bytes,
181 and until we need long double (or to vectorize compiler operations)
182 that's painfully wasteful. This will need to change, some day. */
184 struct max_alignment
{
192 /* The biggest alignment required. */
194 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
196 /* Compute the smallest multiple of F that is >= X. */
198 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
200 /* Types to use for the allocation and mark bitmaps. It might be
201 a good idea to add ffsl to libiberty and use unsigned long
202 instead; that could speed us up where long is wider than int. */
204 typedef unsigned int alloc_type
;
205 typedef unsigned int mark_type
;
206 #define alloc_ffs(x) ffs(x)
208 /* A page_entry records the status of an allocation page. This is the
209 common data between all three kinds of pages - small, large, and
211 typedef struct page_entry
213 /* The address at which the memory is allocated. */
216 /* The zone that this page entry belongs to. */
217 struct alloc_zone
*zone
;
219 #ifdef GATHER_STATISTICS
220 /* How many collections we've survived. */
224 /* Does this page contain small objects, or one large object? */
227 /* Is this page part of the loaded PCH? */
231 /* Additional data needed for small pages. */
232 struct small_page_entry
234 struct page_entry common
;
236 /* The next small page entry, or NULL if this is the last. */
237 struct small_page_entry
*next
;
239 /* If currently marking this zone, a pointer to the mark bits
240 for this page. If we aren't currently marking this zone,
241 this pointer may be stale (pointing to freed memory). */
242 mark_type
*mark_bits
;
244 /* The allocation bitmap. This array extends far enough to have
245 one bit for every BYTES_PER_ALLOC_BIT bytes in the page. */
246 alloc_type alloc_bits
[1];
249 /* Additional data needed for large pages. */
250 struct large_page_entry
252 struct page_entry common
;
254 /* The next large page entry, or NULL if this is the last. */
255 struct large_page_entry
*next
;
257 /* The number of bytes allocated, not including the page entry. */
260 /* The previous page in the list, so that we can unlink this one. */
261 struct large_page_entry
*prev
;
263 /* During marking, is this object marked? */
267 /* A two-level tree is used to look up the page-entry for a given
268 pointer. Two chunks of the pointer's bits are extracted to index
269 the first and second levels of the tree, as follows:
273 msb +----------------+----+------+------+ lsb
279 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
280 pages are aligned on system page boundaries. The next most
281 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
282 index values in the lookup table, respectively.
284 For 32-bit architectures and the settings below, there are no
285 leftover bits. For architectures with wider pointers, the lookup
286 tree points to a list of pages, which must be scanned to find the
289 #define PAGE_L1_BITS (8)
290 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
291 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
292 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
294 #define LOOKUP_L1(p) \
295 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
297 #define LOOKUP_L2(p) \
298 (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
300 #if HOST_BITS_PER_PTR <= 32
302 /* On 32-bit hosts, we use a two level page table, as pictured above. */
303 typedef page_entry
**page_table
[PAGE_L1_SIZE
];
307 /* On 64-bit hosts, we use the same two level page tables plus a linked
308 list that disambiguates the top 32-bits. There will almost always be
309 exactly one entry in the list. */
310 typedef struct page_table_chain
312 struct page_table_chain
*next
;
314 page_entry
**table
[PAGE_L1_SIZE
];
319 /* The global variables. */
320 static struct globals
322 /* The linked list of zones. */
323 struct alloc_zone
*zones
;
325 /* Lookup table for associating allocation pages with object addresses. */
328 /* The system's page size, and related constants. */
333 /* The size to allocate for a small page entry. This includes
334 the size of the structure and the size of the allocation
336 size_t small_page_overhead
;
338 #if defined (HAVE_MMAP_DEV_ZERO)
339 /* A file descriptor open to /dev/zero for reading. */
343 /* Allocate pages in chunks of this size, to throttle calls to memory
344 allocation routines. The first page is used, the rest go onto the
348 /* The file descriptor for debugging output. */
352 /* A zone allocation structure. There is one of these for every
353 distinct allocation zone. */
356 /* The most recent free chunk is saved here, instead of in the linked
357 free list, to decrease list manipulation. It is most likely that we
358 will want this one. */
360 size_t cached_free_size
;
362 /* Linked lists of free storage. Slots 1 ... NUM_FREE_BINS have chunks of size
363 FREE_BIN_DELTA. All other chunks are in slot 0. */
364 struct alloc_chunk
*free_chunks
[NUM_FREE_BINS
+ 1];
366 /* The highest bin index which might be non-empty. It may turn out
367 to be empty, in which case we have to search downwards. */
368 size_t high_free_bin
;
370 /* Bytes currently allocated in this zone. */
373 /* Linked list of the small pages in this zone. */
374 struct small_page_entry
*pages
;
376 /* Doubly linked list of large pages in this zone. */
377 struct large_page_entry
*large_pages
;
379 /* If we are currently marking this zone, a pointer to the mark bits. */
380 mark_type
*mark_bits
;
382 /* Name of the zone. */
385 /* The number of small pages currently allocated in this zone. */
386 size_t n_small_pages
;
388 /* Bytes allocated at the end of the last collection. */
389 size_t allocated_last_gc
;
391 /* Total amount of memory mapped. */
394 /* A cache of free system pages. */
395 struct small_page_entry
*free_pages
;
397 /* Next zone in the linked list of zones. */
398 struct alloc_zone
*next_zone
;
400 /* True if this zone was collected during this collection. */
403 /* True if this zone should be destroyed after the next collection. */
406 #ifdef GATHER_STATISTICS
409 /* Total GC-allocated memory. */
410 unsigned long long total_allocated
;
411 /* Total overhead for GC-allocated memory. */
412 unsigned long long total_overhead
;
414 /* Total allocations and overhead for sizes less than 32, 64 and 128.
415 These sizes are interesting because they are typical cache line
418 unsigned long long total_allocated_under32
;
419 unsigned long long total_overhead_under32
;
421 unsigned long long total_allocated_under64
;
422 unsigned long long total_overhead_under64
;
424 unsigned long long total_allocated_under128
;
425 unsigned long long total_overhead_under128
;
430 /* Some default zones. */
431 struct alloc_zone rtl_zone
;
432 struct alloc_zone tree_zone
;
433 struct alloc_zone tree_id_zone
;
435 /* The PCH zone does not need a normal zone structure, and it does
436 not live on the linked list of zones. */
439 /* The start of the PCH zone. NULL if there is none. */
442 /* The end of the PCH zone. NULL if there is none. */
445 /* The size of the PCH zone. 0 if there is none. */
448 /* The allocation bitmap for the PCH zone. */
449 alloc_type
*alloc_bits
;
451 /* If we are currently marking, the mark bitmap for the PCH zone.
452 When it is first read in, we could avoid marking the PCH,
453 because it will not contain any pointers to GC memory outside
454 of the PCH; however, the PCH is currently mapped as writable,
455 so we must mark it in case new pointers are added. */
456 mark_type
*mark_bits
;
460 static char *alloc_anon (char *, size_t, struct alloc_zone
*);
462 static struct small_page_entry
* alloc_small_page (struct alloc_zone
*);
463 static struct large_page_entry
* alloc_large_page (size_t, struct alloc_zone
*);
464 static void free_chunk (char *, size_t, struct alloc_zone
*);
465 static void free_small_page (struct small_page_entry
*);
466 static void free_large_page (struct large_page_entry
*);
467 static void release_pages (struct alloc_zone
*);
468 static void sweep_pages (struct alloc_zone
*);
469 static bool ggc_collect_1 (struct alloc_zone
*, bool);
470 static void new_ggc_zone_1 (struct alloc_zone
*, const char *);
472 /* Traverse the page table and find the entry for a page.
473 Die (probably) if the object wasn't allocated via GC. */
475 static inline page_entry
*
476 lookup_page_table_entry (const void *p
)
481 #if HOST_BITS_PER_PTR <= 32
484 page_table table
= G
.lookup
;
485 size_t high_bits
= (size_t) p
& ~ (size_t) 0xffffffff;
486 while (table
->high_bits
!= high_bits
)
488 base
= &table
->table
[0];
491 /* Extract the level 1 and 2 indices. */
498 /* Traverse the page table and find the entry for a page.
499 Return NULL if the object wasn't allocated via the GC. */
501 static inline page_entry
*
502 lookup_page_table_if_allocated (const void *p
)
507 #if HOST_BITS_PER_PTR <= 32
510 page_table table
= G
.lookup
;
511 size_t high_bits
= (size_t) p
& ~ (size_t) 0xffffffff;
516 if (table
->high_bits
== high_bits
)
520 base
= &table
->table
[0];
523 /* Extract the level 1 and 2 indices. */
529 if (L2
>= PAGE_L2_SIZE
)
531 /* We might have a page entry which does not correspond exactly to a
533 if (base
[L1
][L2
] && (const char *) p
< base
[L1
][L2
]->page
)
539 /* Set the page table entry for the page that starts at P. If ENTRY
540 is NULL, clear the entry. */
543 set_page_table_entry (void *p
, page_entry
*entry
)
548 #if HOST_BITS_PER_PTR <= 32
552 size_t high_bits
= (size_t) p
& ~ (size_t) 0xffffffff;
553 for (table
= G
.lookup
; table
; table
= table
->next
)
554 if (table
->high_bits
== high_bits
)
557 /* Not found -- allocate a new table. */
558 table
= XCNEW (struct page_table_chain
);
559 table
->next
= G
.lookup
;
560 table
->high_bits
= high_bits
;
563 base
= &table
->table
[0];
566 /* Extract the level 1 and 2 indices. */
570 if (base
[L1
] == NULL
)
571 base
[L1
] = XCNEWVEC (page_entry
*, PAGE_L2_SIZE
);
573 base
[L1
][L2
] = entry
;
576 /* Find the page table entry associated with OBJECT. */
578 static inline struct page_entry
*
579 zone_get_object_page (const void *object
)
581 return lookup_page_table_entry (object
);
584 /* Find which element of the alloc_bits array OBJECT should be
586 static inline unsigned int
587 zone_get_object_alloc_word (const void *object
)
589 return (((size_t) object
& (GGC_PAGE_SIZE
- 1))
590 / (8 * sizeof (alloc_type
) * BYTES_PER_ALLOC_BIT
));
593 /* Find which bit of the appropriate word in the alloc_bits array
594 OBJECT should be recorded in. */
595 static inline unsigned int
596 zone_get_object_alloc_bit (const void *object
)
598 return (((size_t) object
/ BYTES_PER_ALLOC_BIT
)
599 % (8 * sizeof (alloc_type
)));
602 /* Find which element of the mark_bits array OBJECT should be recorded
604 static inline unsigned int
605 zone_get_object_mark_word (const void *object
)
607 return (((size_t) object
& (GGC_PAGE_SIZE
- 1))
608 / (8 * sizeof (mark_type
) * BYTES_PER_MARK_BIT
));
611 /* Find which bit of the appropriate word in the mark_bits array
612 OBJECT should be recorded in. */
613 static inline unsigned int
614 zone_get_object_mark_bit (const void *object
)
616 return (((size_t) object
/ BYTES_PER_MARK_BIT
)
617 % (8 * sizeof (mark_type
)));
620 /* Set the allocation bit corresponding to OBJECT in its page's
621 bitmap. Used to split this object from the preceding one. */
623 zone_set_object_alloc_bit (const void *object
)
625 struct small_page_entry
*page
626 = (struct small_page_entry
*) zone_get_object_page (object
);
627 unsigned int start_word
= zone_get_object_alloc_word (object
);
628 unsigned int start_bit
= zone_get_object_alloc_bit (object
);
630 page
->alloc_bits
[start_word
] |= 1L << start_bit
;
633 /* Clear the allocation bit corresponding to OBJECT in PAGE's
634 bitmap. Used to coalesce this object with the preceding
637 zone_clear_object_alloc_bit (struct small_page_entry
*page
,
640 unsigned int start_word
= zone_get_object_alloc_word (object
);
641 unsigned int start_bit
= zone_get_object_alloc_bit (object
);
643 /* Would xor be quicker? */
644 page
->alloc_bits
[start_word
] &= ~(1L << start_bit
);
647 /* Find the size of the object which starts at START_WORD and
648 START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
649 Helper function for ggc_get_size and zone_find_object_size. */
652 zone_object_size_1 (alloc_type
*alloc_bits
,
653 size_t start_word
, size_t start_bit
,
657 alloc_type alloc_word
;
660 /* Load the first word. */
661 alloc_word
= alloc_bits
[start_word
++];
663 /* If that was the last bit in this word, we'll want to continue
664 with the next word. Otherwise, handle the rest of this word. */
667 indx
= alloc_ffs (alloc_word
>> start_bit
);
669 /* indx is 1-based. We started at the bit after the object's
670 start, but we also ended at the bit after the object's end.
672 return indx
* BYTES_PER_ALLOC_BIT
;
674 /* The extra 1 accounts for the starting unit, before start_bit. */
675 size
= (sizeof (alloc_type
) * 8 - start_bit
+ 1) * BYTES_PER_ALLOC_BIT
;
677 if (size
>= max_size
)
680 alloc_word
= alloc_bits
[start_word
++];
683 size
= BYTES_PER_ALLOC_BIT
;
685 while (alloc_word
== 0)
687 size
+= sizeof (alloc_type
) * 8 * BYTES_PER_ALLOC_BIT
;
688 if (size
>= max_size
)
690 alloc_word
= alloc_bits
[start_word
++];
693 indx
= alloc_ffs (alloc_word
);
694 return size
+ (indx
- 1) * BYTES_PER_ALLOC_BIT
;
697 /* Find the size of OBJECT on small page PAGE. */
700 zone_find_object_size (struct small_page_entry
*page
,
703 const char *object_midptr
= (const char *) object
+ BYTES_PER_ALLOC_BIT
;
704 unsigned int start_word
= zone_get_object_alloc_word (object_midptr
);
705 unsigned int start_bit
= zone_get_object_alloc_bit (object_midptr
);
706 size_t max_size
= (page
->common
.page
+ SMALL_PAGE_SIZE
707 - (const char *) object
);
709 return zone_object_size_1 (page
->alloc_bits
, start_word
, start_bit
,
713 /* highest_bit assumes that alloc_type is 32 bits. */
714 extern char check_alloc_type_size
[(sizeof (alloc_type
) == 4) ? 1 : -1];
716 /* Find the highest set bit in VALUE. Returns the bit number of that
717 bit, using the same values as ffs. */
718 static inline alloc_type
719 highest_bit (alloc_type value
)
721 /* This also assumes that alloc_type is unsigned. */
726 value
|= value
>> 16;
727 value
= value
^ (value
>> 1);
728 return alloc_ffs (value
);
731 /* Find the offset from the start of an object to P, which may point
732 into the interior of the object. */
735 zone_find_object_offset (alloc_type
*alloc_bits
, size_t start_word
,
738 unsigned int offset_in_bits
;
739 alloc_type alloc_word
= alloc_bits
[start_word
];
741 /* Mask off any bits after the initial bit, but make sure to include
742 the initial bit in the result. Note that START_BIT is
744 if (start_bit
< 8 * sizeof (alloc_type
) - 1)
745 alloc_word
&= (1 << (start_bit
+ 1)) - 1;
746 offset_in_bits
= start_bit
;
748 /* Search for the start of the object. */
749 while (alloc_word
== 0 && start_word
> 0)
751 alloc_word
= alloc_bits
[--start_word
];
752 offset_in_bits
+= 8 * sizeof (alloc_type
);
754 /* We must always find a set bit. */
755 gcc_assert (alloc_word
!= 0);
756 /* Note that the result of highest_bit is 1-based. */
757 offset_in_bits
-= highest_bit (alloc_word
) - 1;
759 return BYTES_PER_ALLOC_BIT
* offset_in_bits
;
762 /* Allocate the mark bits for every zone, and set the pointers on each
765 zone_allocate_marks (void)
767 struct alloc_zone
*zone
;
769 for (zone
= G
.zones
; zone
; zone
= zone
->next_zone
)
771 struct small_page_entry
*page
;
772 mark_type
*cur_marks
;
773 size_t mark_words
, mark_words_per_page
;
774 #ifdef ENABLE_CHECKING
779 = (GGC_PAGE_SIZE
+ BYTES_PER_MARK_WORD
- 1) / BYTES_PER_MARK_WORD
;
780 mark_words
= zone
->n_small_pages
* mark_words_per_page
;
781 zone
->mark_bits
= (mark_type
*) xcalloc (sizeof (mark_type
),
783 cur_marks
= zone
->mark_bits
;
784 for (page
= zone
->pages
; page
; page
= page
->next
)
786 page
->mark_bits
= cur_marks
;
787 cur_marks
+= mark_words_per_page
;
788 #ifdef ENABLE_CHECKING
792 gcc_checking_assert (n
== zone
->n_small_pages
);
795 /* We don't collect the PCH zone, but we do have to mark it
799 = (mark_type
*) xcalloc (sizeof (mark_type
),
800 CEIL (pch_zone
.bytes
, BYTES_PER_MARK_WORD
));
803 /* After marking and sweeping, release the memory used for mark bits. */
805 zone_free_marks (void)
807 struct alloc_zone
*zone
;
809 for (zone
= G
.zones
; zone
; zone
= zone
->next_zone
)
812 free (zone
->mark_bits
);
813 zone
->mark_bits
= NULL
;
818 free (pch_zone
.mark_bits
);
819 pch_zone
.mark_bits
= NULL
;
824 /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
825 (if non-null). The ifdef structure here is intended to cause a
826 compile error unless exactly one of the HAVE_* is defined. */
829 alloc_anon (char *pref ATTRIBUTE_UNUSED
, size_t size
, struct alloc_zone
*zone
)
831 #ifdef HAVE_MMAP_ANON
832 char *page
= (char *) mmap (pref
, size
, PROT_READ
| PROT_WRITE
,
833 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
835 #ifdef HAVE_MMAP_DEV_ZERO
836 char *page
= (char *) mmap (pref
, size
, PROT_READ
| PROT_WRITE
,
837 MAP_PRIVATE
, G
.dev_zero_fd
, 0);
840 if (page
== (char *) MAP_FAILED
)
842 perror ("virtual memory exhausted");
843 exit (FATAL_EXIT_CODE
);
846 /* Remember that we allocated this memory. */
847 zone
->bytes_mapped
+= size
;
849 /* Pretend we don't have access to the allocated pages. We'll enable
850 access to smaller pieces of the area in ggc_internal_alloc. Discard the
851 handle to avoid handle leak. */
852 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page
, size
));
858 /* Allocate a new page for allocating small objects in ZONE, and
859 return an entry for it. */
861 static struct small_page_entry
*
862 alloc_small_page (struct alloc_zone
*zone
)
864 struct small_page_entry
*entry
;
866 /* Check the list of free pages for one we can use. */
867 entry
= zone
->free_pages
;
870 /* Recycle the allocated memory from this page ... */
871 zone
->free_pages
= entry
->next
;
875 /* We want just one page. Allocate a bunch of them and put the
876 extras on the freelist. (Can only do this optimization with
877 mmap for backing store.) */
878 struct small_page_entry
*e
, *f
= zone
->free_pages
;
882 page
= alloc_anon (NULL
, GGC_PAGE_SIZE
* G
.quire_size
, zone
);
884 /* This loop counts down so that the chain will be in ascending
886 for (i
= G
.quire_size
- 1; i
>= 1; i
--)
888 e
= XCNEWVAR (struct small_page_entry
, G
.small_page_overhead
);
889 e
->common
.page
= page
+ (i
<< GGC_PAGE_SHIFT
);
890 e
->common
.zone
= zone
;
893 set_page_table_entry (e
->common
.page
, &e
->common
);
896 zone
->free_pages
= f
;
898 entry
= XCNEWVAR (struct small_page_entry
, G
.small_page_overhead
);
899 entry
->common
.page
= page
;
900 entry
->common
.zone
= zone
;
901 set_page_table_entry (page
, &entry
->common
);
904 zone
->n_small_pages
++;
906 if (GGC_DEBUG_LEVEL
>= 2)
907 fprintf (G
.debug_file
,
908 "Allocating %s page at %p, data %p-%p\n",
909 entry
->common
.zone
->name
, (PTR
) entry
, entry
->common
.page
,
910 entry
->common
.page
+ SMALL_PAGE_SIZE
- 1);
915 /* Allocate a large page of size SIZE in ZONE. */
917 static struct large_page_entry
*
918 alloc_large_page (size_t size
, struct alloc_zone
*zone
)
920 struct large_page_entry
*entry
;
924 needed_size
= size
+ sizeof (struct large_page_entry
);
925 page
= XNEWVAR (char, needed_size
);
927 entry
= (struct large_page_entry
*) page
;
930 entry
->common
.page
= page
+ sizeof (struct large_page_entry
);
931 entry
->common
.large_p
= true;
932 entry
->common
.pch_p
= false;
933 entry
->common
.zone
= zone
;
934 #ifdef GATHER_STATISTICS
935 entry
->common
.survived
= 0;
937 entry
->mark_p
= false;
941 set_page_table_entry (entry
->common
.page
, &entry
->common
);
943 if (GGC_DEBUG_LEVEL
>= 2)
944 fprintf (G
.debug_file
,
945 "Allocating %s large page at %p, data %p-%p\n",
946 entry
->common
.zone
->name
, (PTR
) entry
, entry
->common
.page
,
947 entry
->common
.page
+ SMALL_PAGE_SIZE
- 1);
953 /* For a page that is no longer needed, put it on the free page list. */
956 free_small_page (struct small_page_entry
*entry
)
958 if (GGC_DEBUG_LEVEL
>= 2)
959 fprintf (G
.debug_file
,
960 "Deallocating %s page at %p, data %p-%p\n",
961 entry
->common
.zone
->name
, (PTR
) entry
,
962 entry
->common
.page
, entry
->common
.page
+ SMALL_PAGE_SIZE
- 1);
964 gcc_assert (!entry
->common
.large_p
);
966 /* Mark the page as inaccessible. Discard the handle to
967 avoid handle leak. */
968 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry
->common
.page
,
971 entry
->next
= entry
->common
.zone
->free_pages
;
972 entry
->common
.zone
->free_pages
= entry
;
973 entry
->common
.zone
->n_small_pages
--;
976 /* Release a large page that is no longer needed. */
979 free_large_page (struct large_page_entry
*entry
)
981 if (GGC_DEBUG_LEVEL
>= 2)
982 fprintf (G
.debug_file
,
983 "Deallocating %s page at %p, data %p-%p\n",
984 entry
->common
.zone
->name
, (PTR
) entry
,
985 entry
->common
.page
, entry
->common
.page
+ SMALL_PAGE_SIZE
- 1);
987 gcc_assert (entry
->common
.large_p
);
989 set_page_table_entry (entry
->common
.page
, NULL
);
993 /* Release the free page cache to the system. */
996 release_pages (struct alloc_zone
*zone
)
999 struct small_page_entry
*p
, *next
;
1003 /* Gather up adjacent pages so they are unmapped together. */
1004 p
= zone
->free_pages
;
1008 start
= p
->common
.page
;
1010 len
= SMALL_PAGE_SIZE
;
1011 set_page_table_entry (p
->common
.page
, NULL
);
1014 while (p
&& p
->common
.page
== start
+ len
)
1017 len
+= SMALL_PAGE_SIZE
;
1018 set_page_table_entry (p
->common
.page
, NULL
);
1022 munmap (start
, len
);
1023 zone
->bytes_mapped
-= len
;
1026 zone
->free_pages
= NULL
;
1030 /* Place the block at PTR of size SIZE on the free list for ZONE. */
1033 free_chunk (char *ptr
, size_t size
, struct alloc_zone
*zone
)
1035 struct alloc_chunk
*chunk
= (struct alloc_chunk
*) ptr
;
1038 bin
= SIZE_BIN_DOWN (size
);
1039 gcc_assert (bin
!= 0);
1040 if (bin
> NUM_FREE_BINS
)
1043 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk
,
1047 chunk
->next_free
= zone
->free_chunks
[bin
];
1048 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
1057 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk
,
1060 chunk
->next_free
= zone
->free_chunks
[bin
];
1061 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr
1069 zone
->free_chunks
[bin
] = chunk
;
1070 if (bin
> zone
->high_free_bin
)
1071 zone
->high_free_bin
= bin
;
1072 if (GGC_DEBUG_LEVEL
>= 3)
1073 fprintf (G
.debug_file
, "Deallocating object, chunk=%p\n", (void *)chunk
);
1076 /* For a given size of memory requested for allocation, return the
1077 actual size that is going to be allocated. */
1080 ggc_round_alloc_size (size_t requested_size
)
1084 /* Make sure that zero-sized allocations get a unique and freeable
1086 if (requested_size
== 0)
1087 size
= MAX_ALIGNMENT
;
1089 size
= (requested_size
+ MAX_ALIGNMENT
- 1) & -MAX_ALIGNMENT
;
1094 /* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE. */
1097 ggc_internal_alloc_zone_stat (size_t orig_size
, struct alloc_zone
*zone
1102 struct small_page_entry
*entry
;
1103 struct alloc_chunk
*chunk
, **pp
;
1105 size_t size
= ggc_alloced_size_for_request (orig_size
);
1107 /* Try to allocate the object from several different sources. Each
1108 of these cases is responsible for setting RESULT and SIZE to
1109 describe the allocated block, before jumping to FOUND. If a
1110 chunk is split, the allocate bit for the new chunk should also be
1113 Large objects are handled specially. However, they'll just fail
1114 the next couple of conditions, so we can wait to check for them
1115 below. The large object case is relatively rare (< 1%), so this
1118 /* First try to split the last chunk we allocated. For best
1119 fragmentation behavior it would be better to look for a
1120 free bin of the appropriate size for a small object. However,
1121 we're unlikely (1% - 7%) to find one, and this gives better
1122 locality behavior anyway. This case handles the lion's share
1123 of all calls to this function. */
1124 if (size
<= zone
->cached_free_size
)
1126 result
= zone
->cached_free
;
1128 zone
->cached_free_size
-= size
;
1129 if (zone
->cached_free_size
)
1131 zone
->cached_free
+= size
;
1132 zone_set_object_alloc_bit (zone
->cached_free
);
1138 /* Next, try to find a free bin of the exactly correct size. */
1140 /* We want to round SIZE up, rather than down, but we know it's
1141 already aligned to at least FREE_BIN_DELTA, so we can just
1143 bin
= SIZE_BIN_DOWN (size
);
1145 if (bin
<= NUM_FREE_BINS
1146 && (chunk
= zone
->free_chunks
[bin
]) != NULL
)
1148 /* We have a chunk of the right size. Pull it off the free list
1151 zone
->free_chunks
[bin
] = chunk
->next_free
;
1153 /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
1154 == FREE_BIN_DELTA. */
1157 /* The allocation bits are already set correctly. HIGH_FREE_BIN
1158 may now be wrong, if this was the last chunk in the high bin.
1159 Rather than fixing it up now, wait until we need to search
1165 /* Next, if there wasn't a chunk of the ideal size, look for a chunk
1166 to split. We can find one in the too-big bin, or in the largest
1167 sized bin with a chunk in it. Try the largest normal-sized bin
1170 if (zone
->high_free_bin
> bin
)
1172 /* Find the highest numbered free bin. It will be at or below
1174 while (zone
->high_free_bin
> bin
1175 && zone
->free_chunks
[zone
->high_free_bin
] == NULL
)
1176 zone
->high_free_bin
--;
1178 if (zone
->high_free_bin
> bin
)
1180 size_t tbin
= zone
->high_free_bin
;
1181 chunk
= zone
->free_chunks
[tbin
];
1183 /* Remove the chunk from its previous bin. */
1184 zone
->free_chunks
[tbin
] = chunk
->next_free
;
1186 result
= (char *) chunk
;
1188 /* Save the rest of the chunk for future allocation. */
1189 if (zone
->cached_free_size
)
1190 free_chunk (zone
->cached_free
, zone
->cached_free_size
, zone
);
1192 chunk
= (struct alloc_chunk
*) ((char *) result
+ size
);
1193 zone
->cached_free
= (char *) chunk
;
1194 zone
->cached_free_size
= (tbin
- bin
) * FREE_BIN_DELTA
;
1196 /* Mark the new free chunk as an object, so that we can
1197 find the size of the newly allocated object. */
1198 zone_set_object_alloc_bit (chunk
);
1200 /* HIGH_FREE_BIN may now be wrong, if this was the last
1201 chunk in the high bin. Rather than fixing it up now,
1202 wait until we need to search the free bins. */
1208 /* Failing that, look through the "other" bucket for a chunk
1209 that is large enough. */
1210 pp
= &(zone
->free_chunks
[0]);
1212 while (chunk
&& chunk
->size
< size
)
1214 pp
= &chunk
->next_free
;
1220 /* Remove the chunk from its previous bin. */
1221 *pp
= chunk
->next_free
;
1223 result
= (char *) chunk
;
1225 /* Save the rest of the chunk for future allocation, if there's any
1227 csize
= chunk
->size
;
1230 if (zone
->cached_free_size
)
1231 free_chunk (zone
->cached_free
, zone
->cached_free_size
, zone
);
1233 chunk
= (struct alloc_chunk
*) ((char *) result
+ size
);
1234 zone
->cached_free
= (char *) chunk
;
1235 zone
->cached_free_size
= csize
- size
;
1237 /* Mark the new free chunk as an object. */
1238 zone_set_object_alloc_bit (chunk
);
1244 /* Handle large allocations. We could choose any threshold between
1245 GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
1246 GGC_PAGE_SIZE. It can't be smaller, because then it wouldn't
1247 be guaranteed to have a unique entry in the lookup table. Large
1248 allocations will always fall through to here. */
1249 if (size
> GGC_PAGE_SIZE
)
1251 struct large_page_entry
*entry
= alloc_large_page (size
, zone
);
1253 #ifdef GATHER_STATISTICS
1254 entry
->common
.survived
= 0;
1257 entry
->next
= zone
->large_pages
;
1258 if (zone
->large_pages
)
1259 zone
->large_pages
->prev
= entry
;
1260 zone
->large_pages
= entry
;
1262 result
= entry
->common
.page
;
1267 /* Failing everything above, allocate a new small page. */
1269 entry
= alloc_small_page (zone
);
1270 entry
->next
= zone
->pages
;
1271 zone
->pages
= entry
;
1273 /* Mark the first chunk in the new page. */
1274 entry
->alloc_bits
[0] = 1;
1276 result
= entry
->common
.page
;
1277 if (size
< SMALL_PAGE_SIZE
)
1279 if (zone
->cached_free_size
)
1280 free_chunk (zone
->cached_free
, zone
->cached_free_size
, zone
);
1282 zone
->cached_free
= (char *) result
+ size
;
1283 zone
->cached_free_size
= SMALL_PAGE_SIZE
- size
;
1285 /* Mark the new free chunk as an object. */
1286 zone_set_object_alloc_bit (zone
->cached_free
);
1291 /* We could save TYPE in the chunk, but we don't use that for
1292 anything yet. If we wanted to, we could do it by adding it
1293 either before the beginning of the chunk or after its end,
1294 and adjusting the size and pointer appropriately. */
1296 /* We'll probably write to this after we return. */
1299 #ifdef ENABLE_GC_CHECKING
1300 /* `Poison' the entire allocated object. */
1301 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result
, size
));
1302 memset (result
, 0xaf, size
);
1303 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result
+ orig_size
,
1307 /* Tell Valgrind that the memory is there, but its content isn't
1308 defined. The bytes at the end of the object are still marked
1310 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result
, orig_size
));
1312 /* Keep track of how many bytes are being allocated. This
1313 information is used in deciding when to collect. */
1314 zone
->allocated
+= size
;
1316 timevar_ggc_mem_total
+= size
;
1318 #ifdef GATHER_STATISTICS
1319 ggc_record_overhead (orig_size
, size
- orig_size
, result PASS_MEM_STAT
);
1322 size_t object_size
= size
;
1323 size_t overhead
= object_size
- orig_size
;
1325 zone
->stats
.total_overhead
+= overhead
;
1326 zone
->stats
.total_allocated
+= object_size
;
1328 if (orig_size
<= 32)
1330 zone
->stats
.total_overhead_under32
+= overhead
;
1331 zone
->stats
.total_allocated_under32
+= object_size
;
1333 if (orig_size
<= 64)
1335 zone
->stats
.total_overhead_under64
+= overhead
;
1336 zone
->stats
.total_allocated_under64
+= object_size
;
1338 if (orig_size
<= 128)
1340 zone
->stats
.total_overhead_under128
+= overhead
;
1341 zone
->stats
.total_allocated_under128
+= object_size
;
1346 if (GGC_DEBUG_LEVEL
>= 3)
1347 fprintf (G
.debug_file
, "Allocating object, size=%lu at %p\n",
1348 (unsigned long) size
, result
);
1353 #define ggc_internal_alloc_zone_pass_stat(s,z) \
1354 ggc_internal_alloc_zone_stat (s,z PASS_MEM_STAT)
1357 ggc_internal_cleared_alloc_zone_stat (size_t orig_size
,
1358 struct alloc_zone
*zone MEM_STAT_DECL
)
1360 void * result
= ggc_internal_alloc_zone_pass_stat (orig_size
, zone
);
1361 memset (result
, 0, orig_size
);
1366 /* Allocate a SIZE of chunk memory of GTE type, into an appropriate zone
1370 ggc_alloc_typed_stat (enum gt_types_enum gte
, size_t size
1375 case gt_ggc_e_14lang_tree_node
:
1376 return ggc_internal_alloc_zone_pass_stat (size
, &tree_zone
);
1378 case gt_ggc_e_7rtx_def
:
1379 return ggc_internal_alloc_zone_pass_stat (size
, &rtl_zone
);
1381 case gt_ggc_e_9rtvec_def
:
1382 return ggc_internal_alloc_zone_pass_stat (size
, &rtl_zone
);
1385 return ggc_internal_alloc_zone_pass_stat (size
, &main_zone
);
1389 /* Normal GC allocation simply allocates into the main zone. */
1392 ggc_internal_alloc_stat (size_t size MEM_STAT_DECL
)
1394 return ggc_internal_alloc_zone_pass_stat (size
, &main_zone
);
1397 /* Poison the chunk. */
1398 #ifdef ENABLE_GC_CHECKING
1399 #define poison_region(PTR, SIZE) \
1401 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE))); \
1402 memset ((PTR), 0xa5, (SIZE)); \
1403 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE))); \
1406 #define poison_region(PTR, SIZE)
1409 /* Free the object at P. */
1414 struct page_entry
*page
;
1416 #ifdef GATHER_STATISTICS
1417 ggc_free_overhead (p
);
1420 poison_region (p
, ggc_get_size (p
));
1422 page
= zone_get_object_page (p
);
1426 struct large_page_entry
*large_page
1427 = (struct large_page_entry
*) page
;
1429 /* Remove the page from the linked list. */
1430 if (large_page
->prev
)
1431 large_page
->prev
->next
= large_page
->next
;
1434 gcc_assert (large_page
->common
.zone
->large_pages
== large_page
);
1435 large_page
->common
.zone
->large_pages
= large_page
->next
;
1437 if (large_page
->next
)
1438 large_page
->next
->prev
= large_page
->prev
;
1440 large_page
->common
.zone
->allocated
-= large_page
->bytes
;
1442 /* Release the memory associated with this object. */
1443 free_large_page (large_page
);
1445 else if (page
->pch_p
)
1446 /* Don't do anything. We won't allocate a new object from the
1447 PCH zone so there's no point in releasing anything. */
1451 size_t size
= ggc_get_size (p
);
1453 page
->zone
->allocated
-= size
;
1455 /* Add the chunk to the free list. We don't bother with coalescing,
1456 since we are likely to want a chunk of this size again. */
1457 free_chunk ((char *)p
, size
, page
->zone
);
1461 /* Mark function for strings. */
1464 gt_ggc_m_S (const void *p
)
1467 unsigned long offset
;
1472 /* Look up the page on which the object is alloced. . */
1473 entry
= lookup_page_table_if_allocated (p
);
1479 size_t alloc_word
, alloc_bit
, t
;
1480 t
= ((const char *) p
- pch_zone
.page
) / BYTES_PER_ALLOC_BIT
;
1481 alloc_word
= t
/ (8 * sizeof (alloc_type
));
1482 alloc_bit
= t
% (8 * sizeof (alloc_type
));
1483 offset
= zone_find_object_offset (pch_zone
.alloc_bits
, alloc_word
,
1486 else if (entry
->large_p
)
1488 struct large_page_entry
*le
= (struct large_page_entry
*) entry
;
1489 offset
= ((const char *) p
) - entry
->page
;
1490 gcc_assert (offset
< le
->bytes
);
1494 struct small_page_entry
*se
= (struct small_page_entry
*) entry
;
1495 unsigned int start_word
= zone_get_object_alloc_word (p
);
1496 unsigned int start_bit
= zone_get_object_alloc_bit (p
);
1497 offset
= zone_find_object_offset (se
->alloc_bits
, start_word
, start_bit
);
1499 /* On some platforms a char* will not necessarily line up on an
1500 allocation boundary, so we have to update the offset to
1501 account for the leftover bytes. */
1502 offset
+= (size_t) p
% BYTES_PER_ALLOC_BIT
;
1507 /* Here we've seen a char* which does not point to the beginning
1508 of an allocated object. We assume it points to the middle of
1510 gcc_assert (offset
== offsetof (struct tree_string
, str
));
1511 p
= ((const char *) p
) - offset
;
1512 gt_ggc_mx_lang_tree_node (CONST_CAST(void *, p
));
1516 /* Inefficient, but also unlikely to matter. */
1520 /* If P is not marked, mark it and return false. Otherwise return true.
1521 P must have been allocated by the GC allocator; it mustn't point to
1522 static objects, stack variables, or memory allocated with malloc. */
1525 ggc_set_mark (const void *p
)
1527 struct page_entry
*page
;
1528 const char *ptr
= (const char *) p
;
1530 page
= zone_get_object_page (p
);
1534 size_t mark_word
, mark_bit
, offset
;
1535 offset
= (ptr
- pch_zone
.page
) / BYTES_PER_MARK_BIT
;
1536 mark_word
= offset
/ (8 * sizeof (mark_type
));
1537 mark_bit
= offset
% (8 * sizeof (mark_type
));
1539 if (pch_zone
.mark_bits
[mark_word
] & (1 << mark_bit
))
1541 pch_zone
.mark_bits
[mark_word
] |= (1 << mark_bit
);
1543 else if (page
->large_p
)
1545 struct large_page_entry
*large_page
1546 = (struct large_page_entry
*) page
;
1548 if (large_page
->mark_p
)
1550 large_page
->mark_p
= true;
1554 struct small_page_entry
*small_page
1555 = (struct small_page_entry
*) page
;
1557 if (small_page
->mark_bits
[zone_get_object_mark_word (p
)]
1558 & (1 << zone_get_object_mark_bit (p
)))
1560 small_page
->mark_bits
[zone_get_object_mark_word (p
)]
1561 |= (1 << zone_get_object_mark_bit (p
));
1564 if (GGC_DEBUG_LEVEL
>= 4)
1565 fprintf (G
.debug_file
, "Marking %p\n", p
);
1570 /* Return 1 if P has been marked, zero otherwise.
1571 P must have been allocated by the GC allocator; it mustn't point to
1572 static objects, stack variables, or memory allocated with malloc. */
1575 ggc_marked_p (const void *p
)
1577 struct page_entry
*page
;
1578 const char *ptr
= (const char *) p
;
1580 page
= zone_get_object_page (p
);
1584 size_t mark_word
, mark_bit
, offset
;
1585 offset
= (ptr
- pch_zone
.page
) / BYTES_PER_MARK_BIT
;
1586 mark_word
= offset
/ (8 * sizeof (mark_type
));
1587 mark_bit
= offset
% (8 * sizeof (mark_type
));
1589 return (pch_zone
.mark_bits
[mark_word
] & (1 << mark_bit
)) != 0;
1594 struct large_page_entry
*large_page
1595 = (struct large_page_entry
*) page
;
1597 return large_page
->mark_p
;
1601 struct small_page_entry
*small_page
1602 = (struct small_page_entry
*) page
;
1604 return 0 != (small_page
->mark_bits
[zone_get_object_mark_word (p
)]
1605 & (1 << zone_get_object_mark_bit (p
)));
1609 /* Return the size of the gc-able object P. */
1612 ggc_get_size (const void *p
)
1614 struct page_entry
*page
;
1615 const char *ptr
= (const char *) p
;
1617 page
= zone_get_object_page (p
);
1621 size_t alloc_word
, alloc_bit
, offset
, max_size
;
1622 offset
= (ptr
- pch_zone
.page
) / BYTES_PER_ALLOC_BIT
+ 1;
1623 alloc_word
= offset
/ (8 * sizeof (alloc_type
));
1624 alloc_bit
= offset
% (8 * sizeof (alloc_type
));
1625 max_size
= pch_zone
.bytes
- (ptr
- pch_zone
.page
);
1626 return zone_object_size_1 (pch_zone
.alloc_bits
, alloc_word
, alloc_bit
,
1631 return ((struct large_page_entry
*)page
)->bytes
;
1633 return zone_find_object_size ((struct small_page_entry
*) page
, p
);
1636 /* Initialize the ggc-zone-mmap allocator. */
1640 /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
1641 a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
1642 the current assumptions to hold. */
1644 gcc_assert (FREE_BIN_DELTA
== MAX_ALIGNMENT
);
1646 /* Set up the main zone by hand. */
1647 main_zone
.name
= "Main zone";
1648 G
.zones
= &main_zone
;
1650 /* Allocate the default zones. */
1651 new_ggc_zone_1 (&rtl_zone
, "RTL zone");
1652 new_ggc_zone_1 (&tree_zone
, "Tree zone");
1653 new_ggc_zone_1 (&tree_id_zone
, "Tree identifier zone");
1655 G
.pagesize
= getpagesize();
1656 G
.lg_pagesize
= exact_log2 (G
.pagesize
);
1657 G
.page_mask
= ~(G
.pagesize
- 1);
1659 /* Require the system page size to be a multiple of GGC_PAGE_SIZE. */
1660 gcc_assert ((G
.pagesize
& (GGC_PAGE_SIZE
- 1)) == 0);
1662 /* Allocate 16 system pages at a time. */
1663 G
.quire_size
= 16 * G
.pagesize
/ GGC_PAGE_SIZE
;
1665 /* Calculate the size of the allocation bitmap and other overhead. */
1666 /* Right now we allocate bits for the page header and bitmap. These
1667 are wasted, but a little tricky to eliminate. */
1668 G
.small_page_overhead
1669 = PAGE_OVERHEAD
+ (GGC_PAGE_SIZE
/ BYTES_PER_ALLOC_BIT
/ 8);
1670 /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */
1672 #ifdef HAVE_MMAP_DEV_ZERO
1673 G
.dev_zero_fd
= open ("/dev/zero", O_RDONLY
);
1674 gcc_assert (G
.dev_zero_fd
!= -1);
1678 G
.debug_file
= fopen ("ggc-mmap.debug", "w");
1679 setlinebuf (G
.debug_file
);
1681 G
.debug_file
= stdout
;
1685 /* StunOS has an amazing off-by-one error for the first mmap allocation
1686 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1687 believe, is an unaligned page allocation, which would cause us to
1688 hork badly if we tried to use it. */
1690 char *p
= alloc_anon (NULL
, G
.pagesize
, &main_zone
);
1691 struct small_page_entry
*e
;
1692 if ((size_t)p
& (G
.pagesize
- 1))
1694 /* How losing. Discard this one and try another. If we still
1695 can't get something useful, give up. */
1697 p
= alloc_anon (NULL
, G
.pagesize
, &main_zone
);
1698 gcc_assert (!((size_t)p
& (G
.pagesize
- 1)));
1701 if (GGC_PAGE_SIZE
== G
.pagesize
)
1703 /* We have a good page, might as well hold onto it... */
1704 e
= XCNEWVAR (struct small_page_entry
, G
.small_page_overhead
);
1706 e
->common
.zone
= &main_zone
;
1707 e
->next
= main_zone
.free_pages
;
1708 set_page_table_entry (e
->common
.page
, &e
->common
);
1709 main_zone
.free_pages
= e
;
1713 munmap (p
, G
.pagesize
);
1719 /* Start a new GGC zone. */
1722 new_ggc_zone_1 (struct alloc_zone
*new_zone
, const char * name
)
1724 new_zone
->name
= name
;
1725 new_zone
->next_zone
= G
.zones
->next_zone
;
1726 G
.zones
->next_zone
= new_zone
;
1729 /* Free all empty pages and objects within a page for a given zone */
1732 sweep_pages (struct alloc_zone
*zone
)
1734 struct large_page_entry
**lpp
, *lp
, *lnext
;
1735 struct small_page_entry
**spp
, *sp
, *snext
;
1737 size_t allocated
= 0;
1740 /* First, reset the free_chunks lists, since we are going to
1741 re-free free chunks in hopes of coalescing them into large chunks. */
1742 memset (zone
->free_chunks
, 0, sizeof (zone
->free_chunks
));
1743 zone
->high_free_bin
= 0;
1744 zone
->cached_free
= NULL
;
1745 zone
->cached_free_size
= 0;
1747 /* Large pages are all or none affairs. Either they are completely
1748 empty, or they are completely full. */
1749 lpp
= &zone
->large_pages
;
1750 for (lp
= zone
->large_pages
; lp
!= NULL
; lp
= lnext
)
1752 gcc_assert (lp
->common
.large_p
);
1756 #ifdef GATHER_STATISTICS
1757 /* This page has now survived another collection. */
1758 lp
->common
.survived
++;
1764 allocated
+= lp
->bytes
;
1770 #ifdef ENABLE_GC_CHECKING
1771 /* Poison the page. */
1772 memset (lp
->common
.page
, 0xb5, SMALL_PAGE_SIZE
);
1775 lp
->prev
->next
= lp
->next
;
1777 lp
->next
->prev
= lp
->prev
;
1778 free_large_page (lp
);
1783 for (sp
= zone
->pages
; sp
!= NULL
; sp
= snext
)
1785 char *object
, *last_object
;
1787 alloc_type
*alloc_word_p
;
1788 mark_type
*mark_word_p
;
1790 gcc_assert (!sp
->common
.large_p
);
1794 #ifdef GATHER_STATISTICS
1795 /* This page has now survived another collection. */
1796 sp
->common
.survived
++;
1799 /* Step through all chunks, consolidate those that are free and
1800 insert them into the free lists. Note that consolidation
1801 slows down collection slightly. */
1803 last_object
= object
= sp
->common
.page
;
1804 end
= sp
->common
.page
+ SMALL_PAGE_SIZE
;
1806 nomarksinpage
= true;
1807 mark_word_p
= sp
->mark_bits
;
1808 alloc_word_p
= sp
->alloc_bits
;
1810 gcc_assert (BYTES_PER_ALLOC_BIT
== BYTES_PER_MARK_BIT
);
1812 object
= sp
->common
.page
;
1816 alloc_type alloc_word
;
1817 mark_type mark_word
;
1819 alloc_word
= *alloc_word_p
++;
1820 mark_word
= *mark_word_p
++;
1823 nomarksinpage
= false;
1825 /* There ought to be some way to do this without looping... */
1827 while ((n
= alloc_ffs (alloc_word
)) != 0)
1829 /* Extend the current state for n - 1 bits. We can't
1830 shift alloc_word by n, even though it isn't used in the
1831 loop, in case only the highest bit was set. */
1832 alloc_word
>>= n
- 1;
1833 mark_word
>>= n
- 1;
1834 object
+= BYTES_PER_MARK_BIT
* (n
- 1);
1840 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free
,
1843 poison_region (last_free
, object
- last_free
);
1844 free_chunk (last_free
, object
- last_free
, zone
);
1848 allocated
+= object
- last_object
;
1849 last_object
= object
;
1853 if (last_free
== NULL
)
1856 allocated
+= object
- last_object
;
1859 zone_clear_object_alloc_bit (sp
, object
);
1862 /* Shift to just after the alloc bit we handled. */
1865 object
+= BYTES_PER_MARK_BIT
;
1870 object
+= BYTES_PER_MARK_BIT
* (8 * sizeof (alloc_type
) - i
);
1872 while (object
< end
);
1877 #ifdef ENABLE_GC_CHECKING
1878 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp
->common
.page
,
1880 /* Poison the page. */
1881 memset (sp
->common
.page
, 0xb5, SMALL_PAGE_SIZE
);
1883 free_small_page (sp
);
1888 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free
,
1889 object
- last_free
));
1890 poison_region (last_free
, object
- last_free
);
1891 free_chunk (last_free
, object
- last_free
, zone
);
1894 allocated
+= object
- last_object
;
1899 zone
->allocated
= allocated
;
1902 /* mark-and-sweep routine for collecting a single zone. NEED_MARKING
1903 is true if we need to mark before sweeping, false if some other
1904 zone collection has already performed marking for us. Returns true
1905 if we collected, false otherwise. */
1908 ggc_collect_1 (struct alloc_zone
*zone
, bool need_marking
)
1914 for (i
= 0; i
< NUM_FREE_BINS
+ 1; i
++)
1916 struct alloc_chunk
*chunk
;
1921 chunk
= zone
->free_chunks
[i
];
1926 chunk
= chunk
->next_free
;
1928 fprintf (stderr
, "Bin %d: %d free chunks (%d bytes)\n",
1936 fprintf (stderr
, " {%s GC %luk -> ",
1937 zone
->name
, (unsigned long) zone
->allocated
/ 1024);
1939 /* Zero the total allocated bytes. This will be recalculated in the
1941 zone
->allocated
= 0;
1943 /* Release the pages we freed the last time we collected, but didn't
1944 reuse in the interim. */
1945 release_pages (zone
);
1949 zone_allocate_marks ();
1951 #ifdef GATHER_STATISTICS
1952 ggc_prune_overhead_list ();
1957 zone
->was_collected
= true;
1958 zone
->allocated_last_gc
= zone
->allocated
;
1961 fprintf (stderr
, "%luk}", (unsigned long) zone
->allocated
/ 1024);
1965 #ifdef GATHER_STATISTICS
1966 /* Calculate the average page survival rate in terms of number of
1970 calculate_average_page_survival (struct alloc_zone
*zone
)
1973 float survival
= 0.0;
1974 struct small_page_entry
*p
;
1975 struct large_page_entry
*lp
;
1976 for (p
= zone
->pages
; p
; p
= p
->next
)
1979 survival
+= p
->common
.survived
;
1981 for (lp
= zone
->large_pages
; lp
; lp
= lp
->next
)
1984 survival
+= lp
->common
.survived
;
1986 return survival
/count
;
1990 /* Top level collection routine. */
1995 struct alloc_zone
*zone
;
1996 bool marked
= false;
1998 timevar_push (TV_GC
);
2000 if (!ggc_force_collect
)
2002 float allocated_last_gc
= 0, allocated
= 0, min_expand
;
2004 for (zone
= G
.zones
; zone
; zone
= zone
->next_zone
)
2006 allocated_last_gc
+= zone
->allocated_last_gc
;
2007 allocated
+= zone
->allocated
;
2011 MAX (allocated_last_gc
,
2012 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE
) * 1024);
2013 min_expand
= allocated_last_gc
* PARAM_VALUE (GGC_MIN_EXPAND
) / 100;
2015 if (allocated
< allocated_last_gc
+ min_expand
)
2017 timevar_pop (TV_GC
);
2022 invoke_plugin_callbacks (PLUGIN_GGC_START
, NULL
);
2024 /* Start by possibly collecting the main zone. */
2025 main_zone
.was_collected
= false;
2026 marked
|= ggc_collect_1 (&main_zone
, true);
2028 /* In order to keep the number of collections down, we don't
2029 collect other zones unless we are collecting the main zone. This
2030 gives us roughly the same number of collections as we used to
2031 have with the old gc. The number of collection is important
2032 because our main slowdown (according to profiling) is now in
2033 marking. So if we mark twice as often as we used to, we'll be
2034 twice as slow. Hopefully we'll avoid this cost when we mark
2036 /* NOTE drow/2004-07-28: We now always collect the main zone, but
2037 keep this code in case the heuristics are further refined. */
2039 if (main_zone
.was_collected
)
2041 struct alloc_zone
*zone
;
2043 for (zone
= main_zone
.next_zone
; zone
; zone
= zone
->next_zone
)
2045 zone
->was_collected
= false;
2046 marked
|= ggc_collect_1 (zone
, !marked
);
2050 #ifdef GATHER_STATISTICS
2051 /* Print page survival stats, if someone wants them. */
2052 if (GGC_DEBUG_LEVEL
>= 2)
2054 for (zone
= G
.zones
; zone
; zone
= zone
->next_zone
)
2056 if (zone
->was_collected
)
2058 float f
= calculate_average_page_survival (zone
);
2059 printf ("Average page survival in zone `%s' is %f\n",
2069 /* Free dead zones. */
2070 for (zone
= G
.zones
; zone
&& zone
->next_zone
; zone
= zone
->next_zone
)
2072 if (zone
->next_zone
->dead
)
2074 struct alloc_zone
*dead_zone
= zone
->next_zone
;
2076 printf ("Zone `%s' is dead and will be freed.\n", dead_zone
->name
);
2078 /* The zone must be empty. */
2079 gcc_assert (!dead_zone
->allocated
);
2081 /* Unchain the dead zone, release all its pages and free it. */
2082 zone
->next_zone
= zone
->next_zone
->next_zone
;
2083 release_pages (dead_zone
);
2088 invoke_plugin_callbacks (PLUGIN_GGC_END
, NULL
);
2090 timevar_pop (TV_GC
);
2093 /* Print allocation statistics. */
2094 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2096 : ((x) < 1024*1024*10 \
2098 : (x) / (1024*1024))))
2099 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
2102 ggc_print_statistics (void)
2104 struct alloc_zone
*zone
;
2105 struct ggc_statistics stats
;
2106 size_t total_overhead
= 0, total_allocated
= 0, total_bytes_mapped
= 0;
2107 size_t pte_overhead
, i
;
2109 /* Clear the statistics. */
2110 memset (&stats
, 0, sizeof (stats
));
2112 /* Make sure collection will really occur. */
2113 ggc_force_collect
= true;
2115 /* Collect and print the statistics common across collectors. */
2116 ggc_print_common_statistics (stderr
, &stats
);
2118 ggc_force_collect
= false;
2120 /* Release free pages so that we will not count the bytes allocated
2121 there as part of the total allocated memory. */
2122 for (zone
= G
.zones
; zone
; zone
= zone
->next_zone
)
2123 release_pages (zone
);
2125 /* Collect some information about the various sizes of
2128 "Memory still allocated at the end of the compilation process\n");
2130 fprintf (stderr
, "%20s %10s %10s %10s\n",
2131 "Zone", "Allocated", "Used", "Overhead");
2132 for (zone
= G
.zones
; zone
; zone
= zone
->next_zone
)
2134 struct large_page_entry
*large_page
;
2135 size_t overhead
, allocated
, in_use
;
2137 /* Skip empty zones. */
2138 if (!zone
->pages
&& !zone
->large_pages
)
2141 allocated
= in_use
= 0;
2143 overhead
= sizeof (struct alloc_zone
);
2145 for (large_page
= zone
->large_pages
; large_page
!= NULL
;
2146 large_page
= large_page
->next
)
2148 allocated
+= large_page
->bytes
;
2149 in_use
+= large_page
->bytes
;
2150 overhead
+= sizeof (struct large_page_entry
);
2153 /* There's no easy way to walk through the small pages finding
2154 used and unused objects. Instead, add all the pages, and
2155 subtract out the free list. */
2157 allocated
+= GGC_PAGE_SIZE
* zone
->n_small_pages
;
2158 in_use
+= GGC_PAGE_SIZE
* zone
->n_small_pages
;
2159 overhead
+= G
.small_page_overhead
* zone
->n_small_pages
;
2161 for (i
= 0; i
<= NUM_FREE_BINS
; i
++)
2163 struct alloc_chunk
*chunk
= zone
->free_chunks
[i
];
2166 in_use
-= ggc_get_size (chunk
);
2167 chunk
= chunk
->next_free
;
2171 fprintf (stderr
, "%20s %10lu%c %10lu%c %10lu%c\n",
2173 SCALE (allocated
), LABEL (allocated
),
2174 SCALE (in_use
), LABEL (in_use
),
2175 SCALE (overhead
), LABEL (overhead
));
2177 gcc_assert (in_use
== zone
->allocated
);
2179 total_overhead
+= overhead
;
2180 total_allocated
+= zone
->allocated
;
2181 total_bytes_mapped
+= zone
->bytes_mapped
;
2184 /* Count the size of the page table as best we can. */
2185 #if HOST_BITS_PER_PTR <= 32
2186 pte_overhead
= sizeof (G
.lookup
);
2187 for (i
= 0; i
< PAGE_L1_SIZE
; i
++)
2189 pte_overhead
+= PAGE_L2_SIZE
* sizeof (struct page_entry
*);
2192 page_table table
= G
.lookup
;
2196 pte_overhead
+= sizeof (*table
);
2197 for (i
= 0; i
< PAGE_L1_SIZE
; i
++)
2198 if (table
->table
[i
])
2199 pte_overhead
+= PAGE_L2_SIZE
* sizeof (struct page_entry
*);
2200 table
= table
->next
;
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
           "", "", SCALE (pte_overhead), LABEL (pte_overhead));
  total_overhead += pte_overhead;

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
           SCALE (total_allocated), LABEL (total_allocated),
           SCALE (total_overhead), LABEL (total_overhead));
#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    for (zone = G.zones; zone; zone = zone->next_zone)
      {
        all_overhead += zone->stats.total_overhead;
        all_allocated += zone->stats.total_allocated;

        all_allocated_under32 += zone->stats.total_allocated_under32;
        all_overhead_under32 += zone->stats.total_overhead_under32;

        all_allocated_under64 += zone->stats.total_allocated_under64;
        all_overhead_under64 += zone->stats.total_overhead_under64;

        all_allocated_under128 += zone->stats.total_allocated_under128;
        all_overhead_under128 += zone->stats.total_overhead_under128;

        fprintf (stderr, "%20s: %10lld\n",
                 zone->name, zone->stats.total_allocated);
      }
    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
             all_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
             all_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
             all_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
             all_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
             all_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
             all_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
             all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
             all_allocated_under128);
  }
#endif
}
/* Precompiled header support.  */

/* For precompiled headers, we sort objects based on their type.  We
   also sort various objects into their own buckets; currently this
   covers strings and IDENTIFIER_NODE trees.  The choices of how
   to sort buckets have not yet been tuned.  */

#define NUM_PCH_BUCKETS         (gt_types_enum_last + 3)

#define OTHER_BUCKET            (gt_types_enum_last + 0)
#define IDENTIFIER_BUCKET       (gt_types_enum_last + 1)
#define STRING_BUCKET           (gt_types_enum_last + 2)
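/* Bucket layout sketch: if gt_types_enum_last has the value N (it is
   generated by gengtype), buckets 0 .. N-1 hold the known GTY types
   and the three extra buckets follow at N (OTHER_BUCKET),
   N + 1 (IDENTIFIER_BUCKET) and N + 2 (STRING_BUCKET), giving
   NUM_PCH_BUCKETS == N + 3.  */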
struct ggc_pch_ondisk
{
  size_t total;
  size_t type_totals[NUM_PCH_BUCKETS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t orig_base;
  size_t alloc_size;
  alloc_type *alloc_bits;
  size_t type_bases[NUM_PCH_BUCKETS];
  size_t start_offset;
};
/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}
/* Return which of the page-aligned buckets the object at X, with type
   TYPE, should be sorted into in the PCH.  Strings will have
   IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
   of unknown type will also have TYPE equal to gt_types_enum_last.  */

static int
pch_bucket (void *x, enum gt_types_enum type,
            bool is_string)
{
  /* Sort identifiers into their own bucket, to improve locality
     when searching the identifier hash table.  */
  if (type == gt_ggc_e_14lang_tree_node
      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
    return IDENTIFIER_BUCKET;
  else if (type == gt_types_enum_last)
    {
      if (is_string)
        return STRING_BUCKET;
      return OTHER_BUCKET;
    }
  return type;
}
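/* For example, an IDENTIFIER_NODE tree lands in IDENTIFIER_BUCKET even
   though its TYPE is the generic lang_tree_node enumerator; a string
   (TYPE == gt_types_enum_last with IS_STRING set) lands in
   STRING_BUCKET; and any other known type simply indexes the bucket
   numbered by its own enumerator.  */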
/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string, enum gt_types_enum type)
{
  /* NOTE: Right now we don't need to align up the size of any objects.
     Strings can be unaligned, and everything else is allocated to a
     MAX_ALIGNMENT boundary already.  */

  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}
/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  int i;
  size_t alloc_size, total_size;

  total_size = 0;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
      total_size += d->d.type_totals[i];
    }
  d->d.total = total_size;

  /* Include the size of the allocation bitmap.  */
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
  d->alloc_size = alloc_size;

  return d->d.total + alloc_size;
}
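/* Worked example for ggc_pch_total_size, assuming GGC_PAGE_SIZE == 4096,
   BYTES_PER_ALLOC_BIT == 4 and MAX_ALIGNMENT == 8 (illustrative values):
   bucket totals of 10000 and 200 bytes round up to 12288 and 4096, so
   d->d.total == 16384; the bitmap needs one bit per 4 bytes, i.e.
   CEIL (16384, 32) == 512 bytes, already 8-byte aligned, and the
   function returns 16384 + 512 == 16896.  */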
/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
  int i;
  size_t base = (size_t) base_;

  d->base = d->orig_base = base;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->type_bases[i] = base;
      base += d->d.type_totals[i];
    }

  if (d->alloc_bits == NULL)
    d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
}
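/* For instance, if the PCH area is based at 0x40000000 and the first
   two bucket totals are 8192 and 4096 bytes (hypothetical numbers),
   bucket 0 objects are laid out from 0x40000000, bucket 1 objects from
   0x40002000 and bucket 2 objects from 0x40003000.  */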
/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
                      size_t size, bool is_string,
                      enum gt_types_enum type)
{
  size_t alloc_word, alloc_bit;
  char *result;
  int bucket = pch_bucket (x, type, is_string);

  /* Record the start of the object in the allocation bitmap.  We
     can't assert that the allocation bit is previously clear, because
     strings may violate the invariant that they are at least
     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
     should not be called for strings.  */
  alloc_word = ((d->type_bases[bucket] - d->orig_base)
                / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
               / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
  d->alloc_bits[alloc_word] |= 1L << alloc_bit;
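  /* Indexing sketch with hypothetical values, assuming 32-bit
     alloc_type words and BYTES_PER_ALLOC_BIT == 4: an object starting
     640 bytes past orig_base owns allocation bit 640 / 4 == 160, which
     lands in word 160 / 32 == 5 at bit position 160 % 32 == 0.  */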
  /* Place the object at the current pointer for this bucket.  */
  result = (char *) d->type_bases[bucket];
  d->type_bases[bucket] += size;

  return result;
}
/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d,
                       FILE *f)
{
  /* We seek around a lot while writing.  Record where the end
     of the padding in the PCH file is, so that we can
     locate each object's offset.  */
  d->start_offset = ftell (f);
}
/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
                      FILE *f, void *x, void *newx,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");
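  /* The seek target mirrors ggc_pch_this_base: NEWX is the address the
     object will occupy once the PCH is mapped back in, so NEWX minus
     orig_base is its offset within the PCH data, and adding
     start_offset turns that offset into a file position.  */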
  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation bitmap.  */
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");

  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  /* Done with the PCH, so write out our footer.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  free (d->alloc_bits);
  free (d);
}
/* The PCH file from F has been mapped at ADDR.  Read in any
   additional data from the file and set up the GC state.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  size_t alloc_size;
  struct alloc_zone *zone;
  struct page_entry *pch_page;
  char *p;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);

  pch_zone.bytes = d.total;
  pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
  pch_zone.page = (char *) addr;
  pch_zone.end = (char *) pch_zone.alloc_bits;
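  /* Layout of the mapped region: object data occupies [addr,
     addr + d.total) and the allocation bitmap starts immediately after
     it, mirroring the order in which ggc_pch_finish wrote the two.  */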
  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
#ifdef GATHER_STATISTICS
  zone_allocate_marks ();
  ggc_prune_overhead_list ();
  zone_free_marks ();
#endif
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page, *next_page;
      struct large_page_entry *large_page, *next_large_page;

      zone->allocated = 0;

      /* Clear the zone's free chunk list.  */
      memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
      zone->high_free_bin = 0;
      zone->cached_free = NULL;
      zone->cached_free_size = 0;

      /* Move all the small pages onto the free list.  */
      for (page = zone->pages; page != NULL; page = next_page)
        {
          next_page = page->next;
          memset (page->alloc_bits, 0,
                  G.small_page_overhead - PAGE_OVERHEAD);
          free_small_page (page);
        }

      /* Discard all the large pages.  */
      for (large_page = zone->large_pages; large_page != NULL;
           large_page = next_large_page)
        {
          next_large_page = large_page->next;
          free_large_page (large_page);
        }

      zone->pages = NULL;
      zone->large_pages = NULL;
    }
  /* Allocate the dummy page entry for the PCH, and set all pages
     mapped into the PCH to reference it.  */
  pch_page = XCNEW (struct page_entry);
  pch_page->page = pch_zone.page;
  pch_page->pch_p = true;
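  /* Illustrative numbers: with 64k of PCH data and GGC_PAGE_SIZE ==
     4096, the loop below registers the same dummy entry for all 16
     pages, so a page-table lookup of any pointer into the PCH region
     finds an entry with pch_p set.  */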
  for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
    set_page_table_entry (p, pch_page);
}