1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
3 2010 Free Software Foundation, Inc.
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
7 <dan@codesourcery.com>.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "ggc-internal.h"
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
#endif
#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
#endif

#ifndef USING_MMAP
#error Zone collector requires mmap
#endif
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif
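/* Note on the arguments to __builtin_prefetch above: the second
   argument (1) requests a prefetch for writing, and the third (3)
   requests maximum temporal locality, which suits an allocator that
   is about to initialize the fetched line.  */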
/* NOTES:
   If we track inter-zone pointers, we can mark single zones at a
   time.

   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.

   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */
/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than page size.

   Pages for small objects are broken up into chunks.  The page has
   a bitmap which marks the start position of each chunk (whether
   allocated or free).  Free chunks are on one of the zone's free
   lists and contain a pointer to the next free chunk.  Chunks in
   most of the free lists have a fixed size determined by the
   free list.  Chunks in the "other" sized free list have their size
   stored right after their chain pointer.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  The free page list is currently per-zone.  */
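/* An illustrative sketch of the start-bitmap encoding described
   above.  This hypothetical helper is not part of the allocator and
   is disabled; the real code scans whole bitmap words with ffs
   instead of single bits (see zone_object_size_1 below).  */
#if 0
static size_t
example_chunk_units (const unsigned int *bits, size_t start, size_t nbits)
{
  size_t n;

  /* A chunk's size is the distance from its start bit to the next
     set bit, each bit covering one allocation unit.  */
  for (n = start + 1; n < nbits; n++)
    if (bits[n / 32] & (1u << (n % 32)))
      break;
  return n - start;
}
#endif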
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  struct alloc_chunk *next_free;
  unsigned int size;
};
/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD   (offsetof (struct small_page_entry, alloc_bits))
/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE	4096
#define GGC_PAGE_MASK	(GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT	12
#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE	G.pagesize
#define GGC_PAGE_MASK	G.page_mask
#define GGC_PAGE_SHIFT	G.lg_pagesize
#endif
/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE
/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate...  */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */

#define NUM_FREE_BINS		64
#define FREE_BIN_DELTA		MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
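/* For concreteness, assuming MAX_ALIGNMENT (and so FREE_BIN_DELTA) is
   8: a 24-byte chunk maps to SIZE_BIN_DOWN (24) == 3, while a
   1024-byte chunk computes bin 128, which exceeds NUM_FREE_BINS and
   is therefore kept in the "other" sized bin 0 (see free_chunk
   below).  */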
/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT	MAX_ALIGNMENT

/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity
   to sweeping.  */
#define BYTES_PER_MARK_BIT	BYTES_PER_ALLOC_BIT

#define BYTES_PER_MARK_WORD	(8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
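/* For example, with a 32-bit mark_type and an (assumed) 8-byte
   BYTES_PER_MARK_BIT, BYTES_PER_MARK_WORD is 8 * 8 * 4 == 256, so one
   mark word covers 256 bytes and a 4096-byte GGC page needs 16 mark
   words.  */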
/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */
#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* Compute the smallest multiple of F that is >= X.  */
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
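/* For example, ROUND_UP (10, 8) is CEIL (10, 8) * 8 == 2 * 8 == 16,
   the smallest multiple of 8 that is >= 10.  */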
/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)
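/* The bitmap walks below rely on the ffs convention: it returns the
   1-based index of the least significant set bit, or 0 if no bit is
   set.  E.g. ffs (0x8) == 4 and ffs (0) == 0.  */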
/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;
/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};
/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

       msb +----------------+--------------+-------------+ lsb
	   |  PAGE_L1_BITS  | PAGE_L2_BITS | page offset |
	   +----------------+--------------+-------------+

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
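/* Worked example: with GGC_PAGE_SHIFT == 12 and PAGE_L1_BITS == 8,
   PAGE_L2_BITS is 12.  For the 32-bit address 0xabcd1234, LOOKUP_L1
   yields 0xab (the top 8 bits) and LOOKUP_L2 yields 0xcd1 (the next
   12 bits); the low 12 bits 0x234 are the offset within the GGC page
   and do not participate in the lookup.  */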
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free page list.  */
  size_t quire_size;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;
/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;
/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;
static char *alloc_anon (char *, size_t, struct alloc_zone *);
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Return NULL if the object wasn't allocated via the GC.  */

static inline page_entry *
lookup_page_table_if_allocated (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table)
    {
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  if (! table)
    return NULL;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  if (! base[L1])
    return NULL;

  L2 = LOOKUP_L2 (p);
  if (L2 >= PAGE_L2_SIZE)
    return NULL;
  /* We might have a page entry which does not correspond exactly to a
     system page.  */
  if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
    return NULL;

  return base[L1][L2];
}
/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}
/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}
/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}
/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
	  % (8 * sizeof (alloc_type)));
}
/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
	  % (8 * sizeof (mark_type)));
}
/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}
/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
			     const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}
/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
		    size_t start_word, size_t start_bit,
		    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
	/* indx is 1-based.  We started at the bit after the object's
	   start, but we also ended at the bit after the object's end.
	   No need for further calculation.  */
	return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
	return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
	return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}
/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
		       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
		     - (const char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
			     max_size);
}
/* highest_bit assumes that alloc_type is 32 bits.  */
extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];
/* Find the highest set bit in VALUE.  Returns the bit number of that
   bit, using the same values as ffs.  */
static inline alloc_type
highest_bit (alloc_type value)
{
  /* This also assumes that alloc_type is unsigned.  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  value = value ^ (value >> 1);
  return alloc_ffs (value);
}
/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
			 size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search for the start of the object.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}
/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
	= (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
					       mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
	{
	  page->mark_bits = cur_marks;
	  cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
	  n++;
#endif
	}
#ifdef ENABLE_CHECKING
      gcc_checking_assert (n == zone->n_small_pages);
#endif
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
			       CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}
/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
	free (zone->mark_bits);
	zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
	  e->common.page = page + (i << GGC_PAGE_SHIFT);
	  e->common.zone = zone;
	  e->next = f;
	  f = e;
	  set_page_table_entry (e->common.page, &e->common);
	}

      zone->free_pages = f;

      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}
/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = XNEWVAR (char, needed_size);

  entry = (struct large_page_entry *) page;

  entry->next = NULL;
  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
#ifdef GATHER_STATISTICS
  entry->common.survived = 0;
#endif
  entry->mark_p = false;
  entry->bytes = size;
  entry->prev = NULL;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry,
	     entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
						SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}
/* Release a large page that is no longer needed.  */

static void
free_large_page (struct large_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry,
	     entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (entry->common.large_p);

  set_page_table_entry (entry->common.page, NULL);
  free (entry);
}
/* Release the free page cache to the system.  */
static void
release_pages (struct alloc_zone *zone)
{
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      while (p && p->common.page == start + len)
	{
	  next = p->next;
	  len += SMALL_PAGE_SIZE;
	  set_page_table_entry (p->common.page, NULL);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
}
/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
						     sizeof (struct alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr + sizeof (struct alloc_chunk),
						    size - sizeof (struct alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
						     sizeof (struct alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr + sizeof (struct alloc_chunk *),
						    size - sizeof (struct alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
}
/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

void *
ggc_internal_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
			      MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
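  /* For example, assuming MAX_ALIGNMENT is 8: a 13-byte request
     becomes (13 + 7) & ~7 == 16, since -MAX_ALIGNMENT is the
     two's-complement mask ~(MAX_ALIGNMENT - 1).  */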
  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
	{
	  zone->cached_free += size;
	  zone_set_object_alloc_bit (zone->cached_free);
	}

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
	 and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
	 == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
	 may now be wrong, if this was the last chunk in the high bin.
	 Rather than fixing it up now, wait until we need to search
	 the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
	 the watermark.  */
      while (zone->high_free_bin > bin
	     && zone->free_chunks[zone->high_free_bin] == NULL)
	zone->high_free_bin--;

      if (zone->high_free_bin > bin)
	{
	  size_t tbin = zone->high_free_bin;
	  chunk = zone->free_chunks[tbin];

	  /* Remove the chunk from its previous bin.  */
	  zone->free_chunks[tbin] = chunk->next_free;

	  result = (char *) chunk;

	  /* Save the rest of the chunk for future allocation.  */
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

	  /* Mark the new free chunk as an object, so that we can
	     find the size of the newly allocated object.  */
	  zone_set_object_alloc_bit (chunk);

	  /* HIGH_FREE_BIN may now be wrong, if this was the last
	     chunk in the high bin.  Rather than fixing it up now,
	     wait until we need to search the free bins.  */

	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
	 left over.  */
      csize = chunk->size;
      if (csize > size)
	{
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = csize - size;

	  /* Mark the new free chunk as an object.  */
	  zone_set_object_alloc_bit (chunk);
	}

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
	zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
	free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
						size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     unaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
	zone->stats.total_overhead_under32 += overhead;
	zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
	zone->stats.total_overhead_under64 += overhead;
	zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
	zone->stats.total_overhead_under128 += overhead;
	zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
	     (unsigned long) size, result);

  return result;
}
#define ggc_internal_alloc_zone_pass_stat(s,z)		\
    ggc_internal_alloc_zone_stat (s,z PASS_MEM_STAT)
void *
ggc_internal_cleared_alloc_zone_stat (size_t orig_size,
				      struct alloc_zone *zone MEM_STAT_DECL)
{
  void *result = ggc_internal_alloc_zone_pass_stat (orig_size, zone);
  memset (result, 0, orig_size);
  return result;
}
/* Allocate a SIZE of chunk memory of GTE type, into an appropriate zone
   for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
		      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_internal_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
    }
}
/* Normal GC allocation simply allocates into the main zone.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
}
/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE)				      \
  do {								      \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE)));   \
    memset ((PTR), 0xa5, (SIZE));				      \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE)));    \
  } while (0)
#else
#define poison_region(PTR, SIZE)
#endif
/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
	large_page->prev->next = large_page->next;
      else
	{
	  gcc_assert (large_page->common.zone->large_pages == large_page);
	  large_page->common.zone->large_pages = large_page->next;
	}
      if (large_page->next)
	large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
	 since we are likely to want a chunk of this size again.  */
      free_chunk ((char *)p, size, page->zone);
    }
}
1466 gt_ggc_m_S (const void *p
)
1469 unsigned long offset
;
1474 /* Look up the page on which the object is alloced. . */
1475 entry
= lookup_page_table_if_allocated (p
);
1481 size_t alloc_word
, alloc_bit
, t
;
1482 t
= ((const char *) p
- pch_zone
.page
) / BYTES_PER_ALLOC_BIT
;
1483 alloc_word
= t
/ (8 * sizeof (alloc_type
));
1484 alloc_bit
= t
% (8 * sizeof (alloc_type
));
1485 offset
= zone_find_object_offset (pch_zone
.alloc_bits
, alloc_word
,
1488 else if (entry
->large_p
)
1490 struct large_page_entry
*le
= (struct large_page_entry
*) entry
;
1491 offset
= ((const char *) p
) - entry
->page
;
1492 gcc_assert (offset
< le
->bytes
);
1496 struct small_page_entry
*se
= (struct small_page_entry
*) entry
;
1497 unsigned int start_word
= zone_get_object_alloc_word (p
);
1498 unsigned int start_bit
= zone_get_object_alloc_bit (p
);
1499 offset
= zone_find_object_offset (se
->alloc_bits
, start_word
, start_bit
);
1501 /* On some platforms a char* will not necessarily line up on an
1502 allocation boundary, so we have to update the offset to
1503 account for the leftover bytes. */
1504 offset
+= (size_t) p
% BYTES_PER_ALLOC_BIT
;
1509 /* Here we've seen a char* which does not point to the beginning
1510 of an allocated object. We assume it points to the middle of
1512 gcc_assert (offset
== offsetof (struct tree_string
, str
));
1513 p
= ((const char *) p
) - offset
;
1514 gt_ggc_mx_lang_tree_node (CONST_CAST(void *, p
));
1518 /* Inefficient, but also unlikely to matter. */
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
	return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      if (large_page->mark_p)
	return 1;
      large_page->mark_p = true;
    }
  else
    {
      struct small_page_entry *small_page
	= (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
	  & (1 << zone_get_object_mark_bit (p)))
	return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
	|= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
    }

  if (page->large_p)
    {
      struct large_page_entry *large_page
	= (struct large_page_entry *) page;

      return large_page->mark_p;
    }
  else
    {
      struct small_page_entry *small_page
	= (struct small_page_entry *) page;

      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
		   & (1 << zone_get_object_mark_bit (p)));
    }
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t alloc_word, alloc_bit, offset, max_size;
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
				 max_size);
    }

  if (page->large_p)
    return ((struct large_page_entry *)page)->bytes;
  else
    return zone_find_object_size ((struct small_page_entry *) page, p);
}
/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */

  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
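  /* With 4096-byte system pages this works out to 16 * 4096 / 4096
     == 16 GGC pages (64k) per alloc_anon call; larger system pages
     scale the quire up proportionally.  */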
  /* Calculate the size of the allocation bitmap and other overhead.  */
  /* Right now we allocate bits for the page header and bitmap.  These
     are wasted, but a little tricky to eliminate.  */
  G.small_page_overhead
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  gcc_assert (G.dev_zero_fd != -1);
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct small_page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	gcc_assert (!((size_t)p & (G.pagesize - 1)));
      }

    if (GGC_PAGE_SIZE == G.pagesize)
      {
	/* We have a good page, might as well hold onto it...  */
	e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
	e->common.page = p;
	e->common.zone = &main_zone;
	e->next = main_zone.free_pages;
	set_page_table_entry (e->common.page, &e->common);
	main_zone.free_pages = e;
      }
    else
      {
	munmap (p, G.pagesize);
      }
  }
}
1724 new_ggc_zone_1 (struct alloc_zone
*new_zone
, const char * name
)
1726 new_zone
->name
= name
;
1727 new_zone
->next_zone
= G
.zones
->next_zone
;
1728 G
.zones
->next_zone
= new_zone
;
/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  struct large_page_entry **lpp, *lp, *lnext;
  struct small_page_entry **spp, *sp, *snext;
  char *last_free;
  size_t allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  zone->high_free_bin = 0;
  zone->cached_free = NULL;
  zone->cached_free_size = 0;

  /* Large pages are all or none affairs.  Either they are completely
     empty, or they are completely full.  */
  lpp = &zone->large_pages;
  for (lp = zone->large_pages; lp != NULL; lp = lnext)
    {
      gcc_assert (lp->common.large_p);

      lnext = lp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      lp->common.survived++;
#endif

      if (lp->mark_p)
	{
	  lp->mark_p = false;
	  allocated += lp->bytes;
	  lpp = &lp->next;
	}
      else
	{
	  *lpp = lnext;
#ifdef ENABLE_GC_CHECKING
	  /* Poison the page.  */
	  memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
	  if (lp->prev)
	    lp->prev->next = lp->next;
	  if (lp->next)
	    lp->next->prev = lp->prev;
	  free_large_page (lp);
	}
    }

  spp = &zone->pages;
  for (sp = zone->pages; sp != NULL; sp = snext)
    {
      char *object, *last_object;
      char *end;
      alloc_type *alloc_word_p;
      mark_type *mark_word_p;

      gcc_assert (!sp->common.large_p);

      snext = sp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      sp->common.survived++;
#endif

      /* Step through all chunks, consolidate those that are free and
	 insert them into the free lists.  Note that consolidation
	 slows down collection slightly.  */

      last_object = object = sp->common.page;
      end = sp->common.page + SMALL_PAGE_SIZE;
      last_free = NULL;
      nomarksinpage = true;
      mark_word_p = sp->mark_bits;
      alloc_word_p = sp->alloc_bits;

      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);

      object = sp->common.page;
      do
	{
	  unsigned int i, n;
	  alloc_type alloc_word;
	  mark_type mark_word;

	  alloc_word = *alloc_word_p++;
	  mark_word = *mark_word_p++;

	  if (mark_word)
	    nomarksinpage = false;

	  /* There ought to be some way to do this without looping...  */
	  i = 0;
	  while ((n = alloc_ffs (alloc_word)) != 0)
	    {
	      /* Extend the current state for n - 1 bits.  We can't
		 shift alloc_word by n, even though it isn't used in the
		 loop, in case only the highest bit was set.  */
	      alloc_word >>= n - 1;
	      mark_word >>= n - 1;
	      object += BYTES_PER_MARK_BIT * (n - 1);

	      if (mark_word & 1)
		{
		  if (last_free)
		    {
		      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
								     object
								     - last_free));
		      poison_region (last_free, object - last_free);
		      free_chunk (last_free, object - last_free, zone);
		      last_free = NULL;
		    }
		  else
		    allocated += object - last_object;
		  last_object = object;
		}
	      else
		{
		  if (last_free == NULL)
		    {
		      last_free = object;
		      allocated += object - last_object;
		    }
		  else
		    zone_clear_object_alloc_bit (sp, object);
		}

	      /* Shift to just after the alloc bit we handled.  */
	      alloc_word >>= 1;
	      mark_word >>= 1;
	      object += BYTES_PER_MARK_BIT;

	      i += n;
	    }

	  object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
	}
      while (object < end);

      if (nomarksinpage)
	{
	  *spp = snext;
#ifdef ENABLE_GC_CHECKING
	  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
							 SMALL_PAGE_SIZE));
	  /* Poison the page.  */
	  memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
	  free_small_page (sp);
	  continue;
	}
      else if (last_free)
	{
	  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
							 object - last_free));
	  poison_region (last_free, object - last_free);
	  free_chunk (last_free, object - last_free, zone);
	}
      else
	allocated += object - last_object;

      spp = &sp->next;
    }

  zone->allocated = allocated;
}
/* mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
#if 0
  /* Debug code: dump the free bins before collecting.  */
  {
    int i;
    for (i = 0; i < NUM_FREE_BINS + 1; i++)
      {
	struct alloc_chunk *chunk;
	int n, tot;

	n = 0;
	tot = 0;
	chunk = zone->free_chunks[i];
	while (chunk)
	  {
	    n++;
	    tot += chunk->size;
	    chunk = chunk->next_free;
	  }
	fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
		 i, n, tot);
      }
  }
#endif

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
	     zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  if (need_marking)
    {
      zone_allocate_marks ();
      ggc_mark_roots ();
#ifdef GATHER_STATISTICS
      ggc_prune_overhead_list ();
#endif
    }

  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
#ifdef GATHER_STATISTICS
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  struct small_page_entry *p;
  struct large_page_entry *lp;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->common.survived;
    }
  for (lp = zone->large_pages; lp; lp = lp->next)
    {
      count += 1.0;
      survival += lp->common.survived;
    }
  return survival / count;
}
#endif
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;

  timevar_push (TV_GC);

  if (!ggc_force_collect)
    {
      float allocated_last_gc = 0, allocated = 0, min_expand;

      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  allocated_last_gc += zone->allocated_last_gc;
	  allocated += zone->allocated;
	}

      allocated_last_gc =
	MAX (allocated_last_gc,
	     (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (allocated < allocated_last_gc + min_expand)
	{
	  timevar_pop (TV_GC);
	  return;
	}
    }

  invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collection is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */
  /* NOTE drow/2004-07-28: We now always collect the main zone, but
     keep this code in case the heuristics are further refined.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
	{
	  zone->was_collected = false;
	  marked |= ggc_collect_1 (zone, !marked);
	}
    }

#ifdef GATHER_STATISTICS
  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    {
	      float f = calculate_average_page_survival (zone);
	      printf ("Average page survival in zone `%s' is %f\n",
		      zone->name, f);
	    }
	}
    }
#endif

  if (marked)
    zone_free_marks ();

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
	{
	  struct alloc_zone *dead_zone = zone->next_zone;

	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

	  /* The zone must be empty.  */
	  gcc_assert (!dead_zone->allocated);

	  /* Unchain the dead zone, release all its pages and free it.  */
	  zone->next_zone = zone->next_zone->next_zone;
	  release_pages (dead_zone);
	  free (dead_zone);
	}
    }

  invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);

  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10	\
		  ? (x)					\
		  : ((x) < 1024*1024*10			\
		     ? (x) / 1024			\
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
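/* For example, 5000 prints as "5000" with a blank label (bytes),
   2000000 prints as "1953k", and 50000000 prints as "47M"; SCALE
   picks the magnitude and LABEL supplies the matching suffix.  */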
2104 ggc_print_statistics (void)
2106 struct alloc_zone
*zone
;
2107 struct ggc_statistics stats
;
2108 size_t total_overhead
= 0, total_allocated
= 0, total_bytes_mapped
= 0;
2109 size_t pte_overhead
, i
;
2111 /* Clear the statistics. */
2112 memset (&stats
, 0, sizeof (stats
));
2114 /* Make sure collection will really occur. */
2115 ggc_force_collect
= true;
2117 /* Collect and print the statistics common across collectors. */
2118 ggc_print_common_statistics (stderr
, &stats
);
2120 ggc_force_collect
= false;
2122 /* Release free pages so that we will not count the bytes allocated
2123 there as part of the total allocated memory. */
2124 for (zone
= G
.zones
; zone
; zone
= zone
->next_zone
)
2125 release_pages (zone
);
2127 /* Collect some information about the various sizes of
2130 "Memory still allocated at the end of the compilation process\n");
2132 fprintf (stderr
, "%20s %10s %10s %10s\n",
2133 "Zone", "Allocated", "Used", "Overhead");
2134 for (zone
= G
.zones
; zone
; zone
= zone
->next_zone
)
2136 struct large_page_entry
*large_page
;
2137 size_t overhead
, allocated
, in_use
;
2139 /* Skip empty zones. */
2140 if (!zone
->pages
&& !zone
->large_pages
)
2143 allocated
= in_use
= 0;
2145 overhead
= sizeof (struct alloc_zone
);
2147 for (large_page
= zone
->large_pages
; large_page
!= NULL
;
2148 large_page
= large_page
->next
)
2150 allocated
+= large_page
->bytes
;
2151 in_use
+= large_page
->bytes
;
2152 overhead
+= sizeof (struct large_page_entry
);
2155 /* There's no easy way to walk through the small pages finding
2156 used and unused objects. Instead, add all the pages, and
2157 subtract out the free list. */
2159 allocated
+= GGC_PAGE_SIZE
* zone
->n_small_pages
;
2160 in_use
+= GGC_PAGE_SIZE
* zone
->n_small_pages
;
2161 overhead
+= G
.small_page_overhead
* zone
->n_small_pages
;
2163 for (i
= 0; i
<= NUM_FREE_BINS
; i
++)
2165 struct alloc_chunk
*chunk
= zone
->free_chunks
[i
];
2168 in_use
-= ggc_get_size (chunk
);
2169 chunk
= chunk
->next_free
;

      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
	       zone->name,
	       SCALE (allocated), LABEL (allocated),
	       SCALE (in_use), LABEL (in_use),
	       SCALE (overhead), LABEL (overhead));

      gcc_assert (in_use == zone->allocated);

      total_overhead += overhead;
      total_allocated += zone->allocated;
      total_bytes_mapped += zone->bytes_mapped;
    }

  /* Count the size of the page table as best we can.  */
#if HOST_BITS_PER_PTR <= 32
  pte_overhead = sizeof (G.lookup);
  for (i = 0; i < PAGE_L1_SIZE; i++)
    if (G.lookup[i])
      pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
#else
  {
    page_table table = G.lookup;
    pte_overhead = 0;
    while (table)
      {
	pte_overhead += sizeof (*table);
	for (i = 0; i < PAGE_L1_SIZE; i++)
	  if (table->table[i])
	    pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
	table = table->next;
      }
  }
#endif
  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
	   "", "", SCALE (pte_overhead), LABEL (pte_overhead));
  total_overhead += pte_overhead;
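
  /* A sketch of the cost model above, with assumed (not actual)
     parameters: the lookup structure is a two-level table, so its cost
     is the fixed L1 array plus one PAGE_L2_SIZE array of page_entry
     pointers per populated L1 slot.  If, say, PAGE_L2_SIZE were 1024
     and pointers 8 bytes, each populated slot would add 8K to
     pte_overhead.  */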

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
	   SCALE (total_allocated), LABEL (total_allocated),
	   SCALE (total_overhead), LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    for (zone = G.zones; zone; zone = zone->next_zone)
      {
	all_overhead += zone->stats.total_overhead;
	all_allocated += zone->stats.total_allocated;

	all_allocated_under32 += zone->stats.total_allocated_under32;
	all_overhead_under32 += zone->stats.total_overhead_under32;

	all_allocated_under64 += zone->stats.total_allocated_under64;
	all_overhead_under64 += zone->stats.total_overhead_under64;

	all_allocated_under128 += zone->stats.total_allocated_under128;
	all_overhead_under128 += zone->stats.total_overhead_under128;

	fprintf (stderr, "%20s: %10lld\n",
		 zone->name, zone->stats.total_allocated);
      }

    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead:             %10lld\n",
	     all_overhead);
    fprintf (stderr, "Total Allocated:            %10lld\n",
	     all_allocated);

    fprintf (stderr, "Total Overhead  under  32B: %10lld\n",
	     all_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B: %10lld\n",
	     all_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B: %10lld\n",
	     all_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B: %10lld\n",
	     all_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B: %10lld\n",
	     all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
	     all_allocated_under128);
  }
#endif
}

/* Precompiled header support.  */

/* For precompiled headers, we sort objects based on their type.  We
   also sort various objects into their own buckets; currently this
   covers strings and IDENTIFIER_NODE trees.  The choices of how
   to sort buckets have not yet been tuned.  */

#define NUM_PCH_BUCKETS		(gt_types_enum_last + 3)

#define OTHER_BUCKET		(gt_types_enum_last + 0)
#define IDENTIFIER_BUCKET	(gt_types_enum_last + 1)
#define STRING_BUCKET		(gt_types_enum_last + 2)
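
/* The bucket space is therefore laid out as follows: buckets
   0 .. gt_types_enum_last - 1 hold objects of the corresponding GTY
   type, and the three extra buckets above them hold everything else:
   untyped objects, identifiers, and strings, in that order (see
   pch_bucket below).  */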

struct ggc_pch_ondisk
{
  size_t total;
  size_t type_totals[NUM_PCH_BUCKETS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t orig_base;
  size_t alloc_size;
  alloc_type *alloc_bits;
  size_t type_bases[NUM_PCH_BUCKETS];
  size_t start_offset;
};

/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

/* Return which of the page-aligned buckets the object at X, with type
   TYPE, should be sorted into in the PCH.  Strings will have
   IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
   of unknown type will also have TYPE equal to gt_types_enum_last.  */

static int
pch_bucket (void *x, enum gt_types_enum type,
	    bool is_string)
{
  /* Sort identifiers into their own bucket, to improve locality
     when searching the identifier hash table.  */
  if (type == gt_ggc_e_14lang_tree_node
      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
    return IDENTIFIER_BUCKET;
  else if (type == gt_types_enum_last)
    {
      if (is_string)
	return STRING_BUCKET;
      return OTHER_BUCKET;
    }
  return type;
}
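
/* For example: an IDENTIFIER_NODE lang_tree_node lands in
   IDENTIFIER_BUCKET; a string (TYPE == gt_types_enum_last with
   IS_STRING set) lands in STRING_BUCKET; any other object of unknown
   type falls into OTHER_BUCKET; and an object of a known GTY type
   simply uses its type enumerator as its bucket index.  */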

/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string, enum gt_types_enum type)
{
  /* NOTE: Right now we don't need to align up the size of any objects.
     Strings can be unaligned, and everything else is allocated to a
     MAX_ALIGNMENT boundary already.  */

  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  int i;
  size_t alloc_size, total_size;

  total_size = 0;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
      total_size += d->d.type_totals[i];
    }
  d->d.total = total_size;

  /* Include the size of the allocation bitmap.  */
  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
  d->alloc_size = alloc_size;

  return d->d.total + alloc_size;
}
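
/* A worked example with assumed parameters (GGC_PAGE_SIZE == 4096 and
   BYTES_PER_ALLOC_BIT == 8, for illustration only): a bucket holding
   5000 bytes of objects is rounded up to 8192.  If that is the only
   non-empty bucket, d->d.total is 8192, and the allocation bitmap
   needs one bit per 8 bytes, i.e. CEIL (8192, 64) == 128 bytes,
   rounded up to MAX_ALIGNMENT.  */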

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
  int i;
  size_t base = (size_t) base_;

  d->base = d->orig_base = base;
  for (i = 0; i < NUM_PCH_BUCKETS; i++)
    {
      d->type_bases[i] = base;
      base += d->d.type_totals[i];
    }

  if (d->alloc_bits == NULL)
    d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
}
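
/* After this, the buckets occupy consecutive page-aligned regions
   starting at BASE_.  For instance (illustrative sizes), with
   type_totals of 8192 and 4096 for the first two buckets, their
   type_bases become BASE_ and BASE_ + 8192 respectively, and each
   bucket's pointer then advances through its region as objects are
   placed by ggc_pch_alloc_object.  */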

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string,
		      enum gt_types_enum type)
{
  size_t alloc_word, alloc_bit;
  char *result;
  int bucket = pch_bucket (x, type, is_string);

  /* Record the start of the object in the allocation bitmap.  We
     can't assert that the allocation bit is previously clear, because
     strings may violate the invariant that they are at least
     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
     should not be called for strings.  */
  alloc_word = ((d->type_bases[bucket] - d->orig_base)
		/ (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
	       / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
  d->alloc_bits[alloc_word] |= 1L << alloc_bit;

  /* Place the object at the current pointer for this bucket.  */
  result = (char *) d->type_bases[bucket];
  d->type_bases[bucket] += size;

  return result;
}
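
/* To illustrate the bit arithmetic (assuming, for this example only,
   an 8-bit alloc_type and BYTES_PER_ALLOC_BIT == 8): an object placed
   200 bytes past orig_base corresponds to allocation bit 200 / 8 == 25,
   which lives in word 25 / 8 == 3 at bit position 25 % 8 == 1.  */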

/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d,
		       FILE *f)
{
  /* We seek around a lot while writing.  Record where the end
     of the padding in the PCH file is, so that we can
     locate each object's offset.  */
  d->start_offset = ftell (f);
}

/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
		      FILE *f, void *x, void *newx,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");
}
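
/* In other words, each object's file position is its offset from the
   PCH base address plus the data section's starting offset: an object
   relocated to orig_base + 12288 (an illustrative value) is written
   12288 bytes past start_offset, so the file image mirrors the
   eventual memory image.  */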

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  /* Write out the allocation bitmap.  */
  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
    fatal_error ("can%'t seek PCH file: %m");

  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  /* Done with the PCH, so write out our footer.  */
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  free (d->alloc_bits);
  free (d);
}

/* The PCH file from F has been mapped at ADDR.  Read in any
   additional data from the file and set up the GC state.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  size_t alloc_size;
  struct alloc_zone *zone;
  struct page_entry *pch_page;
  char *p;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);

  pch_zone.bytes = d.total;
  pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
  pch_zone.page = (char *) addr;
  pch_zone.end = (char *) pch_zone.alloc_bits;
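
  /* So the mapped image looks like this (sizes illustrative):

	 addr                            addr + d.total
	 |  object data, d.total bytes  |  allocation bitmap  |
	 ^ pch_zone.page                ^ pch_zone.end == pch_zone.alloc_bits

     The bitmap sits immediately after the object data, exactly where
     ggc_pch_finish wrote it.  */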

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
#if 0 && GATHER_STATISTICS
  zone_allocate_marks ();
  ggc_prune_overhead_list ();
#endif

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page, *next_page;
      struct large_page_entry *large_page, *next_large_page;

      zone->allocated = 0;

      /* Clear the zone's free chunk list.  */
      memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
      zone->high_free_bin = 0;
      zone->cached_free = NULL;
      zone->cached_free_size = 0;

      /* Move all the small pages onto the free list.  */
      for (page = zone->pages; page != NULL; page = next_page)
	{
	  next_page = page->next;
	  memset (page->alloc_bits, 0,
		  G.small_page_overhead - PAGE_OVERHEAD);
	  free_small_page (page);
	}

      /* Discard all the large pages.  */
      for (large_page = zone->large_pages; large_page != NULL;
	   large_page = next_large_page)
	{
	  next_large_page = large_page->next;
	  free_large_page (large_page);
	}

      zone->pages = NULL;
      zone->large_pages = NULL;
    }

  /* Allocate the dummy page entry for the PCH, and set all pages
     mapped into the PCH to reference it.  */
  pch_page = XCNEW (struct page_entry);
  pch_page->page = pch_zone.page;
  pch_page->pch_p = true;

  for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
    set_page_table_entry (p, pch_page);
}