/* "Bag-of-pages" zone garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
   Free Software Foundation, Inc.
   Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error "Zone collector requires mmap"
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif
/* NOTES:
   If we track inter-zone pointers, we can mark single zones at a
   time.
   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.
   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */
/* This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than or equal to the page size.

   Pages for small objects are broken up into chunks, each of which
   is described by a struct alloc_chunk.  One can walk over all the
   chunks on a page by adding the chunk size to the chunk's data
   address; an illustrative sketch of that walk follows this comment.
   The free space for a page exists in the free chunk bins.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
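/* A minimal illustrative sketch of the chunk walk described above.  It
   is not used by the collector and is kept compiled out; the page_entry,
   alloc_chunk and G definitions it refers to appear later in this file.  */
#if 0
static void
walk_page_chunks (struct page_entry *p)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
  struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);

  do
    {
      /* Visit CHUNK here.  chunk->size counts only the data bytes, so
         stepping past the data lands on the next chunk's header.  */
      chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
    }
  while (chunk < end);
}
#endif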
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif
/* This structure manages small chunks.  When the chunk is free, it's
   linked with other chunks via free_next.  When the chunk is allocated,
   the data starts at u.  Large chunks are allocated one at a time to
   their own page, and so don't come in here.

   The "type" field is a placeholder for a future change to do
   generational collection.  At present it is 0 when free and
   1 when allocated.  */

struct alloc_chunk {
#ifdef COOKIE_CHECKING
  unsigned int magic;
#endif
  unsigned int type:1;
  unsigned int typecode:14;
  unsigned int large:1;
  unsigned int size:15;
  unsigned int mark:1;
  union {
    struct alloc_chunk *next_free;
    char data[1];

    /* Make sure the data is sufficiently aligned.  */
    HOST_WIDEST_INT align_i;
#ifdef HAVE_LONG_DOUBLE
    long double align_d;
#else
    double align_d;
#endif
  } u;
};

#define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u))
/* We maintain several bins of free lists for chunks for very small
   objects.  We never exhaustively search other bins -- if we don't
   find one of the proper size, we allocate from the "larger" bin.  */

/* Decreasing the number of free bins increases the time it takes to allocate.
   The same goes for increasing max_free_bin_size without increasing
   num_free_bins.

   After much histogramming of allocation sizes and time spent on gc,
   on a PowerPC G4 7450 - 667 MHz, and a Pentium 4 - 2.8 GHz,
   these were determined to be the optimal values.  */
#define NUM_FREE_BINS 64
#define MAX_FREE_BIN_SIZE (64 * sizeof (void *))
#define FREE_BIN_DELTA (MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE) (((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
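/* A worked example (assuming 64-bit pointers, so FREE_BIN_DELTA is 8):
   a 20-byte request maps to bin SIZE_BIN_UP (20) == 3 and is served by
   24-byte chunks, while a freed 24-byte chunk files back into bin
   SIZE_BIN_DOWN (24) == 3.  Anything above MAX_FREE_BIN_SIZE ends up in
   slot 0, the "larger" bin, which is searched linearly.  */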
/* Marker used as chunk->size for a large object.  Should correspond
   to the size of the bitfield above.  */
#define LARGE_OBJECT_SIZE 0x7fff
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
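/* For instance, ROUND_UP_VALUE (20, 8) is 4 and ROUND_UP (20, 8) is 24;
   values already on a boundary are unchanged, e.g. ROUND_UP (24, 8) is 24.  */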
/* A page_entry records the status of an allocation page.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* How many collections we've survived.  */
  size_t survived;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;
} page_entry;
/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* The system's page size, and its base-2 logarithm.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* The zone allocation structure.  */
struct alloc_zone
{
  /* Name of the zone.  */
  const char *name;

  /* Linked list of pages in a zone.  */
  struct page_entry *pages;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;

/* The default zones.  */
struct alloc_zone *rtl_zone;
struct alloc_zone *garbage_zone;
struct alloc_zone *tree_zone;

/* Nonzero if a collection should always be performed, bypassing the
   usual size heuristics.  */
static int always_collect;
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16
static int ggc_allocated_p (const void *);
static char *alloc_anon (char *, size_t, struct alloc_zone *);
static struct page_entry * alloc_small_page (struct alloc_zone *);
static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
static void free_page (struct page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short MEM_STAT_DECL);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void check_cookies (void);
/* Returns nonzero if P was allocated in GC'able memory.  */

static int
ggc_allocated_p (const void *p)
{
  struct alloc_chunk *chunk;
  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->type == 1)
    return true;
  return false;
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  VALGRIND_MALLOCLIKE_BLOCK (page, size, 0, 0);

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  */

static inline struct page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  page = NULL;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
      page = entry->page;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = zone->free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
        {
          e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      zone->free_pages = f;
    }

  if (entry == NULL)
    entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));

  entry->next = 0;
  entry->bytes = G.pagesize;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = false;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
             (PTR) entry, page, page + G.pagesize - 1);

  return entry;
}
/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
/* Allocate a large page of size SIZE in ZONE.  */

static inline struct page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  size = ROUND_UP (size, 1024);
  page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
  entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);

  entry->next = 0;
  entry->bytes = size;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = true;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
             (PTR) entry, page, page + size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
             entry->page, entry->page + entry->bytes - 1);

  if (entry->large_p)
    {
      free (entry->page);
      VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
    }
  else
    {
      /* Mark the page as inaccessible.  Discard the handle to
         avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

      entry->next = entry->zone->free_pages;
      entry->zone->free_pages = entry;
    }
}
/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
}
/* Place CHUNK of size SIZE on the free list for ZONE.  */

static void
free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
{
  size_t bin;

  bin = SIZE_BIN_DOWN (size);
  if (bin > NUM_FREE_BINS)
    bin = 0;
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
    abort ();
  chunk->magic = DEADCHUNK_MAGIC;
#endif
  chunk->u.next_free = zone->free_chunks[bin];
  zone->free_chunks[bin] = chunk;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}
/* Allocate a chunk of memory of SIZE bytes.  */

static void *
ggc_alloc_zone_1 (size_t orig_size, struct alloc_zone *zone, short type
                  MEM_STAT_DECL)
{
  size_t bin = 0;
  size_t lsize = 0;
  struct page_entry *entry;
  struct alloc_chunk *chunk, *lchunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Align size, so that we're assured of aligned allocations.  */
  if (size < FREE_BIN_DELTA)
    size = FREE_BIN_DELTA;
  size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Large objects are handled specially.  */
  if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
    {
      size = ROUND_UP (size, 1024);
      entry = alloc_large_page (size, zone);
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 1;
      chunk->size = CEIL (size, 1024);

      goto found;
    }

  /* First look for a tiny object already segregated into its own
     free list.  */
  bin = SIZE_BIN_UP (size);
  if (bin <= NUM_FREE_BINS)
    {
      chunk = zone->free_chunks[bin];
      if (chunk)
        {
          zone->free_chunks[bin] = chunk->u.next_free;
          VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
          goto found;
        }
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->u.next_free;
      chunk = *pp;
    }

  /* Failing that, allocate new storage.  */
  if (!chunk)
    {
      entry = alloc_small_page (zone);
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = G.pagesize - CHUNK_OVERHEAD;
      chunk->large = 0;
    }
  else
    {
      *pp = chunk->u.next_free;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 0;
    }

  /* Release extra memory from a chunk that's too big.  */
  lsize = chunk->size - size;
  if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;

      lsize -= CHUNK_OVERHEAD;
      lchunk = (struct alloc_chunk *)(chunk->u.data + size);
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
#ifdef COOKIE_CHECKING
      lchunk->magic = CHUNK_MAGIC;
#endif
      lchunk->type = 0;
      lchunk->mark = 0;
      lchunk->size = lsize;
      lchunk->large = 0;
      free_chunk (lchunk, lsize, zone);
    }

  /* Calculate the object's address.  */
 found:
#ifdef COOKIE_CHECKING
  chunk->magic = CHUNK_MAGIC;
#endif
  chunk->type = 1;
  chunk->mark = 0;
  chunk->typecode = type;
  result = chunk->u.data;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* `Poison' the entire allocated object.  */
  memset (result, 0xaf, size);
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     unaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size + CHUNK_OVERHEAD - orig_size PASS_MEM_STAT);

  {
    size_t object_size = size + CHUNK_OVERHEAD;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
        zone->stats.total_overhead_under32 += overhead;
        zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
        zone->stats.total_overhead_under64 += overhead;
        zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
        zone->stats.total_overhead_under128 += overhead;
        zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
             (void *)chunk, (unsigned long) size, result);

  return result;
}
/* Allocate SIZE bytes of memory of type GTE, into an appropriate zone
   for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
                      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_1 (size, tree_zone, gte PASS_MEM_STAT);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);

    default:
      return ggc_alloc_zone_1 (size, &main_zone, gte PASS_MEM_STAT);
    }
}
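/* Illustrative, hypothetical use (not part of this file): a caller that
   wants an RTL object routed to the RTL zone passes the matching type tag,
   e.g.

     void *obj = ggc_alloc_typed (gt_ggc_e_7rtx_def, sizeof (struct rtx_def));

   assuming the usual ggc_alloc_typed wrapper from ggc.h that supplies the
   MEM_STAT arguments; any tag not listed above simply falls through to the
   main zone.  */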
/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_alloc_zone_1 (size, &main_zone, -1 PASS_MEM_STAT);
}
/* Zone allocation allocates into the specified zone.  */

void *
ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone MEM_STAT_DECL)
{
  return ggc_alloc_zone_1 (size, zone, -1 PASS_MEM_STAT);
}
/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
  memset ((CHUNK)->u.data, 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#endif
/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);

  /* Poison the chunk.  */
  poison_chunk (chunk, ggc_get_size (p));
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->mark)
    return 1;
  chunk->mark = 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  return chunk->mark;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->large)
    return chunk->size * 1024;

  return chunk->size;
}
/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  rtl_zone = new_ggc_zone ("RTL zone");
  tree_zone = new_ggc_zone ("Tree zone");
  garbage_zone = new_ggc_zone ("Garbage zone");

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);
#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, &main_zone);
        if ((size_t)p & (G.pagesize - 1))
          abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = main_zone.free_pages;
    main_zone.free_pages = e;
  }
}
/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char * name)
{
  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
  return new_zone;
}
/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone * dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */ ;

#ifdef ENABLE_CHECKING
  /* We should have found the zone in the list.  Anything else is fatal.  */
  if (!z)
    abort ();
#endif

  /* z is dead, baby.  z is dead.  */
  dead_zone->dead = true;
}
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ++(zone->context_depth);
  /* Die on wrap.  */
  if (main_zone.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

static void
ggc_pop_context_1 (struct alloc_zone *zone)
{
  unsigned long omask;
  unsigned depth;
  page_entry *p;

  depth = --(zone->context_depth);
  omask = (unsigned long)1 << (depth + 1);

  if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
    return;

  zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
  zone->context_depth_allocations &= omask - 1;
  zone->context_depth_collections &= omask - 1;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (p = zone->pages; p != NULL; p = p->next)
    if (p->context_depth > depth)
      p->context_depth = depth;
}
/* Pop all the zone contexts.  */

void
ggc_pop_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ggc_pop_context_1 (zone);
}
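/* Illustrative usage sketch (not part of this file): a caller protecting
   existing objects across a nested computation brackets it with

     ggc_push_context ();
     ... allocate temporaries, possibly triggering ggc_collect () ...
     ggc_pop_context ();

   Objects allocated before the push sit in an outer context and are never
   freed by a collection inside the bracket; whatever survives the inner
   context is migrated outward by the pop.  */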
/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  page_entry **pp, *p, *next;
  struct alloc_chunk *chunk, *last_free, *end;
  size_t last_free_size, allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  pp = &zone->pages;
  for (p = zone->pages; p ; p = next)
    {
      next = p->next;

      /* Large pages are all or none affairs.  Either they are
         completely empty, or they are completely full.

         XXX: Should we bother to increment allocated.  */
      if (p->large_p)
        {
          if (((struct alloc_chunk *)p->page)->mark == 1)
            {
              ((struct alloc_chunk *)p->page)->mark = 0;
              allocated += p->bytes - CHUNK_OVERHEAD;
              pp = &p->next;
            }
          else
            {
              *pp = next;
#ifdef ENABLE_GC_CHECKING
              /* Poison the page.  */
              memset (p->page, 0xb5, p->bytes);
#endif
              free_page (p);
            }
          continue;
        }

      /* This page has now survived another collection.  */
      p->survived++;

      /* Which leaves full and partial pages.  Step through all chunks,
         consolidate those that are free and insert them into the free
         lists.  Note that consolidation slows down collection
         slightly.  */

      chunk = (struct alloc_chunk *)p->page;
      end = (struct alloc_chunk *)(p->page + G.pagesize);
      last_free = NULL;
      last_free_size = 0;
      nomarksinpage = true;
      do
        {
          prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
          if (chunk->mark || p->context_depth < zone->context_depth)
            {
              nomarksinpage = false;
              if (last_free)
                {
                  last_free->type = 0;
                  last_free->size = last_free_size;
                  last_free->mark = 0;
                  poison_chunk (last_free, last_free_size);
                  free_chunk (last_free, last_free_size, zone);
                  last_free = NULL;
                }
              if (chunk->mark)
                allocated += chunk->size;
              chunk->mark = 0;
            }
          else
            {
              if (last_free)
                last_free_size += CHUNK_OVERHEAD + chunk->size;
              else
                {
                  last_free = chunk;
                  last_free_size = chunk->size;
                }
            }

          chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
        }
      while (chunk < end);

      if (nomarksinpage)
        {
          *pp = next;
#ifdef ENABLE_GC_CHECKING
          /* Poison the page.  */
          memset (p->page, 0xb5, p->bytes);
#endif
          free_page (p);
          continue;
        }
      else if (last_free)
        {
          last_free->type = 0;
          last_free->size = last_free_size;
          last_free->mark = 0;
          poison_chunk (last_free, last_free_size);
          free_chunk (last_free, last_free_size, zone);
        }
      pp = &p->next;
    }

  zone->allocated = allocated;
}
/* mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
             zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  /* Indicate that we've seen collections at this context depth.  */
  zone->context_depth_collections
    = ((unsigned long)1 << (zone->context_depth + 1)) - 1;

  if (need_marking)
    ggc_mark_roots ();
  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  page_entry *p;

  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->survived;
    }
  return survival/count;
}
/* Check the magic cookies all of the chunks contain, to make sure we
   aren't doing anything stupid, like stomping on alloc_chunk
   structures.  */

static void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
  page_entry *p;
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      for (p = zone->pages; p; p = p->next)
        {
          if (!p->large_p)
            {
              struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
              struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
              do
                {
                  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
                    abort ();
                  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
                }
              while (chunk < end);
            }
        }
    }
#endif
}
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;
  float f;

  timevar_push (TV_GC);

  if (!always_collect)
    {
      float allocated_last_gc = 0, allocated = 0, min_expand;

      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          allocated_last_gc += zone->allocated_last_gc;
          allocated += zone->allocated;
        }

      allocated_last_gc =
        MAX (allocated_last_gc,
             (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

      if (allocated < allocated_last_gc + min_expand)
        {
          timevar_pop (TV_GC);
          return;
        }
    }

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */
  /* NOTE drow/2004-07-28: We now always collect the main zone, but
     keep this code in case the heuristics are further refined.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
        {
          zone->was_collected = false;
          marked |= ggc_collect_1 (zone, !marked);
        }
    }

  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          if (zone->was_collected)
            {
              f = calculate_average_page_survival (zone);
              printf ("Average page survival in zone `%s' is %f\n",
                      zone->name, f);
            }
        }
    }

  /* Since we don't mark zone at a time right now, marking in any
     zone means marking in every zone.  So we have to clear all the
     marks in all the zones that weren't collected already.  */
  if (marked)
    {
      page_entry *p;
      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          if (zone->was_collected)
            continue;
          for (p = zone->pages; p; p = p->next)
            {
              if (!p->large_p)
                {
                  struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
                  struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
                  do
                    {
                      prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
                      if (chunk->mark || p->context_depth < zone->context_depth)
                        chunk->mark = 0;
                      chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
                    }
                  while (chunk < end);
                }
              else
                ((struct alloc_chunk *)p->page)->mark = 0;
            }
        }
    }

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
        {
          struct alloc_zone *dead_zone = zone->next_zone;

          printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

          /* The zone must be empty.  */
          if (dead_zone->allocated != 0)
            abort ();

          /* Unchain the dead zone, release all its pages and free it.  */
          zone->next_zone = zone->next_zone->next_zone;
          release_pages (dead_zone);
          free (dead_zone);
        }
    }

  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
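/* For instance, 5000 bytes prints as "5000 ", 123456 bytes as "120k",
   and 123456789 bytes as "117M".  */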
void
ggc_print_statistics (void)
{
  struct alloc_zone *zone;
  struct ggc_statistics stats;
  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur, in all zones.  */
  always_collect = 1;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  ggc_collect ();

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  for (zone = G.zones; zone; zone = zone->next_zone)
    release_pages (zone);

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");

  fprintf (stderr, "%20s %10s %10s %10s\n",
           "Zone", "Allocated", "Used", "Overhead");
  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!zone->pages)
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */

      for (p = zone->pages; p; p = p->next)
        {
          struct alloc_chunk *chunk;

          /* We've also allocated sizeof (page_entry), but it's not in the
             "managed" area... */
          allocated += p->bytes;
          overhead += sizeof (page_entry);

          if (p->large_p)
            {
              in_use += p->bytes - CHUNK_OVERHEAD;
              chunk = (struct alloc_chunk *) p->page;
              overhead += CHUNK_OVERHEAD;
              continue;
            }

          for (chunk = (struct alloc_chunk *) p->page;
               (char *) chunk < (char *) p->page + p->bytes;
               chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size))
            {
              overhead += CHUNK_OVERHEAD;
              if (chunk->type)
                in_use += chunk->size;
            }
        }
      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
               zone->name,
               SCALE (allocated), LABEL (allocated),
               SCALE (in_use), LABEL (in_use),
               SCALE (overhead), LABEL (overhead));

      if (in_use != zone->allocated)
        abort ();

      total_overhead += overhead;
      total_allocated += zone->allocated;
      total_bytes_mapped += zone->bytes_mapped;
    }

  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
           SCALE (total_allocated), LABEL (total_allocated),
           SCALE (total_overhead), LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    unsigned long long all_overhead = 0, all_allocated = 0;
    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;

    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    for (zone = G.zones; zone; zone = zone->next_zone)
      {
        all_overhead += zone->stats.total_overhead;
        all_allocated += zone->stats.total_allocated;

        all_allocated_under32 += zone->stats.total_allocated_under32;
        all_overhead_under32 += zone->stats.total_overhead_under32;

        all_allocated_under64 += zone->stats.total_allocated_under64;
        all_overhead_under64 += zone->stats.total_overhead_under64;

        all_allocated_under128 += zone->stats.total_allocated_under128;
        all_overhead_under128 += zone->stats.total_overhead_under128;

        fprintf (stderr, "%20s: %10lld\n",
                 zone->name, zone->stats.total_allocated);
      }

    fprintf (stderr, "\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
             all_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
             all_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
             all_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
             all_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
             all_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
             all_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
             all_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
             all_allocated_under128);
  }
#endif
}
struct ggc_pch_ondisk
{
  size_t total;
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t written;
};

/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}

/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string)
{
  if (!is_string)
    d->d.total += size + CHUNK_OVERHEAD;
  else
    d->d.total += size;
}

/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  return d->d.total;
}

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  d->base = (size_t) base;
}

/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
                      size_t size, bool is_string)
{
  char *result;
  result = (char *) d->base;
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *) x - CHUNK_OVERHEAD);
      if (chunk->large)
        d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
      else
        d->base += chunk->size + CHUNK_OVERHEAD;
      return result + CHUNK_OVERHEAD;
    }
  else
    {
      d->base += size;
      return result;
    }
}

/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
                      size_t size, bool is_string)
{
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *) x - CHUNK_OVERHEAD);
      size = ggc_get_size (x);
      if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
        fatal_error ("can't write PCH file: %m");
      d->written += size + CHUNK_OVERHEAD;
    }
  else
    {
      if (fwrite (x, size, 1, f) != 1)
        fatal_error ("can't write PCH file: %m");
      d->written += size;
    }
}

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
}

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  struct page_entry *entry;
  struct alloc_zone *pch_zone;
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");
  entry = xcalloc (1, sizeof (struct page_entry));
  entry->bytes = d.total;
  entry->page = addr;
  entry->context_depth = 0;
  pch_zone = new_ggc_zone ("PCH zone");
  entry->zone = pch_zone;
  entry->next = entry->zone->pages;
  entry->zone->pages = entry;
}