1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
4 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error "Zone collector requires mmap"
#endif
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif
/* NOTES:
   If we track inter-zone pointers, we can mark single zones at a
   time.

   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.

   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.

   Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are those greater than or equal to the page size.

   Pages for small objects are broken up into chunks, each of which
   is described by a struct alloc_chunk.  One can walk over all
   chunks on the page by adding the chunk size to the chunk's data
   address (see the sketch just after this comment).  The free space
   for a page exists in the free chunk bins.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
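/* A rough sketch of the chunk walk described above (not part of the
   original allocator): every small-object page is a dense run of
   chunks, and the header of the next chunk starts immediately after
   the data of the current one.  The simplified header below is
   hypothetical; the real one is struct alloc_chunk, defined later in
   this file.  */

struct toy_chunk
{
  unsigned int size;		/* Bytes of object data in this chunk.  */
  char data[1];			/* Object data starts here.  */
};

static void toy_walk_page (char *, size_t) ATTRIBUTE_UNUSED;

static void
toy_walk_page (char *page, size_t pagesize)
{
  struct toy_chunk *chunk = (struct toy_chunk *) page;
  struct toy_chunk *end = (struct toy_chunk *) (page + pagesize);

  do
    {
      /* ... inspect *chunk here ...  */

      /* The next header lives immediately after this chunk's data.  */
      chunk = (struct toy_chunk *) (chunk->data + chunk->size);
    }
  while (chunk < end);
}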
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif
/* This structure manages small chunks.  When the chunk is free, it's
   linked with other chunks via free_next.  When the chunk is allocated,
   the data starts at u.  Large chunks are allocated one at a time to
   their own page, and so don't come in here.

   The "type" field is a placeholder for a future change to do
   generational collection.  At present it is 0 when free and
   1 when allocated.  */

struct alloc_chunk {
#ifdef COOKIE_CHECKING
  unsigned int magic;
#endif
  unsigned int type:1;
  unsigned int typecode:14;
  unsigned int large:1;
  unsigned int size:15;
  unsigned int mark:1;
  union {
    struct alloc_chunk *next_free;
    char data[1];

    /* Make sure the data is sufficiently aligned.  */
    HOST_WIDEST_INT align_i;
#ifdef HAVE_LONG_DOUBLE
    long double align_d;
#else
    double align_d;
#endif
  } u;
} __attribute__ ((packed));

#define CHUNK_OVERHEAD	(offsetof (struct alloc_chunk, u))
/* We maintain several bins of free lists for chunks for very small
   objects.  We never exhaustively search other bins -- if we don't
   find one of the proper size, we allocate from the "larger" bin.  */

/* Decreasing the number of free bins increases the time it takes to allocate.
   The same applies to increasing MAX_FREE_BIN_SIZE without increasing
   NUM_FREE_BINS.

   After much histogramming of allocation sizes and time spent on gc,
   on a PowerPC G4 7450 - 667 MHz, and a Pentium 4 - 2.8 GHz,
   these were determined to be the optimal values.  */
#define NUM_FREE_BINS		64
#define MAX_FREE_BIN_SIZE	256
#define FREE_BIN_DELTA		(MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
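/* Illustration, not in the original file: with the values above,
   FREE_BIN_DELTA is 256/64 == 4.  SIZE_BIN_UP rounds a request up to
   the next bin (a 9-byte request maps to bin 3, the 12-byte bin),
   while SIZE_BIN_DOWN truncates (a freed 9-byte chunk files into bin
   2), so a bin lookup never returns a chunk smaller than the request.
   The declarations below are compile-time checks of that arithmetic.  */
extern char check_size_bin_up[SIZE_BIN_UP (9) == 3 ? 1 : -1];
extern char check_size_bin_down[SIZE_BIN_DOWN (9) == 2 ? 1 : -1];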
/* Marker used as chunk->size for a large object.  Should correspond
   to the size of the size bitfield above.  */
#define LARGE_OBJECT_SIZE	0x7fff
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
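/* Why the offsetof trick works (illustration, not in the original
   file): the compiler must pad the lone char so that the union that
   follows it is correctly aligned, so the union's offset equals the
   strictest alignment among the union's members.  With a C11 compiler
   the invariant can be checked directly, assuming the compiler
   inserts only the minimal padding (which the usual ABIs do):  */
#if defined __STDC_VERSION__ && __STDC_VERSION__ >= 201112L
_Static_assert (MAX_ALIGNMENT == _Alignof (struct max_alignment),
		"offsetof trick yields the required alignment");
#endif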
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */
#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
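/* Worked example (not in the original file): rounding 10 up to a
   multiple of 8.  ROUND_UP_VALUE (10, 8) == 7 - (7 + 10) % 8 == 6,
   and 10 + 6 == 16; ROUND_UP (10, 8) == CEIL (10, 8) * 8 == 16.
   The declarations below check this at compile time.  */
extern char check_round_up_value[ROUND_UP_VALUE (10, 8) == 6 ? 1 : -1];
extern char check_round_up[ROUND_UP (10, 8) == 16 ? 1 : -1];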
/* A page_entry records the status of an allocation page.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* How many collections we've survived.  */
  size_t survived;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;
} page_entry;
/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* The zone allocation structure.  */
struct alloc_zone
{
  /* Name of the zone.  */
  const char *name;

  /* Linked list of pages in a zone.  */
  page_entry *pages;

  /* Linked lists of free storage.  Slot I (for 1 <= I <= NUM_FREE_BINS)
     holds chunks of size I * FREE_BIN_DELTA; all other chunks are in
     slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;
} main_zone;

struct alloc_zone *rtl_zone;
struct alloc_zone *garbage_zone;
struct alloc_zone *tree_zone;
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16
static int ggc_allocated_p (const void *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct page_entry *alloc_small_page (struct alloc_zone *);
static struct page_entry *alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
static void free_page (struct page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static void *ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void check_cookies (void);
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->type == 1)
    return true;
  return false;
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The #ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  VALGRIND_MALLOCLIKE_BLOCK (page, size, 0, 0);

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#endif
/* Allocate a new small-object page for ZONE, and return an entry
   for it.  */

static inline struct page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  page = NULL;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
      page = entry->page;
    }
#ifdef USING_MMAP
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = zone->free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      zone->free_pages = f;
    }
#endif
  if (entry == NULL)
    entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));

  entry->next = 0;
  entry->bytes = G.pagesize;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = false;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + G.pagesize - 1);

  return entry;
}
/* Allocate a large page of size SIZE in ZONE.  */

static inline struct page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  size = ROUND_UP (size, 1024);
  page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
  entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);

  entry->next = 0;
  entry->bytes = size;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = true;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
	     entry->page, entry->page + entry->bytes - 1);

  if (entry->large_p)
    {
      free (entry->page);
      VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
    }
  else
    {
      /* Mark the page as inaccessible.  Discard the handle to
	 avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

      entry->next = entry->zone->free_pages;
      entry->zone->free_pages = entry;
    }
}
/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}
/* Place CHUNK of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
{
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  if (bin > NUM_FREE_BINS)
    bin = 0;
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
    abort ();
  chunk->magic = DEADCHUNK_MAGIC;
#endif
  chunk->u.next_free = zone->free_chunks[bin];
  zone->free_chunks[bin] = chunk;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}
/* Allocate a chunk of memory of SIZE bytes.  */

static void *
ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
{
  size_t bin = 0;
  size_t lsize = 0;
  struct page_entry *entry;
  struct alloc_chunk *chunk, *lchunk, **pp;
  void *result;

  /* Align size, so that we're assured of aligned allocations.
     (MAX_ALIGNMENT is a power of two, so the mask rounds SIZE up to
     the next multiple of it.)  */
  if (size < FREE_BIN_DELTA)
    size = FREE_BIN_DELTA;
  size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Large objects are handled specially.  */
  if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
    {
      size = ROUND_UP (size, 1024);
      entry = alloc_large_page (size, zone);
      entry->survived = 0;
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 1;
      chunk->size = CEIL (size, 1024);

      goto found;
    }

  /* First look for a tiny object already segregated into its own
     size bucket.  */
  bin = SIZE_BIN_UP (size);
  if (bin <= NUM_FREE_BINS)
    {
      chunk = zone->free_chunks[bin];
      if (chunk)
	{
	  zone->free_chunks[bin] = chunk->u.next_free;
	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->u.next_free;
      chunk = *pp;
    }

  /* Failing that, allocate new storage.  */
  if (!chunk)
    {
      entry = alloc_small_page (zone);
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = G.pagesize - CHUNK_OVERHEAD;
    }
  else
    {
      *pp = chunk->u.next_free;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
    }

  /* Release extra memory from a chunk that's too big.  */
  lsize = chunk->size - size;
  if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;

      lsize -= CHUNK_OVERHEAD;
      lchunk = (struct alloc_chunk *)(chunk->u.data + size);
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
#ifdef COOKIE_CHECKING
      lchunk->magic = CHUNK_MAGIC;
#endif
      lchunk->type = 0;
      lchunk->mark = 0;
      lchunk->size = lsize;
      lchunk->large = 0;
      free_chunk (lchunk, lsize, zone);
    }

  /* Calculate the object's address.  */
 found:
#ifdef COOKIE_CHECKING
  chunk->magic = CHUNK_MAGIC;
#endif
  chunk->type = 1;
  chunk->mark = 0;
  chunk->typecode = type;
  result = chunk->u.data;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* `Poison' the entire allocated object.  */
  memset (result, 0xaf, size);
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size + CHUNK_OVERHEAD;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
	     (void *)chunk, (unsigned long) size, result);

  return result;
}
/* Allocate SIZE bytes of memory whose GC type is GTE, into the zone
   appropriate for that type.  */

void *
ggc_alloc_typed (enum gt_types_enum gte, size_t size)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_1 (size, tree_zone, gte);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    default:
      return ggc_alloc_zone_1 (size, &main_zone, gte);
    }
}
/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc (size_t size)
{
  return ggc_alloc_zone_1 (size, &main_zone, -1);
}

/* Zone allocation allocates into the specified zone.  */

void *
ggc_alloc_zone (size_t size, struct alloc_zone *zone)
{
  return ggc_alloc_zone_1 (size, zone, -1);
}
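/* Usage sketch (not part of the original file): a client that wants
   related objects to live and die together can allocate them from a
   named zone.  The struct and function below are hypothetical.  */
#if 0
static struct example_node *
example_alloc_node (void)
{
  /* All nodes land on rtl_zone's pages, so collecting that zone
     sweeps them together.  */
  return (struct example_node *) ggc_alloc_zone (sizeof (struct example_node),
						 rtl_zone);
}
#endif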
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->mark)
    return 1;
  chunk->mark = 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  return chunk->mark;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->large)
    return chunk->size * 1024;

  return chunk->size;
}
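/* Illustration, not in the original file: large chunks store their
   size in 1K units (ggc_alloc_zone_1 sets chunk->size to
   CEIL (size, 1024) after rounding), so a 5000-byte request becomes
   chunk->size == 5 and ggc_get_size reports 5120 -- the rounded-up
   allocation size, not the original request.  */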
/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  rtl_zone = new_ggc_zone ("RTL zone");
  tree_zone = new_ggc_zone ("Tree zone");
  garbage_zone = new_ggc_zone ("Garbage zone");

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);
#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = main_zone.free_pages;
    main_zone.free_pages = e;
  }
#endif
}
/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char * name)
{
  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
  return new_zone;
}
/* Destroy a GGC zone.  */

void
destroy_ggc_zone (struct alloc_zone * dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */ ;

#ifdef ENABLE_CHECKING
  /* We should have found the zone in the list.  Anything else is fatal.  */
  if (!z)
    abort ();
#endif

  /* z is dead, baby.  z is dead.  */
  dead_zone->dead = true;
}
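/* Usage sketch (not part of the original file): a pass can give its
   scratch data a private lifetime by pairing these two calls.  The
   zone name is hypothetical; the zone itself is only unchained and
   freed at the end of a later collection, once it is empty.  */
#if 0
  struct alloc_zone *scratch = new_ggc_zone ("Scratch zone");
  void *obj = ggc_alloc_zone (64, scratch);
  /* ... use obj until it is no longer reachable ...  */
  destroy_ggc_zone (scratch);
#endif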
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ++(zone->context_depth);

  if (main_zone.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

static void
ggc_pop_context_1 (struct alloc_zone *zone)
{
  unsigned long omask;
  unsigned depth;
  page_entry *p;

  depth = --(zone->context_depth);
  omask = (unsigned long)1 << (depth + 1);

  if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
    return;

  zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
  zone->context_depth_allocations &= omask - 1;
  zone->context_depth_collections &= omask - 1;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (p = zone->pages; p != NULL; p = p->next)
    if (p->context_depth > depth)
      p->context_depth = depth;
}
/* Pop all the zone contexts.  */

void
ggc_pop_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ggc_pop_context_1 (zone);
}
/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
  memset ((CHUNK)->u.data, 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#endif
/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  page_entry **pp, *p, *next;
  struct alloc_chunk *chunk, *last_free, *end;
  size_t last_free_size, allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  pp = &zone->pages;
  for (p = zone->pages; p ; p = next)
    {
      next = p->next;

      /* Large pages are all or none affairs.  Either they are
	 completely empty, or they are completely full.

	 XXX: Should we bother to increment allocated?  */
      if (p->large_p)
	{
	  if (((struct alloc_chunk *)p->page)->mark == 1)
	    {
	      ((struct alloc_chunk *)p->page)->mark = 0;
	    }
	  else
	    {
	      *pp = next;
#ifdef ENABLE_GC_CHECKING
	      /* Poison the page.  */
	      memset (p->page, 0xb5, p->bytes);
#endif
	      free_page (p);
	    }
	  continue;
	}

      /* This page has now survived another collection.  */
      p->survived++;

      /* Which leaves full and partial pages.  Step through all chunks,
	 consolidate those that are free and insert them into the free
	 lists.  Note that consolidation slows down collection
	 slightly.  */

      chunk = (struct alloc_chunk *)p->page;
      end = (struct alloc_chunk *)(p->page + G.pagesize);
      last_free = NULL;
      last_free_size = 0;
      nomarksinpage = true;
      do
	{
	  prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
	  if (chunk->mark || p->context_depth < zone->context_depth)
	    {
	      nomarksinpage = false;
	      if (last_free)
		{
		  last_free->type = 0;
		  last_free->size = last_free_size;
		  last_free->mark = 0;
		  poison_chunk (last_free, last_free_size);
		  free_chunk (last_free, last_free_size, zone);
		  last_free = NULL;
		}
	      if (chunk->mark)
		allocated += chunk->size + CHUNK_OVERHEAD;
	      chunk->mark = 0;
	    }
	  else
	    {
	      if (last_free)
		last_free_size += CHUNK_OVERHEAD + chunk->size;
	      else
		{
		  last_free = chunk;
		  last_free_size = chunk->size;
		}
	    }

	  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
	}
      while (chunk < end);

      if (nomarksinpage)
	{
	  *pp = next;
#ifdef ENABLE_GC_CHECKING
	  /* Poison the page.  */
	  memset (p->page, 0xb5, p->bytes);
#endif
	  free_page (p);
	  continue;
	}
      else if (last_free)
	{
	  last_free->type = 0;
	  last_free->size = last_free_size;
	  last_free->mark = 0;
	  poison_chunk (last_free, last_free_size);
	  free_chunk (last_free, last_free_size, zone);
	}
      pp = &p->next;
    }

  zone->allocated = allocated;
}
/* mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (zone->allocated_last_gc,
	 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (zone->allocated < allocated_last_gc + min_expand)
    return false;

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
	     zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  /* Indicate that we've seen collections at this context depth.  */
  zone->context_depth_collections
    = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
  if (need_marking)
    ggc_mark_roots ();
  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  page_entry *p;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->survived;
    }
  return survival/count;
}
/* Check the magic cookies all of the chunks contain, to make sure we
   aren't doing anything stupid, like stomping on alloc_chunk
   structures.  */

static inline void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
  page_entry *p;
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      for (p = zone->pages; p; p = p->next)
	{
	  if (!p->large_p)
	    {
	      struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
	      struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
	      do
		{
		  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
		    abort ();
		  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
		}
	      while (chunk < end);
	    }
	}
    }
#endif
}
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;
  float f;

  timevar_push (TV_GC);
  check_cookies ();
  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
	{
	  check_cookies ();
	  zone->was_collected = false;
	  marked |= ggc_collect_1 (zone, !marked);
	}
    }

  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    {
	      f = calculate_average_page_survival (zone);
	      printf ("Average page survival in zone `%s' is %f\n",
		      zone->name, f);
	    }
	}
    }

  /* Since we don't mark zones one at a time right now, marking in any
     zone means marking in every zone.  So we have to clear all the
     marks in all the zones that weren't collected already.  */
  if (marked)
    {
      page_entry *p;
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    continue;
	  for (p = zone->pages; p; p = p->next)
	    {
	      if (!p->large_p)
		{
		  struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
		  struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
		  do
		    {
		      prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
		      if (chunk->mark || p->context_depth < zone->context_depth)
			chunk->mark = 0;
		      chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
		    }
		  while (chunk < end);
		}
	      else
		((struct alloc_chunk *)p->page)->mark = 0;
	    }
	}
    }

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
	{
	  struct alloc_zone *dead_zone = zone->next_zone;

	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

	  /* The zone must be empty.  */
	  if (dead_zone->allocated != 0)
	    abort ();

	  /* Unchain the dead zone, release all its pages and free it.  */
	  zone->next_zone = zone->next_zone->next_zone;
	  release_pages (dead_zone);
	  free (dead_zone);
	}
    }

  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */

void
ggc_print_statistics (void)
{
}
struct ggc_pch_ondisk
{
  unsigned total;
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t written;
};
/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}
/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    d->d.total += size + CHUNK_OVERHEAD;
  else
    d->d.total += size;
}
/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  return d->d.total;
}
/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  d->base = (size_t) base;
}
/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string)
{
  char *result;
  result = (char *)d->base;
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
      if (chunk->large)
	d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
      else
	d->base += chunk->size + CHUNK_OVERHEAD;
      return result + CHUNK_OVERHEAD;
    }
  else
    {
      d->base += size;
      return result;
    }
}
/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
      size = ggc_get_size (x);
      if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
      d->written += size + CHUNK_OVERHEAD;
    }
  else
    {
      if (fwrite (x, size, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
      d->written += size;
    }
  if (d->written == d->d.total
      && fseek (f, ROUND_UP_VALUE (d->d.total, G.pagesize), SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}
/* Write out the PCH data.  */

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}
/* Read the PCH data from file F and register the pages at ADDR as a
   zone of their own.  */

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  struct page_entry *entry;
  struct alloc_zone *pch_zone;
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");
  entry = xcalloc (1, sizeof (struct page_entry));
  entry->bytes = d.total;
  entry->page = addr;
  entry->context_depth = 0;
  pch_zone = new_ggc_zone ("PCH zone");
  entry->zone = pch_zone;
  entry->next = entry->zone->pages;
  entry->zone->pages = entry;
}