/* "Bag-of-pages" zone garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
   Free Software Foundation, Inc.
   Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error "Zone collector requires mmap"
#endif
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif
/* If we track inter-zone pointers, we can mark single zones at a
   time.
   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.
   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */

/* This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than or equal to the page size.

   Pages for small objects are broken up into chunks, each of which
   is described by a struct alloc_chunk.  One can walk over all
   chunks on a page by adding the chunk size to the chunk's data
   address.  The free space for a page lives in the free chunk bins.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
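/* Illustrative sketch (an addition, not part of the original allocator):
   this is how the chunk walk described above looks in code.  The same
   idiom appears later in sweep_pages and check_cookies.  The types and
   the global G used here are declared further down in this file; the
   block is kept under "#if 0" so it is never compiled.  */
#if 0
static void
example_walk_page_chunks (struct page_entry *p)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
  struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
  do
    {
      /* chunk->u is the object data; stepping past chunk->size bytes of
	 data lands on the next chunk header on this page.  */
      chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
    }
  while (chunk < end);
}
#endif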
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif
/* This structure manages small chunks.  When the chunk is free, it's
   linked with other chunks via free_next.  When the chunk is allocated,
   the data starts at u.  Large chunks are allocated one at a time to
   their own page, and so don't come in here.

   The "type" field is a placeholder for a future change to do
   generational collection.  At present it is 0 when free and
   1 when allocated.  */
struct alloc_chunk {
#ifdef COOKIE_CHECKING
  unsigned int magic;
#endif
  unsigned int type:1;
  unsigned int typecode:14;
  unsigned int large:1;
  unsigned int size:15;
  unsigned int mark:1;
  union {
    struct alloc_chunk *next_free;
    char data[1];

    /* Make sure the data is sufficiently aligned.  */
    HOST_WIDEST_INT align_i;
#ifdef HAVE_LONG_DOUBLE
    long double align_d;
#else
    double align_d;
#endif
  } u;
} __attribute__ ((packed));
#define CHUNK_OVERHEAD	(offsetof (struct alloc_chunk, u))
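/* Illustrative sketch (an addition): every object handed out by this
   allocator is preceded by its struct alloc_chunk header, so the header
   can be recovered from an object pointer by stepping back
   CHUNK_OVERHEAD bytes.  ggc_get_size, ggc_set_mark and ggc_marked_p
   below use exactly this computation.  Kept under "#if 0" so it is
   never compiled.  */
#if 0
static struct alloc_chunk *
example_chunk_for_object (const void *p)
{
  /* The object data starts at chunk->u, i.e. CHUNK_OVERHEAD bytes into
     the chunk, so subtracting that offset yields the chunk header.  */
  return (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
}
#endif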
/* We maintain several bins of free lists for chunks for very small
   objects.  We never exhaustively search other bins -- if we don't
   find one of the proper size, we allocate from the "larger" bin.  */

/* Decreasing the number of free bins increases the time it takes to allocate.
   The same applies to increasing MAX_FREE_BIN_SIZE without increasing
   NUM_FREE_BINS.

   After much histogramming of allocation sizes and time spent on gc,
   on a PowerPC G4 7450 - 667 MHz, and a Pentium 4 - 2.8 GHz,
   these were determined to be the optimal values.  */
#define NUM_FREE_BINS		64
#define MAX_FREE_BIN_SIZE	256
#define FREE_BIN_DELTA		(MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
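/* Worked example of the bin arithmetic above (an illustrative addition,
   never compiled): with NUM_FREE_BINS == 64 and MAX_FREE_BIN_SIZE == 256,
   FREE_BIN_DELTA is 4, so requests are binned in 4-byte steps.  */
#if 0
static void
example_bin_sizes (void)
{
  /* SIZE_BIN_UP rounds a request up to its bin: a 1..4 byte request
     maps to bin 1, a 5..8 byte request to bin 2, and so on.  */
  assert (SIZE_BIN_UP (1) == 1);
  assert (SIZE_BIN_UP (4) == 1);
  assert (SIZE_BIN_UP (5) == 2);
  /* SIZE_BIN_DOWN rounds a freed chunk down, so a 7-byte chunk is kept
     on the bin-1 (4-byte) free list rather than on a bin whose requests
     it could not satisfy.  */
  assert (SIZE_BIN_DOWN (7) == 1);
  assert (SIZE_BIN_DOWN (256) == 64);
}
#endif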
/* Marker used as chunk->size for a large object.  Should correspond
   to the size of the bitfield above.  */
#define LARGE_OBJECT_SIZE	0x7fff
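/* Illustrative consistency check (an addition, never compiled):
   LARGE_OBJECT_SIZE is meant to be the maximum value representable in
   the 15-bit "size" bitfield of struct alloc_chunk, i.e.
   (1 << 15) - 1 == 0x7fff.  */
#if 0
extern char large_object_size_fits_bitfield
  [LARGE_OBJECT_SIZE == (1 << 15) - 1 ? 1 : -1];
#endif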
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
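/* Worked example of the rounding macros above (an illustrative addition,
   never compiled; CEIL comes from GCC's system headers): for x == 10 and
   f == 8, ROUND_UP_VALUE (10, 8) is 6, since 10 + 6 == 16 is the next
   multiple of 8, and ROUND_UP (10, 8) is 16.  When x is already a
   multiple of f, ROUND_UP_VALUE is 0 and ROUND_UP returns x unchanged.  */
#if 0
extern char round_up_value_example[ROUND_UP_VALUE (10, 8) == 6 ? 1 : -1];
extern char round_up_example[ROUND_UP (10, 8) == 16 ? 1 : -1];
extern char round_up_exact_example[ROUND_UP (16, 8) == 16 ? 1 : -1];
#endif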
/* A page_entry records the status of an allocation page.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* How many collections we've survived.  */
  size_t survived;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;
} page_entry;
/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* The zone allocation structure.  */
struct alloc_zone
{
  /* Name of the zone.  */
  const char *name;

  /* Linked list of pages in a zone.  */
  struct page_entry *pages;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;
} main_zone;

struct alloc_zone *rtl_zone;
struct alloc_zone *garbage_zone;
struct alloc_zone *tree_zone;
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16
static int ggc_allocated_p (const void *);
static char *alloc_anon (char *, size_t, struct alloc_zone *);
static struct page_entry * alloc_small_page (struct alloc_zone *);
static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
static void free_page (struct page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short MEM_STAT_DECL);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void check_cookies (void);
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->type == 1)
    return true;

  return false;
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  VALGRIND_MALLOCLIKE_BLOCK (page, size, 0, 0);

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
/* Allocate a new small-object page for ZONE and return an entry for it.  */

static inline struct page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
      page = entry->page;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = zone->free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      zone->free_pages = f;

      entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
    }

  entry->next = 0;
  entry->bytes = G.pagesize;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = false;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + G.pagesize - 1);

  return entry;
}
/* Allocate a large page of size SIZE in ZONE.  */

static inline struct page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  size = ROUND_UP (size, 1024);
  page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
  entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);

  entry->next = 0;
  entry->bytes = size;
  entry->page = page;
  entry->context_depth = zone->context_depth;
  entry->large_p = true;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
	     (PTR) entry, page, page + size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
	     entry->page, entry->page + entry->bytes - 1);

  if (entry->large_p)
    {
      free (entry->page);
      VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
    }
  else
    {
      /* Mark the page as inaccessible.  Discard the handle to
	 avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

      entry->next = entry->zone->free_pages;
      entry->zone->free_pages = entry;
    }
}
/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;
  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}
/* Place CHUNK of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
{
  size_t bin;

  bin = SIZE_BIN_DOWN (size);
  if (bin > NUM_FREE_BINS)
    bin = 0;
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
    abort ();
  chunk->magic = DEADCHUNK_MAGIC;
#endif
  chunk->u.next_free = zone->free_chunks[bin];
  zone->free_chunks[bin] = chunk;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}
/* Allocate a chunk of memory of SIZE bytes.  */

static void *
ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type
		  MEM_STAT_DECL)
{
  size_t bin = 0;
  size_t lsize = 0;
  struct page_entry *entry;
  struct alloc_chunk *chunk, *lchunk, **pp;
  void *result;

  /* Align size, so that we're assured of aligned allocations.  */
  if (size < FREE_BIN_DELTA)
    size = FREE_BIN_DELTA;
  size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Large objects are handled specially.  */
  if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
    {
      size = ROUND_UP (size, 1024);
      entry = alloc_large_page (size, zone);
      entry->survived = 0;
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 1;
      chunk->size = CEIL (size, 1024);

      goto found;
    }

  /* First look for a tiny object already segregated into its own
     size bucket.  */
  bin = SIZE_BIN_UP (size);
  if (bin <= NUM_FREE_BINS)
    {
      chunk = zone->free_chunks[bin];
      if (chunk)
	{
	  zone->free_chunks[bin] = chunk->u.next_free;
	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->u.next_free;
      chunk = *pp;
    }

  /* Failing that, allocate new storage.  */
  if (!chunk)
    {
      entry = alloc_small_page (zone);
      entry->survived = 0;
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = G.pagesize - CHUNK_OVERHEAD;
      chunk->large = 0;
    }
  else
    {
      *pp = chunk->u.next_free;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->large = 0;
    }

  /* Release extra memory from a chunk that's too big.  */
  lsize = chunk->size - size;
  if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;

      lsize -= CHUNK_OVERHEAD;
      lchunk = (struct alloc_chunk *)(chunk->u.data + size);
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
#ifdef COOKIE_CHECKING
      lchunk->magic = CHUNK_MAGIC;
#endif
      lchunk->type = 0;
      lchunk->mark = 0;
      lchunk->size = lsize;
      lchunk->large = 0;
      free_chunk (lchunk, lsize, zone);
      lsize = 0;
    }

#ifdef GATHER_STATISTICS
  ggc_record_overhead (size, lsize PASS_MEM_STAT);
#endif

  /* Calculate the object's address.  */
 found:
#ifdef COOKIE_CHECKING
  chunk->magic = CHUNK_MAGIC;
#endif
  chunk->type = 1;
  chunk->mark = 0;
  chunk->typecode = type;
  result = chunk->u.data;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* `Poison' the entire allocated object.  */
  memset (result, 0xaf, size);
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     unaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size + CHUNK_OVERHEAD;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
	     (void *)chunk, (unsigned long) size, result);

  return result;
}
/* Allocate a SIZE of chunk memory of GTE type, into an appropriate zone
   for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
		      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_1 (size, tree_zone, gte PASS_MEM_STAT);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte PASS_MEM_STAT);

    default:
      return ggc_alloc_zone_1 (size, &main_zone, gte PASS_MEM_STAT);
    }
}
/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_alloc_zone_1 (size, &main_zone, -1 PASS_MEM_STAT);
}

/* Zone allocation allocates into the specified zone.  */

void *
ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone MEM_STAT_DECL)
{
  return ggc_alloc_zone_1 (size, zone, -1 PASS_MEM_STAT);
}
/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
  memset ((CHUNK)->u.data, 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#endif

/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);

  /* Poison the chunk.  */
  poison_chunk (chunk, ggc_get_size (p));

  /* XXX: We only deal with explicitly freeing large objects ATM.  */
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->mark)
    return 1;
  chunk->mark = 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  return chunk->mark;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct alloc_chunk *chunk;

  chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->large)
    return chunk->size * 1024;

  return chunk->size;
}
/* Initialize the ggc-zone-mmap allocator.  */

void
init_ggc (void)
{
  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  rtl_zone = new_ggc_zone ("RTL zone");
  tree_zone = new_ggc_zone ("Tree zone");
  garbage_zone = new_ggc_zone ("Garbage zone");

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);
#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */

	p = alloc_anon (NULL, G.pagesize, &main_zone);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = main_zone.free_pages;
    main_zone.free_pages = e;
  }
#endif
}
/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char * name)
{
  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
  return new_zone;
}
/* Destroy a GGC zone.  */

void
destroy_ggc_zone (struct alloc_zone * dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */ ;

#ifdef ENABLE_CHECKING
  /* We should have found the zone in the list.  Anything else is fatal.  */
  if (!z)
    abort ();
#endif

  /* z is dead, baby.  z is dead.  */
  dead_zone->dead = true;
}
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ++(zone->context_depth);
  /* Die on wrap.  */
  if (main_zone.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

static void
ggc_pop_context_1 (struct alloc_zone *zone)
{
  unsigned long omask;
  unsigned depth;
  page_entry *p;

  depth = --(zone->context_depth);
  omask = (unsigned long)1 << (depth + 1);

  if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
    return;

  zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
  zone->context_depth_allocations &= omask - 1;
  zone->context_depth_collections &= omask - 1;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (p = zone->pages; p != NULL; p = p->next)
    if (p->context_depth > depth)
      p->context_depth = depth;
}
/* Pop all the zone contexts.  */

void
ggc_pop_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ggc_pop_context_1 (zone);
}
/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  page_entry **pp, *p, *next;
  struct alloc_chunk *chunk, *last_free, *end;
  size_t last_free_size, allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  pp = &zone->pages;
  for (p = zone->pages; p ; p = next)
    {
      next = p->next;

      /* Large pages are all or none affairs.  Either they are
	 completely empty, or they are completely full.

	 XXX: Should we bother to increment allocated?  */
      if (p->large_p)
	{
	  if (((struct alloc_chunk *)p->page)->mark == 1)
	    {
	      ((struct alloc_chunk *)p->page)->mark = 0;
	      pp = &p->next;
	    }
	  else
	    {
	      *pp = next;
#ifdef ENABLE_GC_CHECKING
	      /* Poison the page.  */
	      memset (p->page, 0xb5, p->bytes);
#endif
	      free_page (p);
	    }
	  continue;
	}

      /* This page has now survived another collection.  */
      p->survived++;

      /* Which leaves full and partial pages.  Step through all chunks,
	 consolidate those that are free and insert them into the free
	 lists.  Note that consolidation slows down collection
	 slightly.  */

      chunk = (struct alloc_chunk *)p->page;
      end = (struct alloc_chunk *)(p->page + G.pagesize);
      last_free = NULL;
      last_free_size = 0;
      nomarksinpage = true;
      do
	{
	  prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
	  if (chunk->mark || p->context_depth < zone->context_depth)
	    {
	      nomarksinpage = false;
	      if (last_free)
		{
		  last_free->type = 0;
		  last_free->size = last_free_size;
		  last_free->mark = 0;
		  poison_chunk (last_free, last_free_size);
		  free_chunk (last_free, last_free_size, zone);
		  last_free = NULL;
		}
	      if (chunk->mark)
		allocated += chunk->size + CHUNK_OVERHEAD;
	      chunk->mark = 0;
	    }
	  else
	    {
	      if (last_free)
		last_free_size += CHUNK_OVERHEAD + chunk->size;
	      else
		{
		  last_free = chunk;
		  last_free_size = chunk->size;
		}
	    }

	  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
	}
      while (chunk < end);

      if (nomarksinpage)
	{
	  *pp = next;
#ifdef ENABLE_GC_CHECKING
	  /* Poison the page.  */
	  memset (p->page, 0xb5, p->bytes);
#endif
	  free_page (p);
	  continue;
	}
      else if (last_free)
	{
	  last_free->type = 0;
	  last_free->size = last_free_size;
	  last_free->mark = 0;
	  poison_chunk (last_free, last_free_size);
	  free_chunk (last_free, last_free_size, zone);
	}
      pp = &p->next;
    }

  zone->allocated = allocated;
}
/* mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (zone->allocated_last_gc,
	 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (zone->allocated < allocated_last_gc + min_expand)
    return false;

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ",
	     zone->name, (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  /* Indicate that we've seen collections at this context depth.  */
  zone->context_depth_collections
    = ((unsigned long)1 << (zone->context_depth + 1)) - 1;

  if (need_marking)
    ggc_mark_roots ();
  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  page_entry *p;

  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->survived;
    }
  return survival/count;
}
/* Check the magic cookies all of the chunks contain, to make sure we
   aren't doing anything stupid, like stomping on alloc_chunk
   structures.  */

static inline void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
  page_entry *p;
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    for (p = zone->pages; p; p = p->next)
      {
	if (!p->large_p)
	  {
	    struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
	    struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
	    do
	      {
		if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
		  abort ();
		chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
	      }
	    while (chunk < end);
	  }
      }
#endif
}
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;
  float f;

  timevar_push (TV_GC);

  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
	{
	  zone->was_collected = false;
	  marked |= ggc_collect_1 (zone, !marked);
	}
    }

  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    {
	      f = calculate_average_page_survival (zone);
	      printf ("Average page survival in zone `%s' is %f\n",
		      zone->name, f);
	    }
	}
    }

  /* Since we don't mark zone at a time right now, marking in any
     zone means marking in every zone.  So we have to clear all the
     marks in all the zones that weren't collected already.  */
  if (marked)
    {
      page_entry *p;
      for (zone = G.zones; zone; zone = zone->next_zone)
	{
	  if (zone->was_collected)
	    continue;
	  for (p = zone->pages; p; p = p->next)
	    {
	      if (!p->large_p)
		{
		  struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
		  struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
		  do
		    {
		      prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
		      if (chunk->mark || p->context_depth < zone->context_depth)
			chunk->mark = 0;
		      chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
		    }
		  while (chunk < end);
		}
	      else
		((struct alloc_chunk *)p->page)->mark = 0;
	    }
	}
    }

  /* Free dead zones.  */
  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
    {
      if (zone->next_zone->dead)
	{
	  struct alloc_zone *dead_zone = zone->next_zone;

	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);

	  /* The zone must be empty.  */
	  if (dead_zone->allocated != 0)
	    abort ();

	  /* Unchain the dead zone, release all its pages and free it.  */
	  zone->next_zone = zone->next_zone->next_zone;
	  release_pages (dead_zone);
	  free (dead_zone);
	}
    }

  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */

void
ggc_print_statistics (void)
{
}
struct ggc_pch_ondisk
{
  size_t total;
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t written;
};
/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}
/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    d->d.total += size + CHUNK_OVERHEAD;
  else
    d->d.total += size;
}
/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  return d->d.total;
}
/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  d->base = (size_t) base;
}
/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
		      size_t size, bool is_string)
{
  char *result;

  result = (char *)d->base;
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
      if (chunk->large)
	d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
      else
	d->base += chunk->size + CHUNK_OVERHEAD;
      return result + CHUNK_OVERHEAD;
    }
  else
    {
      d->base += size;
      return result;
    }
}
/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string)
{
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
      size = ggc_get_size (x);
      if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
      d->written += size + CHUNK_OVERHEAD;
    }
  else
    {
      if (fwrite (x, size, 1, f) != 1)
	fatal_error ("can't write PCH file: %m");
    }
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
}
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  struct page_entry *entry;
  struct alloc_zone *pch_zone;

  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");
  entry = xcalloc (1, sizeof (struct page_entry));
  entry->bytes = d.total;
  entry->page = addr;
  entry->context_depth = 0;
  pch_zone = new_ggc_zone ("PCH zone");
  entry->zone = pch_zone;
  entry->next = entry->zone->pages;
  entry->zone->pages = entry;
}