/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */

#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
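/* Illustrative sketch, added for exposition and not part of the
   original allocator: the semantic effect of rounding a request up to
   the next power-of-two `order'.  The real code uses the size_lookup
   table defined further below for requests up to 256 bytes, plus a
   handful of extra non-power-of-two orders.  */
#if 0
static unsigned
example_size_to_order (size_t size)
{
  unsigned order = 0;
  while (((size_t) 1 << order) < size)
    ++order;
  return order;		/* e.g. size 40 => order 6, a 64-byte object.  */
}
#endif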
/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

#ifdef ENABLE_GC_CHECKING
#define GGC_POISON
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
#define GGC_ALWAYS_COLLECT
#endif

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
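/* Worked example, added for exposition: on a 32-bit host with 4K
   pages (lg_pagesize == 12) and PAGE_L1_BITS == 8, PAGE_L2_BITS is
   12, so a pointer splits as [8-bit L1 | 12-bit L2 | 12-bit offset].  */
#if 0
static void
example_lookup_split (void)
{
  const void *p = (const void *) 0x12345678;
  size_t L1 = LOOKUP_L1 (p);	/* 0x12345678 >> 24 == 0x12 */
  size_t L2 = LOOKUP_L2 (p);	/* (0x12345678 >> 12) & 0xfff == 0x345 */
}
#endif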
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
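/* Worked example, added for exposition: suppose the object size Z is
   24.  The odd part of 24 is 3, whose multiplicative inverse mod 2^32
   is 0xAAAAAAAB, and the power-of-two part of 24 contributes a shift
   of 3.  For the offset 48 (the third object on such a page):

       (48 * 0xAAAAAAAB) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24.

   compute_inverse, further below, derives these constants at startup.  */
#if 0
unsigned example_bit = (48U * 0xAAAAAAABU) >> 3;	/* == 2 */
#endif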
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct tree_decl),
  sizeof (struct tree_list),
  RTL_SIZE (2),			/* REG, MEM, PLUS, etc.  */
  RTL_SIZE (10),		/* INSN, CALL_INSN, JUMP_INSN */
};
/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  unsigned int mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of the size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
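/* Illustrative sketch, added for exposition: how the trailing
   one-element in_use_p array is sized in practice (BITMAP_SIZE is
   defined further below).  alloc_page performs exactly this
   computation before calling xcalloc.  */
#if 0
static page_entry *
example_alloc_entry (size_t num_objects)
{
  /* One extra bit holds the one-past-the-end sentinel.  */
  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
  return (page_entry *) xcalloc (1, sizeof (page_entry)
				 - sizeof (long) + bitmap_size);
}
#endif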
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))

/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16
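/* Illustrative sketch, added for exposition: the collection trigger
   implied by the two thresholds above.  After a fresh start, no
   collection can happen before roughly 1.3 * 4MB of live allocation.  */
#if 0
static int
example_should_collect (void)
{
  return G.allocated >= GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc;
}
#endif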
static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
static void set_page_table_entry PARAMS ((void *, page_entry *));
#ifdef USING_MMAP
static char *alloc_anon PARAMS ((char *, size_t));
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index PARAMS ((char *, char *));
static void set_page_group_in_use PARAMS ((page_group *, char *));
static void clear_page_group_in_use PARAMS ((page_group *, char *));
#endif
static struct page_entry * alloc_page PARAMS ((unsigned));
static void free_page PARAMS ((struct page_entry *));
static void release_pages PARAMS ((void));
static void clear_marks PARAMS ((void));
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
static void compute_inverse PARAMS ((unsigned));

#ifdef GGC_POISON
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));
/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;

  printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
	  (PTR) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (allocation, page)
     char *allocation, *page;
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
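/* Worked example, added for exposition: with 4K pages (lg_pagesize ==
   12), a page starting at allocation + 0x3000 has index 3, so
   set_page_group_in_use sets bit 3 of the group's in_use mask.  */
#if 0
size_t example_index = (size_t) 0x3000 >> 12;	/* == 3 */
#endif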
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
        {
          e = (struct page_entry *) xcalloc (1, page_entry_size);
          e->order = order;
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *)page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          if (tail_slop < sizeof (page_group))
            abort ();
          group = (page_group *)enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = G.free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = (struct page_entry *) xcalloc (1, page_entry_size);
              e->order = order;
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          G.free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%lu, data %p-%p\n",
             (PTR) entry, (unsigned long) OBJECT_SIZE (order), page,
             page + entry_size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (PTR) entry,
             entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages ()
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          len += p->bytes;
          next = p->next;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        G.bytes_mapped -= g->alloc_size;
        free (g->allocation);
      }
    else
      gp = &g->next;
#endif

  G.free_pages = NULL;
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */

static unsigned char size_lookup[257] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
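/* Illustrative sketch, added for exposition: how ggc_alloc, below,
   consumes this table.  E.g. size_lookup[40] == 6, so a 40-byte
   request is served as a 64-byte object, unless init_ggc has
   redirected that slot to one of the extra orders.  */
#if 0
unsigned example_order = size_lookup[40];	/* 6, before any remap */
#endif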
/* Allocate a chunk of memory of SIZE bytes; its contents are
   initially undefined.  */

void *
ggc_alloc (size)
     size_t size;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * OBJECT_SIZE (order);
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, OBJECT_SIZE (order));
#endif

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += OBJECT_SIZE (order);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
             (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
             (PTR) entry);

  return result;
}
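/* Illustrative sketch (hypothetical caller, not from this file): a
   40-byte request comes back rounded up; ggc_get_size, defined below,
   reports the rounded size of the object's order.  */
#if 0
void *example_obj = ggc_alloc (40);
size_t example_actual = ggc_get_size (example_obj);	/* OBJECT_SIZE of its order */
#endif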
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}
/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (order)
     unsigned order;
{
  unsigned size, inv, e;

  /* There can be only one object per "page" in a bucket for sizes
     larger than half a machine page; it will always have offset zero.  */
  if (OBJECT_SIZE (order) > G.pagesize/2)
    {
      if (OBJECTS_PER_PAGE (order) != 1)
        abort ();

      DIV_MULT (order) = 1;
      DIV_SHIFT (order) = 0;
      return;
    }

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
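/* Illustrative self-check, added for exposition: the Newton iteration
   above doubles the number of correct low-order bits of INV each
   step, so it converges within a handful of iterations for 32 bits.
   The resulting pair can be verified against OFFSET_TO_BIT like so.  */
#if 0
static void
example_check_inverse (unsigned order)
{
  size_t k;
  for (k = 0; k < OBJECTS_PER_PAGE (order); ++k)
    if (OFFSET_TO_BIT (k * OBJECT_SIZE (order), order) != k)
      abort ();
}
#endif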
/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  unsigned order;

  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize);
        if ((size_t)p & (G.pagesize - 1))
          abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
         so that we're sure of getting aligned memory.  */
      s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
        objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      o = size_lookup[OBJECT_SIZE (order)];
      for (i = OBJECT_SIZE (order); size_lookup[i] == o; --i)
        size_lookup[i] = order;
    }
}
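/* Worked example, added for exposition, with hypothetical sizes: if
   the tree_list extra order rounds to 24 bytes, the loop above
   rewrites size_lookup[17..24] to name that order instead of order 5,
   so 20-byte requests are packed 24 bytes apart rather than 32.
   Exact sizes depend on the host.  */
#if 0
unsigned example_o20 = size_lookup[20];	/* now an extra order, not 5 */
#endif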
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_PER_PAGE (p->order) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
                 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= p->save_in_use_p[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          if (p->context_depth > depth)
            p->context_depth = depth;

          /* If this page is now in the topmost context, and we'd
             saved its allocation state, restore it.  */
          else if (p->context_depth == depth && p->save_in_use_p)
            {
              ggc_recalculate_in_use_p (p);
              free (p->save_in_use_p);
              p->save_in_use_p = 0;
            }
        }
    }
}
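/* Illustrative sketch (hypothetical caller, not from this file):
   pushing a context protects everything allocated before the push
   from the collector, without registered roots; allocations made
   inside the context remain collectable until the context is popped.  */
#if 0
void
example_context_use (void)
{
  void *p = ggc_alloc (16);	/* allocated at depth 0 */
  ggc_push_context ();		/* depth 0 -> 1: p can no longer be freed */
  ggc_collect ();		/* only depth-1 pages are candidates,
				   assuming the expansion threshold is met */
  ggc_pop_context ();		/* leftover depth-1 objects migrate to depth 0 */
}
#endif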
/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
#ifdef ENABLE_CHECKING
          /* The data should be page-aligned.  */
          if ((size_t) p->page & (G.pagesize - 1))
            abort ();
#endif

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! p->save_in_use_p)
                p->save_in_use_p = xmalloc (bitmap_size);
              memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          /* Add all live objects on this page to the count of
             allocated memory.  */
          live_objects = num_objects - p->num_free_objects;

          G.allocated += OBJECT_SIZE (order) * live_objects;

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;
                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;
              p->next = G.pages[order];
              G.pages[order] = p;

              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}
#ifdef GGC_POISON
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                memset (p->page + i * size, 0xa5, size);
            }
        }
    }
}
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  sweep_pages ();

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_statistics ()
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-5s %10s  %10s  %10s\n",
           "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);

          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
        }
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
               (unsigned long) OBJECT_SIZE (i),
               SCALE (allocated), LABEL (allocated),
               SCALE (in_use), LABEL (in_use),
               SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
           SCALE (G.allocated), LABEL (G.allocated),
           SCALE (total_overhead), LABEL (total_overhead));
}