1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
#if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
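/* As a rough sketch of the strategy above (illustrative only, not
   part of this allocator): a request size maps to a power-of-two
   `order', and an object maps to a bit index within its page's
   in-use bitmap.  The helper names below are hypothetical.  */
#if 0
/* The smallest order is 2, i.e. four-byte objects.  */
static unsigned
example_order_for_size (size_t size)
{
  unsigned order = 2;
  while (size > ((size_t) 1 << order))
    order++;
  return order;
}

/* The bit position of OBJECT in its page-entry's in-use bitmap.  */
static size_t
example_object_index (const page_entry *entry, const char *object)
{
  return (size_t) (object - entry->page) >> entry->order;
}
#endif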
/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

/* If ENABLE_CHECKING is defined, enable GGC_POISON and
   GGC_ALWAYS_COLLECT automatically.  */
#ifdef ENABLE_CHECKING
#define GGC_POISON
#define GGC_ALWAYS_COLLECT
#endif
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif

/* Timing information for collect execution goes here.  */
int gc_time;

/* The "" allocated string.  */
char *empty_string;
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
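/* A worked example of the split, assuming 4K system pages on a 32-bit
   host (G.lg_pagesize == 12, so PAGE_L2_BITS == 12).  For
   p == 0xdeadbeef:

     L1 = (p >> 24) & 0xff  == 0xde
     L2 = (p >> 12) & 0xfff == 0xadb

   and the low 12 bits (0xeef) are the offset within the page.  */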
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned char context_depth;

  /* The lg of the size of objects allocated from this page.  */
  unsigned char order;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* Saved number of free objects for pages that aren't in the topmost
     context during collection.  */
  unsigned short save_num_free_objects;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
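/* Illustrative only: how the Nth object's in-use bit would be tested.
   `example_object_in_use_p' is a hypothetical helper, not part of this
   file; the allocator manipulates the bitmap inline below.  */
#if 0
static int
example_object_in_use_p (const page_entry *entry, size_t n)
{
  return (entry->in_use_p[n / HOST_BITS_PER_LONG]
          >> (n % HOST_BITS_PER_LONG)) & 1;
}
#endif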
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two-level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two-level page tables plus a linked
   list that disambiguates the top 32 bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[HOST_BITS_PER_PTR];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[HOST_BITS_PER_PTR];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and its base-2 logarithm.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* The current depth in the context stack.  */
  unsigned char context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP) && !defined(MAP_ANONYMOUS)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* The stream for debugging output.  */
  FILE *debug_file;
} G;
/* Compute DIVIDEND / DIVISOR, rounded up.  */
#define DIV_ROUND_UP(Dividend, Divisor) \
  (((Dividend) + (Divisor) - 1) / (Divisor))

/* The number of objects per allocation page, for objects of size
   2^ORDER.  */
#define OBJECTS_PER_PAGE(Order) \
  ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t) 1 << (Order)))

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))

/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
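/* Worked example, assuming 4K pages and a 32-bit host: for Order == 5
   (32-byte objects), OBJECTS_PER_PAGE (5) == 4096 / 32 == 128, and
   BITMAP_SIZE (128 + 1) == DIV_ROUND_UP (129, 32) * 4 == 20 bytes;
   the extra object is the one-past-the-end sentinel bit set by
   alloc_page.  Likewise, right after startup `allocated_last_gc' is
   bounded to 4MB, so the first collection cannot trigger before about
   1.3 * 4MB = 5.2MB has been allocated.  */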
static int ggc_allocated_p PROTO ((const void *));
static page_entry *lookup_page_table_entry PROTO ((const void *));
static void set_page_table_entry PROTO ((void *, page_entry *));
static char *alloc_anon PROTO ((char *, size_t));
static struct page_entry *alloc_page PROTO ((unsigned));
static void free_page PROTO ((struct page_entry *));
static void release_pages PROTO ((void));
static void clear_marks PROTO ((void));
static void sweep_pages PROTO ((void));

#ifdef GGC_POISON
static void poison PROTO ((void *, size_t));
static void poison_pages PROTO ((void));
#endif

void debug_print_page_list PROTO ((int));
/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof (*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
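/* Illustrative note on the round trip maintained above: after
   set_page_table_entry (page, entry), any address within that system
   page maps back to the same entry, since pages are page-aligned and
   offsets below the page size do not change L1 or L2:

     lookup_page_table_entry (page + offset) == entry,
         for 0 <= offset < G.pagesize.  */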
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", G.pages[order], G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", p, p->context_depth, p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef GGC_POISON
/* `Poisons' the region of memory starting at START and extending for
   LEN bytes.  */

static inline void
poison (start, len)
     void *start;
     size_t len;
{
  memset (start, 0xa5, len);
}
#endif
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
  char *page;

#ifdef HAVE_MMAP
#ifdef MAP_ANONYMOUS
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  if (page == (char *) MAP_FAILED)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#else
#ifdef HAVE_VALLOC
  page = (char *) valloc (size);
  if (!page)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP */

  return page;
}
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * (1 << order);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
  else
    {
      /* Actually allocate the memory, using mmap.  */
      page = alloc_anon (NULL, entry_size);
    }

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  /* Set the one-past-the-end in-use bit.  This acts as a sentinel as
     we increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
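  /* E.g. with 128 objects per page and 32-bit longs, this sets bit 0
     of in_use_p[4] -- the bit for object 128, one past the last real
     object -- so the hint scan in ggc_alloc_obj always terminates.  */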
  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%d, data %p-%p\n", entry,
             1 << order, page, page + entry_size - 1);

  return entry;
}
/* Put a page that is no longer needed on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", entry,
             entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static inline void
release_pages ()
{
#ifdef HAVE_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  p = G.free_pages;
  if (p == NULL)
    return;

  next = p->next;
  start = p->page;
  len = p->bytes;
  free (p);
  p = next;

  while (p != NULL)
    {
      next = p->next;

      /* Gather up adjacent pages so they are unmapped together.  */
      if (p->page == start + len)
        len += p->bytes;
      else
        {
          munmap (start, len);
          start = p->page;
          len = p->bytes;
        }

      free (p);
      p = next;
    }

  munmap (start, len);
#else
#ifdef HAVE_VALLOC
  page_entry *p, *next;

  for (p = G.free_pages; p; p = next)
    {
      next = p->next;
      free (p->page);
      free (p);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP */

  G.free_pages = NULL;
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is four bytes.  */

static unsigned char const size_lookup[257] =
{
  2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
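/* For instance, a 10-byte request looks up size_lookup[10] == 4 and is
   carved from a page of 2^4 == 16-byte objects; requests larger than
   256 bytes fall through to the rounding loop in ggc_alloc_obj.  */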
/* Allocate a chunk of memory of SIZE bytes.  If ZERO is non-zero, the
   memory is zeroed; otherwise, its contents are undefined.  */

void *
ggc_alloc_obj (size, zero)
     size_t size;
     int zero;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > ((size_t) 1 << order))
        order++;
    }
  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL
      || entry->num_free_objects == 0
      || entry->context_depth != G.context_depth)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint << order;
    }
  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object before zeroing the requested area,
     so that bytes beyond the end, if any, will not necessarily be zero.  */
  poison (result, 1 << order);
#endif
  if (zero)
    memset (result, 0, size);

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += (size_t) 1 << order;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%d, actual=%d at %p on %p\n",
             (int) size, 1 << order, result, entry);

  return result;
}
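/* Example use, illustrative only: allocating a zeroed node via

     p = ggc_alloc_obj (sizeof (struct tree_common), 1);

   returns storage rounded up to the next power of two, already
   cleared because ZERO is non-zero.  */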
/* If P is not marked, marks it and returns 0.  Otherwise returns 1.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = (((char *) p) - entry->page) >> entry->order;
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  G.allocated += (size_t) 1 << entry->order;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Mark P, but check first that it was allocated by the collector.  */

void
ggc_mark_if_gcable (p)
     void *p;
{
  if (p && ggc_allocated_p (p))
    ggc_set_mark (p);
}
/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#if defined (HAVE_MMAP) && !defined(MAP_ANONYMOUS)
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef HAVE_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    if ((size_t) p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */
        p = alloc_anon (NULL, G.pagesize);
        if ((size_t) p & (G.pagesize - 1))
          abort ();
      }
    munmap (p, G.pagesize);
  }
#endif

  empty_string = ggc_alloc_string ("", 0);
  ggc_add_string_root (&empty_string, 1);
}
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}

/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;
  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects);

      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          if (p->context_depth > depth)
            p->context_depth = depth;

          /* If this page is now in the topmost context, and we'd
             saved its allocation state, restore it.  */
          else if (p->context_depth == depth && p->save_in_use_p)
            {
              memcpy (p->in_use_p, p->save_in_use_p, bitmap_size);
              free (p->save_in_use_p);
              p->save_in_use_p = 0;
              p->num_free_objects = p->save_num_free_objects;
            }
        }
    }
}
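/* Typical use of the context machinery above, illustrative only:

     ggc_push_context ();
     ... allocate temporaries; ggc_collect () may run freely ...
     ggc_pop_context ();

   Objects allocated under the pushed context that survive to the pop
   are imported into the outer context.  */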
/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
#ifdef ENABLE_CHECKING
          /* The data should be page-aligned.  */
          if ((size_t) p->page & (G.pagesize - 1))
            abort ();
#endif

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth
              && ! p->save_in_use_p)
            {
              p->save_in_use_p = xmalloc (bitmap_size);
              memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
              p->save_num_free_objects = p->num_free_objects;
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by ggc_set_mark.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}
/* Free all empty pages.  Partially empty pages need no attention,
   because the `mark' bit doubles as an `in-use' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (p->num_free_objects == num_objects)
            {
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;
                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;
              p->next = G.pages[order];
              G.pages[order] = p;

              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);
    }
}
#ifdef GGC_POISON
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = (size_t) 1 << order;
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t i;
          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                poison (p->page + i * size, size);
            }
        }
    }
}
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  int time;

  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  time = get_run_time ();
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  We'll reaccumulate this while
     marking.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();
  sweep_pages ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  time = get_run_time () - time;
  gc_time += time;

  if (!quiet_flag)
    fprintf (stderr, "%luk in %.3f}",
             (unsigned long) G.allocated / 1024, time * 1e-6);
}
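/* A worked example of the trigger arithmetic: just after startup,
   `allocated_last_gc' is bounded below to 4MB, so the test above skips
   collection until about 1.3 * 4096k = 5325k of objects are live;
   after that, each collection requires another 30% of growth over
   whatever survived the previous one (or the 4MB floor, whichever is
   larger).  */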