* ggc-page.c (poison_pages): Don't be overzealous.
1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
#include "config.h"
#include "system.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"

#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif

#ifndef MAP_FAILED
#define MAP_FAILED -1
#endif

#if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
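/* Illustrative example (not part of the original sources): a request
   for 20 bytes is rounded up to the next power of two, 32 bytes, so
   its order is 5.  Assuming 4096-byte system pages, an order-5 page
   holds 4096/32 == 128 objects, whose allocation state is tracked by
   a 128-bit in-use map in the page-entry.  */
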
/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

/* If ENABLE_CHECKING is defined, enable GGC_POISON and
   GGC_ALWAYS_COLLECT automatically.  */
#ifdef ENABLE_CHECKING
#define GGC_POISON
#define GGC_ALWAYS_COLLECT
#endif

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* Timing information for collect execution goes into here.  */
extern int gc_time;

/* The shared empty string "", allocated in GC'able memory by init_ggc.  */
char *empty_string;
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows (32 pointer
   bits in total on a 32-bit host):

     msb +--------------+--------------+---------------------+ lsb
         | PAGE_L1_BITS | PAGE_L2_BITS | HOST_PAGE_SIZE_BITS |
         +--------------+--------------+---------------------+

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
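
/* Worked example (illustrative, assuming 4096-byte pages so that
   G.lg_pagesize == 12): PAGE_L2_BITS is 32 - 8 - 12 == 12.  For the
   pointer 0xdeadbeef, LOOKUP_L1 yields the top 8 bits, 0xde, and
   LOOKUP_L2 yields bits 12-23, (0xdeadbeef >> 12) & 0xfff == 0xadb;
   so on a 32-bit host the page-entry, if any, is found at
   G.lookup[0xde][0xadb].  */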
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned char context_depth;

  /* The lg of the size of objects allocated from this page.  */
  unsigned char order;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* Saved number of free objects for pages that aren't in the topmost
     context during collection.  */
  unsigned short save_num_free_objects;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[HOST_BITS_PER_PTR];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[HOST_BITS_PER_PTR];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* The current depth in the context stack.  */
  unsigned char context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP) && !defined(MAP_ANONYMOUS)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* Compute DIVIDEND / DIVISOR, rounded up.  */
#define DIV_ROUND_UP(Dividend, Divisor) \
  (((Dividend) + (Divisor) - 1) / (Divisor))

/* The number of objects per allocation page, for objects of size
   2^ORDER.  */
#define OBJECTS_PER_PAGE(Order) \
  ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t) 1 << (Order)))

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
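
/* Example of the arithmetic (illustrative only): with 4096-byte pages
   and order 5 (32-byte objects), OBJECTS_PER_PAGE (5) == 128.  On a
   host with 32-bit longs, BITMAP_SIZE (128 + 1) -- alloc_page asks
   for one extra bit for the one-past-the-end sentry -- comes to
   DIV_ROUND_UP (129, 32) * sizeof (long) == 5 * 4 == 20 bytes.  */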
/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
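
/* For instance, with the values above a collection happens only once
   G.allocated reaches 1.3 * G.allocated_last_gc; since
   `allocated_last_gc' is never allowed below 4MB, the heap must grow
   to roughly 5.2MB before the first collection is performed.  */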
static int ggc_allocated_p PROTO ((const void *));
static page_entry *lookup_page_table_entry PROTO ((const void *));
static void set_page_table_entry PROTO ((void *, page_entry *));
static char *alloc_anon PROTO ((char *, size_t));
static struct page_entry * alloc_page PROTO ((unsigned));
static void free_page PROTO ((struct page_entry *));
static void release_pages PROTO ((void));
static void clear_marks PROTO ((void));
static void sweep_pages PROTO ((void));

#ifdef GGC_POISON
static void poison PROTO ((void *, size_t));
static void poison_pages PROTO ((void));
#endif

void debug_print_page_list PROTO ((int));
/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof (*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", G.pages[order], G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", p, p->context_depth, p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
#ifdef GGC_POISON
/* `Poisons' the region of memory starting at START and extending for
   LEN bytes.  */

static inline void
poison (start, len)
     void *start;
     size_t len;
{
  memset (start, 0xa5, len);
}
#endif
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
  char *page;

#ifdef HAVE_MMAP
#ifdef MAP_ANONYMOUS
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  if (page == (char *) MAP_FAILED)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#else
#ifdef HAVE_VALLOC
  page = (char *) valloc (size);
  if (!page)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP */

  return page;
}
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * (1 << order);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;
      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
  else
    {
      /* Actually allocate the memory, using mmap.  */
      page = alloc_anon (NULL, entry_size);
    }

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
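
  /* Illustrative note (not in the original sources): with 128 objects
     per page and 32-bit longs, the sentry is bit 128 -- bit 0 of
     in_use_p[4] -- one past the 128 real object bits in words 0-3.  */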

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%d, data %p-%p\n", entry,
	     1 << order, page, page + entry_size - 1);

  return entry;
}
/* Free a page when it's no longer needed.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", entry,
	     entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the page cache to the system.  */

static inline void
release_pages ()
{
#ifdef HAVE_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  p = G.free_pages;
  if (p == NULL)
    return;

  next = p->next;
  start = p->page;
  len = p->bytes;
  free (p);
  p = next;

  while (p)
    {
      next = p->next;
      /* Gather up adjacent pages so they are unmapped together.  */
      if (p->page == start + len)
	len += p->bytes;
      else
	{
	  munmap (start, len);
	  start = p->page;
	  len = p->bytes;
	}
      free (p);
      p = next;
    }

  munmap (start, len);
#else
#ifdef HAVE_VALLOC
  page_entry *p, *next;

  for (p = G.free_pages; p ; p = next)
    {
      next = p->next;
      free (p->page);
      free (p);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP */

  G.free_pages = NULL;
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is four bytes.  */

static unsigned char const size_lookup[257] =
{
  2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
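
/* Example (illustrative): size_lookup[20] == 5, so in ggc_alloc_obj a
   20-byte request is served from an order-5 (32-byte) page; requests
   larger than 256 bytes instead fall back to a loop that searches
   upward from order 9.  */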
/* Allocate a chunk of memory of SIZE bytes.  If ZERO is non-zero, the
   memory is zeroed; otherwise, its contents are undefined.  */

void *
ggc_alloc_obj (size, zero)
     size_t size;
     int zero;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > ((size_t) 1 << order))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL
      || entry->num_free_objects == 0
      || entry->context_depth != G.context_depth)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint << order;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object before zeroing the requested
     area, so that bytes beyond the end, if any, will not necessarily
     be zero.  */
  poison (result, 1 << order);
#endif
  if (zero)
    memset (result, 0, size);

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += (size_t) 1 << order;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%d, actual=%d at %p on %p\n",
	     (int) size, 1 << order, result, entry);

  return result;
}
/* If P is not marked, marks it and returns 0.  Otherwise returns 1.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = (((char *) p) - entry->page) >> entry->order;
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  G.allocated += (size_t) 1 << entry->order;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Mark P if it was allocated in GC'able memory; do nothing otherwise.  */

void
ggc_mark_if_gcable (p)
     void *p;
{
  if (p && ggc_allocated_p (p))
    ggc_set_mark (p);
}
/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#if defined (HAVE_MMAP) && !defined(MAP_ANONYMOUS)
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef HAVE_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    if ((size_t) p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */
	p = alloc_anon (NULL, G.pagesize);
	if ((size_t) p & (G.pagesize - 1))
	  abort ();
      }
    munmap (p, G.pagesize);
  }
#endif

  empty_string = ggc_alloc_string ("", 0);
  ggc_add_string_root (&empty_string, 1);
}
void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}
void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects);

      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  if (p->context_depth > depth)
	    p->context_depth = depth;

	  /* If this page is now in the topmost context, and we'd
	     saved its allocation state, restore it.  */
	  else if (p->context_depth == depth && p->save_in_use_p)
	    {
	      memcpy (p->in_use_p, p->save_in_use_p, bitmap_size);
	      free (p->save_in_use_p);
	      p->save_in_use_p = 0;
	      p->num_free_objects = p->save_num_free_objects;
	    }
	}
    }
}
/* Prepare for a collection pass: back up the in-use bit vectors of
   pages that aren't in the topmost context, then clear all marks and
   free-object counts so that marking can reaccumulate them.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
#ifdef ENABLE_CHECKING
	  /* The data should be page-aligned.  */
	  if ((size_t) p->page & (G.pagesize - 1))
	    abort ();
#endif

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth
	      && ! p->save_in_use_p)
	    {
	      p->save_in_use_p = xmalloc (bitmap_size);
	      memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
	      p->save_num_free_objects = p->num_free_objects;
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by ggc_set_mark.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
/* Sweep the heap: free pages in the topmost context that have become
   empty, move full pages to the end of their lists, and move
   partially-filled pages to the head.  Pages in lower contexts are
   left alone.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (p->num_free_objects == num_objects)
	    {
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;
		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;
	      p->next = G.pages[order];
	      G.pages[order] = p;
	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);
    }
}
#ifdef GGC_POISON
/* Overwrite every unallocated object on collected pages with the
   poison pattern, so that stray use of freed memory is more likely
   to be noticed.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = (size_t) 1 << order;
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		poison (p->page + i * size, size);
	    }
	}
    }
}
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  int time;

  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  time = get_run_time ();
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  We'll reaccumulate this while
     marking.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();
  sweep_pages ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  time = get_run_time () - time;
  gc_time += time;

  if (!quiet_flag)
    fprintf (stderr, "%luk in %.3f}",
	     (unsigned long) G.allocated / 1024, time * 1e-6);
}