1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
#include "config.h"
#include "system.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"

#ifdef HAVE_MMAP_ANYWHERE
#include <sys/mman.h>
#endif

#ifndef MAP_FAILED
#define MAP_FAILED -1
#endif

#if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
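/* Purely illustrative sketch of the size-to-order mapping described
   above; the helper below is hypothetical and not part of the
   collector.  A 20-byte request rounds up to 32 bytes (order 5), a
   100-byte request to 128 bytes (order 7).  */
#if 0
static unsigned int
example_order (size)
     size_t size;
{
  unsigned int order = 2;	/* The smallest object size is 4 bytes.  */
  while (((size_t) 1 << order) < size)
    ++order;
  return order;			/* Served from the order-N page list.  */
}
#endif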
/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

#ifdef ENABLE_GC_CHECKING
#define GGC_POISON
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
#define GGC_ALWAYS_COLLECT
#endif

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif
/* Timing information for collect execution goes into here.  */
extern int gc_time;

/* The "" allocated string.  */
char *empty_string;
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                  HOST_PAGE_SIZE_BITS
                         32           |      |
     msb +----------------+----+------+------+ lsb
                            |     |
                 PAGE_L1_BITS     |
                                  |
                       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
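/* A worked example of the lookup arithmetic, for illustration only
   (it assumes a 4096-byte system page, i.e. G.lg_pagesize == 12, so
   PAGE_L2_BITS == 12):

	p			= 0xabcd1234
	LOOKUP_L1 (p)		= (p >> 24) & 0xff   = 0xab
	LOOKUP_L2 (p)		= (p >> 12) & 0xfff  = 0xcd1
	page offset (ignored)	=  p        & 0xfff  = 0x234

   so the page-entry for P lives at base[0xab][0xcd1].  */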
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[HOST_BITS_PER_PTR];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[HOST_BITS_PER_PTR];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;
/* Compute DIVIDEND / DIVISOR, rounded up.  */
#define DIV_ROUND_UP(Dividend, Divisor) \
  (((Dividend) + (Divisor) - 1) / (Divisor))

/* The number of objects per allocation page, for objects of size
   2^ORDER.  */
#define OBJECTS_PER_PAGE(Order) \
  ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t)1 << (Order)))

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
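/* Illustrative arithmetic only, assuming a 4096-byte page and 32-bit
   longs: OBJECTS_PER_PAGE (5) == 4096 / 32 == 128, and the bitmap for
   those objects plus the one-past-the-end sentinel bit needs
   BITMAP_SIZE (128 + 1) == DIV_ROUND_UP (129, 32) * sizeof (long)
   == 5 * 4 == 20 bytes.  */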
/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
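/* A hedged sketch of how these two knobs interact (see ggc_collect
   below): collection is skipped while

	G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc

   and `allocated_last_gc' is clamped to at least GGC_MIN_LAST_ALLOCATED,
   so with the defaults no collection happens until roughly
   1.3 * 4MB (about 5.2MB) has been allocated.  */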
static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
static void set_page_table_entry PARAMS ((void *, page_entry *));
static char *alloc_anon PARAMS ((char *, size_t));
static struct page_entry * alloc_page PARAMS ((unsigned));
static void free_page PARAMS ((struct page_entry *));
static void release_pages PARAMS ((void));
static void clear_marks PARAMS ((void));
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));

#ifdef GGC_POISON
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));
/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", G.pages[order], G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", p, p->context_depth, p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
  char *page;

#ifdef HAVE_MMAP_ANYWHERE
#ifdef MAP_ANONYMOUS
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  if (page == (char *) MAP_FAILED)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit(1);
    }
#else
#ifdef HAVE_VALLOC
  page = (char *) valloc (size);
  if (!page)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit(1);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP_ANYWHERE */

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  return page;
}
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * (1 << order);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;
      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
  else
    {
      /* Actually allocate the memory.  */
      page = alloc_anon (NULL, entry_size);
    }

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%d, data %p-%p\n", entry,
	     1 << order, page, page + entry_size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", entry,
	     entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages ()
{
#ifdef HAVE_MMAP_ANYWHERE
  page_entry *p, *next;
  char *start;
  size_t len;

  p = G.free_pages;
  if (p == NULL)
    return;

  next = p->next;
  start = p->page;
  len = p->bytes;
  free (p);
  p = next;

  while (p)
    {
      next = p->next;
      /* Gather up adjacent pages so they are unmapped together.  */
      if (p->page == start + len)
	len += p->bytes;
      else
	{
	  munmap (start, len);
	  G.bytes_mapped -= len;
	  start = p->page;
	  len = p->bytes;
	}

      free (p);
      p = next;
    }

  munmap (start, len);
  G.bytes_mapped -= len;
#else
#ifdef HAVE_VALLOC
  page_entry *p, *next;

  for (p = G.free_pages; p ; p = next)
    {
      next = p->next;
      free (p->page);
      G.bytes_mapped -= p->bytes;
      free (p);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP_ANYWHERE */

  G.free_pages = NULL;
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is four bytes.  */

static unsigned char const size_lookup[257] =
{
  2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
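/* Example values, for illustration only: size_lookup[20] == 5, so a
   20-byte request is served from the order-5 (32-byte) list, and
   size_lookup[256] == 8 covers the largest size handled without the
   fallback loop in ggc_alloc_obj below.  */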
/* Allocate a chunk of memory of SIZE bytes.  If ZERO is non-zero, the
   memory is zeroed; otherwise, its contents are undefined.  */

void *
ggc_alloc_obj (size, zero)
     size_t size;
     int zero;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > ((size_t) 1 << order))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint << order;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object before zeroing the requested area,
     so that bytes beyond the end, if any, will not necessarily be zero.  */
  memset (result, 0xaf, 1 << order);
#endif

  if (zero)
    memset (result, 0, size);

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += (size_t) 1 << order;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%d, actual=%d at %p on %p\n",
	     (int) size, 1 << order, result, entry);

  return result;
}
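/* Hedged usage sketch (the caller and its type are hypothetical):

     struct foo *f = (struct foo *) ggc_alloc_obj (sizeof (struct foo), 1);

   returns storage of at least the requested size, rounded up to the
   next power of two, zeroed because ZERO is non-zero; the rounded
   size is what gets charged to G.allocated.  */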
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = (((const char *) p) - entry->page) >> entry->order;
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  G.allocated += (size_t) 1 << entry->order;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
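/* Worked example of the index arithmetic above, for illustration only:
   on an order-5 page, an object at offset 0x160 from entry->page is
   object number 0x160 >> 5 == 11, i.e. bit 11 of in_use_p[0]
   (word 0, mask 1UL << 11), assuming 32-bit or wider longs.  */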
/* Mark P, but check first that it was allocated by the collector.  */

void
ggc_mark_if_gcable (p)
     const void *p;
{
  if (p && ggc_allocated_p (p))
    ggc_set_mark (p);
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return 1 << pe->order;
}
/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  G.pagesize = getpagesize();
  G.lg_pagesize = exact_log2 (G.pagesize);

#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef HAVE_MMAP_ANYWHERE
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    if ((size_t)p & (G.pagesize - 1))
      {
	/* How losing.  Discard this one and try another.  If we still
	   can't get something useful, give up.  */
	p = alloc_anon (NULL, G.pagesize);
	if ((size_t)p & (G.pagesize - 1))
	  abort ();
      }
    munmap (p, G.pagesize);
  }
#endif

  empty_string = ggc_alloc_string ("", 0);
  ggc_add_string_root (&empty_string, 1);
}
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_PER_PAGE (p->order) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < DIV_ROUND_UP (BITMAP_SIZE (num_objects),
			 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= p->save_in_use_p[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  if (p->context_depth > depth)
	    p->context_depth = depth;

	  /* If this page is now in the topmost context, and we'd
	     saved its allocation state, restore it.  */
	  else if (p->context_depth == depth && p->save_in_use_p)
	    {
	      ggc_recalculate_in_use_p (p);
	      free (p->save_in_use_p);
	      p->save_in_use_p = 0;
	    }
	}
    }
}
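/* Hedged sketch of the push/pop protocol; the caller code below is
   hypothetical and only illustrates the intended discipline.  */
#if 0
  obj = ggc_alloc_obj (64, 1);
  ggc_push_context ();
  /* A ggc_collect here cannot free `obj', even if no registered root
     reaches it, because its page now belongs to an outer context.
     Objects allocated after the push remain collectible, since their
     pages are created at the new, topmost context depth.  */
  ggc_collect ();
  ggc_pop_context ();
  /* `obj' is collectible again at the next collection.  */
#endif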
/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
#ifdef ENABLE_CHECKING
	  /* The data should be page-aligned.  */
	  if ((size_t) p->page & (G.pagesize - 1))
	    abort ();
#endif

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! p->save_in_use_p)
		p->save_in_use_p = xmalloc (bitmap_size);
	      memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
	}
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (p->num_free_objects == num_objects)
	    {
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;
		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;
	      p->next = G.pages[order];
	      G.pages[order] = p;
	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
		G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
	 other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}
#ifdef GGC_POISON
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = (size_t) 1 << order;
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		memset (p->page + i * size, 0xa5, size);
	    }
	}
    }
}
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  int time;

  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  time = get_run_time ();
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long)G.allocated / 1024);

  /* Zero the total allocated bytes.  We'll reaccumulate this while
     marking.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  sweep_pages ();

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  time = get_run_time () - time;
  gc_time += time;

  if (!quiet_flag)
    {
      fprintf (stderr, "%luk in %.3f}",
	       (unsigned long) G.allocated / 1024, time * 1e-6);
    }
}
/* Print allocation statistics.  */

void
ggc_page_print_statistics ()
{
  struct ggc_statistics stats;
  unsigned int i;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-4s%-16s%-16s\n", "Log", "Allocated", "Used");
  for (i = 0; i < HOST_BITS_PER_PTR; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_PER_PAGE (i) - p->num_free_objects) * (1 << i);
	}
      fprintf (stderr, "%-3d %-15lu %-15lu\n", i,
	       (unsigned long) allocated, (unsigned long) in_use);
    }

  /* Print out some global information.  */
  fprintf (stderr, "\nTotal bytes marked: %lu\n",
	   (unsigned long) G.allocated);
  fprintf (stderr, "Total bytes mapped: %lu\n",
	   (unsigned long) G.bytes_mapped);
}