1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
#include "config.h"
#include "system.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"

#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif

#ifndef MAP_FAILED
#define MAP_FAILED -1
#endif

#if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
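
/* As an illustrative example (assuming the defaults below and a 4K
   system page): a request for 20 bytes is rounded up to 32 bytes
   (order 5) and carved out of a page holding 4096 / 32 == 128 such
   objects; the collector can later reclaim the object simply by
   clearing its bit in that page's in-use bitmap.  */
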
/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

/* If ENABLE_CHECKING is defined, enable GGC_POISON and
   GGC_ALWAYS_COLLECT automatically.  */
#ifdef ENABLE_CHECKING
#define GGC_POISON
#define GGC_ALWAYS_COLLECT
#endif

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

/* Timing information for collect execution goes into here.  */
extern int gc_time;

/* The "" allocated string.  */
char *empty_string;

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                           HOST_PAGE_SIZE_BITS
                   32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                 PAGE_L1_BITS    |
                                 |
                       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
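
/* A worked example, assuming a 4K page size (G.lg_pagesize == 12, so
   PAGE_L2_BITS == 12): for p == 0xdeadbeef,

     LOOKUP_L1 (p) == 0xde    (bits 31-24)
     LOOKUP_L2 (p) == 0xadb   (bits 23-12)

   and the low twelve bits (0xeef) are the object's offset within its
   page, which the lookup ignores.  */
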
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned char context_depth;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* Saved number of free objects for pages that aren't in the topmost
     context during collection.  */
  unsigned short save_num_free_objects;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[HOST_BITS_PER_PTR];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[HOST_BITS_PER_PTR];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* The current depth in the context stack.  */
  unsigned char context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP) && !defined(MAP_ANONYMOUS)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* The stream used for debugging output.  */
  FILE *debug_file;
} G;

/* Compute DIVIDEND / DIVISOR, rounded up.  */
#define DIV_ROUND_UP(Dividend, Divisor) \
  ((Dividend + Divisor - 1) / Divisor)

/* The number of objects per allocation page, for objects of size
   2^ORDER.  */
#define OBJECTS_PER_PAGE(Order) \
  ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t)1 << (Order)))

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
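
/* For instance, assuming a 4K page on a 32-bit host: for ORDER == 5
   (32-byte objects), OBJECTS_PER_PAGE (5) == 4096 / 32 == 128, and
   BITMAP_SIZE (128 + 1) == DIV_ROUND_UP (129, 32) * 4 == 20 bytes --
   the "+ 1" leaves room for the one-past-the-end sentry bit that
   alloc_page sets.  */
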
/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
   test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)

static int ggc_allocated_p PROTO ((const void *));
static page_entry *lookup_page_table_entry PROTO ((const void *));
static void set_page_table_entry PROTO ((void *, page_entry *));
static char *alloc_anon PROTO ((char *, size_t));
static struct page_entry *alloc_page PROTO ((unsigned));
static void free_page PROTO ((struct page_entry *));
static void release_pages PROTO ((void));
static void clear_marks PROTO ((void));
static void sweep_pages PROTO ((void));

#ifdef GGC_POISON
static void poison PROTO ((void *, size_t));
static void poison_pages PROTO ((void));
#endif

void debug_print_page_list PROTO ((int));

/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", G.pages[order], G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", p, p->context_depth, p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef GGC_POISON
/* `Poisons' the region of memory starting at START and extending for
   LEN bytes.  */
static inline void
poison (start, len)
     void *start;
     size_t len;
{
  memset (start, 0xa5, len);
}
#endif

/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
  char *page;

#ifdef HAVE_MMAP
#ifdef MAP_ANONYMOUS
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  if (page == (char *) MAP_FAILED)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#else
#ifdef HAVE_VALLOC
  page = (char *) valloc (size);
  if (!page)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP */

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  return page;
}

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * (1 << order);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;
      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
  else
    {
      /* Actually allocate the memory, using mmap.  */
      page = alloc_anon (NULL, entry_size);
    }

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%d, data %p-%p\n", entry,
             1 << order, page, page + entry_size - 1);

  return entry;
}

/* Free a page when it's no longer needed.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", entry,
             entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the page cache to the system.  */

static inline void
release_pages ()
{
#ifdef HAVE_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  p = G.free_pages;
  if (p == NULL)
    return;

  next = p->next;
  start = p->page;
  len = p->bytes;
  free (p);
  p = next;

  while (p)
    {
      next = p->next;
      /* Gather up adjacent pages so they are unmapped together.  */
      if (p->page == start + len)
        len += p->bytes;
      else
        {
          munmap (start, len);
          G.bytes_mapped -= len;
          start = p->page;
          len = p->bytes;
        }
      free (p);
      p = next;
    }

  munmap (start, len);
  G.bytes_mapped -= len;
#else
#ifdef HAVE_VALLOC
  page_entry *p, *next;

  for (p = G.free_pages; p ; p = next)
    {
      next = p->next;
      free (p->page);
      G.bytes_mapped -= p->bytes;
      free (p);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP */

  G.free_pages = NULL;
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is four bytes.  */

static unsigned char const size_lookup[257] =
{
  2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
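
/* For example, size_lookup[24] == 5: a 24-byte request is satisfied
   by a 32-byte (2^5) object.  */
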
/* Allocate a chunk of memory of SIZE bytes.  If ZERO is non-zero, the
   memory is zeroed; otherwise, its contents are undefined.  */

void *
ggc_alloc_obj (size, zero)
     size_t size;
     int zero;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > ((size_t) 1 << order))
        order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL
      || entry->num_free_objects == 0
      || entry->context_depth != G.context_depth)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint << order;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object before zeroing the requested area,
     so that bytes beyond the end, if any, will not necessarily be zero.  */
  poison (result, 1 << order);
#endif
  if (zero)
    memset (result, 0, size);

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += (size_t) 1 << order;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%d, actual=%d at %p on %p\n",
             (int) size, 1 << order, result, entry);

  return result;
}

/* If P is not marked, marks it and returns 0.  Otherwise returns 1.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = (((char *) p) - entry->page) >> entry->order;
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  G.allocated += (size_t) 1 << entry->order;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

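/* Mark P, but only if it lies within memory obtained from the
   collector; pointers into static storage, the stack, or malloc'd
   memory are silently ignored.  */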
void
ggc_mark_if_gcable (p)
     void *p;
{
  if (p && ggc_allocated_p (p))
    ggc_set_mark (p);
}

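/* Return the actual size, in bytes, of the chunk containing P; this
   is the requested size rounded up to a power of two.  */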
size_t
ggc_get_size (p)
     void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return 1 << pe->order;
}

/* Initialize the ggc-mmap allocator.  */
void
init_ggc ()
{
  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#if defined (HAVE_MMAP) && !defined(MAP_ANONYMOUS)
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef HAVE_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */
        p = alloc_anon (NULL, G.pagesize);
        if ((size_t)p & (G.pagesize - 1))
          abort ();
      }
    munmap (p, G.pagesize);
  }
#endif

  empty_string = ggc_alloc_string ("", 0);
  ggc_add_string_root (&empty_string, 1);
}

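/* Begin a new allocation context.  Only objects allocated in the new,
   innermost context are eligible for collection until it is popped.  */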
void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}

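/* End the innermost allocation context, importing any surviving
   objects into the enclosing context.  */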
void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects);

      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          if (p->context_depth > depth)
            {
              p->context_depth = depth;
            }

          /* If this page is now in the topmost context, and we'd
             saved its allocation state, restore it.  */
          else if (p->context_depth == depth && p->save_in_use_p)
            {
              memcpy (p->in_use_p, p->save_in_use_p, bitmap_size);
              free (p->save_in_use_p);
              p->save_in_use_p = 0;
              p->num_free_objects = p->save_num_free_objects;
            }
        }
    }
}

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
#ifdef ENABLE_CHECKING
          /* The data should be page-aligned.  */
          if ((size_t) p->page & (G.pagesize - 1))
            abort ();
#endif

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth
              && ! p->save_in_use_p)
            {
              p->save_in_use_p = xmalloc (bitmap_size);
              memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
              p->save_num_free_objects = p->num_free_objects;
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}

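/* Free all empty pages in the topmost context, and restore the list
   invariant that non-full pages precede full ones.  */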
static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (p->num_free_objects == num_objects)
            {
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;
                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;
              p->next = G.pages[order];
              G.pages[order] = p;
              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);
    }
}

#ifdef GGC_POISON
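/* Overwrite every object that is not in use, so that stale references
   to collected storage are more likely to fail visibly.  */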
static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = (size_t) 1 << order;
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                poison (p->page + i * size, size);
            }
        }
    }
}
#endif

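/* Top level mark-and-sweep routine.  */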
void
ggc_collect ()
{
  int time;

  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  time = get_run_time ();
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  We'll reaccumulate this while
     marking.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();
  sweep_pages ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  time = get_run_time () - time;
  gc_time += time;

  if (!quiet_flag)
    {
      fprintf (stderr, "%luk in %.3f}",
               (unsigned long) G.allocated / 1024, time * 1e-6);
    }
}

/* Print allocation statistics.  */

void
ggc_page_print_statistics ()
{
  struct ggc_statistics stats;
  int i;

  /* Clear the statistics.  */
  bzero (&stats, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_statistics (stderr, &stats);

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-4s%-16s%-16s\n", "Log", "Allocated", "Used");
  for (i = 0; i < HOST_BITS_PER_PTR; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_PER_PAGE (i) - p->num_free_objects) * (1 << i);
        }
      fprintf (stderr, "%-3d %-15lu %-15lu\n", i,
               (unsigned long) allocated, (unsigned long) in_use);
    }

  /* Print out some global information.  */
  fprintf (stderr, "\nTotal bytes marked: %lu\n",
           (unsigned long) G.allocated);
  fprintf (stderr, "Total bytes mapped: %lu\n",
           (unsigned long) G.bytes_mapped);
}