/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"

#ifdef HAVE_MMAP_ANYWHERE
#include <sys/mman.h>
#endif

#ifndef MAP_FAILED
#define MAP_FAILED -1
#endif

#if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation order (the base-2 log of the
   object size) of the pages on it; i.e. all pages on a list allocate
   objects of the same size.  Pages are ordered on the list such that
   all non-full pages precede all full pages, with non-full pages
   arranged in order of decreasing context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
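
/* A worked example of the scheme above (illustrative only; it
   assumes a 4096-byte system page): a 20-byte allocation request is
   rounded up to 32 bytes, i.e. order 5, and satisfied from a page on
   the order-5 list.  That page holds 4096/32 = 128 objects and an
   in-use bitmap of 128 bits (plus a sentinel bit; see alloc_page).
   Marking or freeing an object only flips its bit in that bitmap;
   the page's data is never touched.  */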
/* Define GGC_POISON to poison memory marked unused by the collector.  */
#undef GGC_POISON

/* Define GGC_ALWAYS_COLLECT to perform collection every time
   ggc_collect is invoked.  Otherwise, collection is performed only
   when a significant amount of memory has been allocated since the
   last collection.  */
#undef GGC_ALWAYS_COLLECT

#ifdef ENABLE_GC_CHECKING
#define GGC_POISON
#endif
#ifdef ENABLE_GC_ALWAYS_COLLECT
#define GGC_ALWAYS_COLLECT
#endif

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

      msb                                                  lsb
       +----------------+--------------+---------------------+
       |  PAGE_L1_BITS  | PAGE_L2_BITS | HOST_PAGE_SIZE_BITS |
       +----------------+--------------+---------------------+
                 (32 bits in all on 32-bit hosts)

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
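
/* For instance (illustrative, assuming a 4096-byte page, so
   G.lg_pagesize == 12 and PAGE_L2_BITS == 12): for the pointer
   0xdeadbeef, LOOKUP_L1 yields 0xde (the top 8 bits) and LOOKUP_L2
   yields 0xadb (the next 12 bits), so its page-entry lives at
   base[0xde][0xadb].  The low 12 bits, 0xeef, are the offset within
   the page and are ignored by the lookup.  */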
/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

  /* Saved in-use bit vector for pages that aren't in the topmost
     context during collection.  */
  unsigned long *save_in_use_p;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of the size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[HOST_BITS_PER_PTR];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[HOST_BITS_PER_PTR];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

  /* The stream for debugging output.  */
  FILE *debug_file;
} G;
/* Compute DIVIDEND / DIVISOR, rounded up.  */
#define DIV_ROUND_UP(Dividend, Divisor) \
  (((Dividend) + (Divisor) - 1) / (Divisor))

/* The number of objects per allocation page, for objects of size
   2^ORDER.  */
#define OBJECTS_PER_PAGE(Order) \
  ((Order) >= G.lg_pagesize ? 1 : G.pagesize / ((size_t)1 << (Order)))

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (DIV_ROUND_UP ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
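
/* Worked example (illustrative): with 4096-byte pages and 64-bit
   longs, order-4 (16-byte) objects give OBJECTS_PER_PAGE == 256 and
   BITMAP_SIZE (256 + 1) == DIV_ROUND_UP (257, 64) * 8 == 5 * 8 == 40
   bytes; the "+ 1" accounts for the sentinel bit set by alloc_page.  */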
/* Skip garbage collection if the current allocation is not at least
   this factor times the allocation at the end of the last collection.
   In other words, total allocation must expand by (this factor minus
   one) before collection is performed.  */
#define GGC_MIN_EXPAND_FOR_GC (1.3)

/* Keep `allocated_last_gc' at least 4MB, to prevent the memory
   expansion test from triggering too often when the heap is small.  */
#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
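
/* Taken together (illustrative arithmetic): since allocated_last_gc
   starts at the 4MB floor, ggc_collect does no work early in a
   compilation until G.allocated exceeds 1.3 * 4MB = 5.2MB.  */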
/* Allocate pages in chunks of this size, to throttle calls to mmap.
   The first page is used, the rest go onto the free list.  */
#define GGC_QUIRE_SIZE 16
static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
static void set_page_table_entry PARAMS ((void *, page_entry *));
static char *alloc_anon PARAMS ((char *, size_t));
static struct page_entry * alloc_page PARAMS ((unsigned));
static void free_page PARAMS ((struct page_entry *));
static void release_pages PARAMS ((void));
static void clear_marks PARAMS ((void));
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));

#ifdef GGC_POISON
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));
/* Returns non-zero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
          (PTR) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
              p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
  char *page;

#ifdef HAVE_MMAP_ANYWHERE
#ifdef MAP_ANONYMOUS
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
  page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  if (page == (char *) MAP_FAILED)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#else
#ifdef HAVE_VALLOC
  page = (char *) valloc (size);
  if (!page)
    {
      fputs ("Virtual memory exhausted!\n", stderr);
      exit (1);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP_ANYWHERE */

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  return page;
}
/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * (1 << order);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p ; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;
      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef HAVE_MMAP_ANYWHERE
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, entry_size * GGC_QUIRE_SIZE);
      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
        {
          e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
          e->bytes = entry_size;
          e->page = page + i*entry_size;
          e->next = f;
          f = e;
        }
      G.free_pages = f;
    }
#endif
  else
    page = alloc_anon (NULL, entry_size);

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
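
  /* For example (illustrative, assuming 64-bit longs): a page of 128
     objects has bits 0..127 for real objects, and this sets bit 128
     (word 2, bit 0).  A next_bit_hint that has run past the last
     object lands exactly on this always-set bit, so the hint probe in
     ggc_alloc fails and a full bitmap scan is done instead.  */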

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%d, data %p-%p\n",
             (PTR) entry, 1 << order, page, page + entry_size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (entry)
     page_entry *entry;
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (PTR) entry,
             entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  entry->next = G.free_pages;
  G.free_pages = entry;
}
/* Release the free page cache to the system.  */

static void
release_pages ()
{
  page_entry *p, *next;

#ifdef HAVE_MMAP_ANYWHERE
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }
#else
#ifdef HAVE_VALLOC

  for (p = G.free_pages; p; p = next)
    {
      next = p->next;
      free (p->page);
      G.bytes_mapped -= p->bytes;
      free (p);
    }
#endif /* HAVE_VALLOC */
#endif /* HAVE_MMAP_ANYWHERE */

  G.free_pages = NULL;
}
/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is four bytes.  */

static unsigned char const size_lookup[257] =
{
  2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
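
/* For instance, size_lookup[20] is 5, so a 20-byte request is served
   from an order-5 page and occupies a 32-byte slot.  A request for
   more than 256 bytes bypasses the table; its order is computed by
   the loop in ggc_alloc below.  */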
/* Allocate a chunk of memory of SIZE bytes.  The memory is not
   zeroed; its contents are undefined.  */

void *
ggc_alloc (size)
     size_t size;
{
  unsigned order, word, bit, object_offset;
  struct page_entry *entry;
  void *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > ((size_t) 1 << order))
        order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      /* If this is the only entry, it's also the tail.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;

      /* Put new pages at the head of the page list.  */
      new_entry->next = entry;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint << order;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      G.pages[order] = entry->next;
      entry->next = NULL;
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;

#ifdef GGC_POISON
  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, 1 << order);
#endif

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += (size_t) 1 << order;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%d, actual=%d at %p on %p\n",
             (int) size, 1 << order, result, (PTR) entry);

  return result;
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = (((const char *) p) - entry->page) >> entry->order;
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
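
  /* Worked example (illustrative, assuming 64-bit longs): on an
     order-5 page (32-byte objects), an object 352 bytes into the page
     has bit index 352 >> 5 == 11, so word 0 and mask 1UL << 11.  */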

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Mark P, but check first that it was allocated by the collector.  */

void
ggc_mark_if_gcable (p)
     const void *p;
{
  if (p && ggc_allocated_p (p))
    ggc_set_mark (p);
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return 1 << pe->order;
}
/* Initialize the ggc-mmap allocator.  */

void
init_ggc ()
{
  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#if defined (HAVE_MMAP_ANYWHERE) && !defined(MAP_ANONYMOUS)
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

  G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

#ifdef HAVE_MMAP_ANYWHERE
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    if ((size_t)p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */
        p = alloc_anon (NULL, G.pagesize);
        if ((size_t)p & (G.pagesize - 1))
          abort ();
      }
    munmap (p, G.pagesize);
  }
#endif
}
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth == 0)
    abort ();
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_PER_PAGE (p->order) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < DIV_ROUND_UP (BITMAP_SIZE (num_objects),
                         sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= p->save_in_use_p[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned order, depth;

  depth = --G.context_depth;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          if (p->context_depth > depth)
            p->context_depth = depth;

          /* If this page is now in the topmost context, and we'd
             saved its allocation state, restore it.  */
          else if (p->context_depth == depth && p->save_in_use_p)
            {
              ggc_recalculate_in_use_p (p);
              free (p->save_in_use_p);
              p->save_in_use_p = 0;
            }
        }
    }
}
/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
#ifdef ENABLE_CHECKING
          /* The data should be page-aligned.  */
          if ((size_t) p->page & (G.pagesize - 1))
            abort ();
#endif

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! p->save_in_use_p)
                p->save_in_use_p = (unsigned long *) xmalloc (bitmap_size);
              memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the in-use
             bits.  These will be adjusted by ggc_set_mark.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          /* Add all live objects on this page to the count of
             allocated memory.  */
          live_objects = num_objects - p->num_free_objects;

          G.allocated += ((size_t) 1 << order) * live_objects;

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;
                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;
              p->next = G.pages[order];
              G.pages[order] = p;
              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}
#ifdef GGC_POISON
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < HOST_BITS_PER_PTR; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = (size_t) 1 << order;
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                memset (p->page + i * size, 0xa5, size);
            }
        }
    }
}
#endif
/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
#ifndef GGC_ALWAYS_COLLECT
  if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
    return;
#endif

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  clear_marks ();
  ggc_mark_roots ();

#ifdef GGC_POISON
  poison_pages ();
#endif

  sweep_pages ();

  G.allocated_last_gc = G.allocated;
  if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
    G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
}
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_statistics ()
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-5s %10s %10s %10s\n",
           "Log", "Allocated", "Used", "Overhead");
  for (i = 0; i < HOST_BITS_PER_PTR; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_PER_PAGE (i) - p->num_free_objects) * (1 << i);

          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
        }
      fprintf (stderr, "%-5d %10ld%c %10ld%c %10ld%c\n", i,
               SCALE (allocated), LABEL (allocated),
               SCALE (in_use), LABEL (in_use),
               SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10ld%c %10ld%c %10ld%c\n", "Total",
           SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
           SCALE (G.allocated), LABEL (G.allocated),
           SCALE (total_overhead), LABEL (total_overhead));
}
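
/* A minimal usage sketch of the public entry points above, kept as a
   comment rather than compiled code; the variable names are
   hypothetical.  In the compiler proper, marking happens through
   roots registered with the collector (see ggc_mark_roots in
   ggc-common.c), not by calling ggc_set_mark by hand:

     init_ggc ();                  -- once, at startup
     p = ggc_alloc (sizeof *p);    -- allocate a collectable object
     ggc_push_context ();          -- objects allocated here ...
     q = ggc_alloc (64);
     ggc_pop_context ();           -- ... now belong to the outer context
     ggc_collect ();               -- mark reachable objects, sweep the
                                      rest and recycle their pages
*/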