1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
21 #include "config.h"
22 #include "system.h"
23 #include "tree.h"
24 #include "rtl.h"
25 #include "tm_p.h"
26 #include "toplev.h"
27 #include "varray.h"
28 #include "flags.h"
29 #include "ggc.h"
30 #include "timevar.h"
31 #include "params.h"
32 #ifdef ENABLE_VALGRIND_CHECKING
33 #include <valgrind.h>
34 #else
35 /* Avoid #ifdef:s when we can help it. */
36 #define VALGRIND_DISCARD(x)
37 #endif
39 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
40 file open. Prefer either to valloc. */
41 #ifdef HAVE_MMAP_ANON
42 # undef HAVE_MMAP_DEV_ZERO
44 # include <sys/mman.h>
45 # ifndef MAP_FAILED
46 # define MAP_FAILED -1
47 # endif
48 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
49 # define MAP_ANONYMOUS MAP_ANON
50 # endif
51 # define USING_MMAP
53 #endif
55 #ifdef HAVE_MMAP_DEV_ZERO
57 # include <sys/mman.h>
58 # ifndef MAP_FAILED
59 # define MAP_FAILED -1
60 # endif
61 # define USING_MMAP
63 #endif
65 #ifndef USING_MMAP
66 #define USING_MALLOC_PAGE_GROUPS
67 #endif
69 /* Strategy:
71 This garbage-collecting allocator allocates objects on one of a set
72 of pages. Each page can allocate objects of a single size only;
73 available sizes are powers of two starting at four bytes. The size
74 of an allocation request is rounded up to the next power of two
75 (`order'), and satisfied from the appropriate page.
77 Each page is recorded in a page-entry, which also maintains an
78 in-use bitmap of object positions on the page. This allows the
79 allocation state of a particular object to be flipped without
80 touching the page itself.
82 Each page-entry also has a context depth, which is used to track
83 pushing and popping of allocation contexts. Only objects allocated
84 in the current (highest-numbered) context may be collected.
86 Page entries are arranged in an array of singly-linked lists. The
87 array is indexed by the allocation size, in bits, of the pages on
88 it; i.e. all pages on a list allocate objects of the same size.
89 Pages are ordered on the list such that all non-full pages precede
90 all full pages, with non-full pages arranged in order of decreasing
91 context depth.
93 Empty pages (of all orders) are kept on a single page cache list,
94 and are considered first when new pages are required; they are
95 deallocated at the start of the next collection if they haven't
96 been recycled by then. */
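/* Editorial sketch, not part of the original GCC source: the bit-vector
   manipulation the strategy above relies on.  Marking or testing object N
   touches only the page-entry's in-use bitmap, never the page itself; the
   word/bit arithmetic below is the same as that used by ggc_alloc and
   ggc_set_mark later in this file.  The function names are hypothetical.  */

static void
example_bitmap_set (in_use, n)
     unsigned long *in_use;
     unsigned n;
{
  in_use[n / HOST_BITS_PER_LONG] |= (unsigned long) 1 << (n % HOST_BITS_PER_LONG);
}

static int
example_bitmap_test (in_use, n)
     const unsigned long *in_use;
     unsigned n;
{
  return (in_use[n / HOST_BITS_PER_LONG] >> (n % HOST_BITS_PER_LONG)) & 1;
}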
98 /* Define GGC_DEBUG_LEVEL to print debugging information.
99 0: No debugging output.
100 1: GC statistics only.
101 2: Page-entry allocations/deallocations as well.
102 3: Object allocations as well.
103 4: Object marks as well. */
104 #define GGC_DEBUG_LEVEL (0)
106 #ifndef HOST_BITS_PER_PTR
107 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
108 #endif
111 /* A two-level tree is used to look up the page-entry for a given
112 pointer. Two chunks of the pointer's bits are extracted to index
113 the first and second levels of the tree, as follows:
115                                   HOST_PAGE_SIZE_BITS
116                           32            |      |
117        msb +----------------+----+------+------+ lsb
118                             |    |
119                          PAGE_L1_BITS
120                                  |
121                                  PAGE_L2_BITS
123 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
124 pages are aligned on system page boundaries. The next most
125 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
126 index values in the lookup table, respectively.
128 For 32-bit architectures and the settings below, there are no
129 leftover bits. For architectures with wider pointers, the lookup
130 tree points to a list of pages, which must be scanned to find the
131 correct one. */
133 #define PAGE_L1_BITS (8)
134 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
135 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
136 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
138 #define LOOKUP_L1(p) \
139 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
141 #define LOOKUP_L2(p) \
142 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
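/* Editorial sketch, not part of the original GCC source: the same bit
   slicing as LOOKUP_L1/LOOKUP_L2, written out for an assumed 4096-byte
   system page (G.lg_pagesize == 12, hence PAGE_L2_BITS == 32 - 8 - 12 == 12).
   For p == 0x1234A678 this yields l1 == 0x12 and l2 == 0x34A; the low 12
   bits (0x678) are the offset within the page and play no part in the
   lookup.  The function name is hypothetical.  */

static void
example_split_address (p, l1, l2)
     size_t p;
     size_t *l1, *l2;
{
  *l1 = (p >> 24) & 0xff;	/* the top PAGE_L1_BITS bits */
  *l2 = (p >> 12) & 0xfff;	/* the next PAGE_L2_BITS bits */
}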
144 /* The number of objects per allocation page, for objects on a page of
145 the indicated ORDER. */
146 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
148 /* The size of an object on a page of the indicated ORDER. */
149 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
151 /* For speed, we avoid doing a general integer divide to locate the
152 offset in the allocation bitmap, by precalculating numbers M, S
153 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
154 within the page which is evenly divisible by the object size Z. */
155 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
156 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
157 #define OFFSET_TO_BIT(OFFSET, ORDER) \
158 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
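/* Editorial worked example, not part of the original GCC source: for a
   hypothetical object size of 24 == 3 * 2^3, compute_inverse below would
   store DIV_MULT == 0xAAAAAAAB (the inverse of 3 modulo 2^32) and
   DIV_SHIFT == 3.  For the in-page offset 48 (the object at index 2),
   (48 * 0xAAAAAAAB) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24, so
   OFFSET_TO_BIT recovers the object index without a division.  */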
160 /* The number of extra orders, not corresponding to power-of-two sized
161 objects. */
163 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
165 #define RTL_SIZE(NSLOTS) \
166 (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
168 /* The Ith entry is the maximum size of an object to be stored in the
169 Ith extra order. Adding a new entry to this array is the *only*
170 thing you need to do to add a new special allocation size. */
172 static const size_t extra_order_size_table[] = {
173 sizeof (struct tree_decl),
174 sizeof (struct tree_list),
175 RTL_SIZE (2), /* REG, MEM, PLUS, etc. */
176 RTL_SIZE (10), /* INSN, CALL_INSN, JUMP_INSN */
177 };
179 /* The total number of orders. */
181 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
183 /* We use this structure to determine the alignment required for
184 allocations. For power-of-two sized allocations, that's not a
185 problem, but it does matter for odd-sized allocations. */
187 struct max_alignment {
188 char c;
189 union {
190 HOST_WIDEST_INT i;
191 #ifdef HAVE_LONG_DOUBLE
192 long double d;
193 #else
194 double d;
195 #endif
196 } u;
197 };
199 /* The biggest alignment required. */
201 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
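/* Editorial sketch, not part of the original GCC source: the offsetof
   trick above works because a struct member is preceded by exactly enough
   padding to satisfy its alignment.  A simpler, hypothetical instance:  */

struct example_align { char c; double d; };

/* On a host where double must be 8-byte aligned,
   offsetof (struct example_align, d) is 8.  MAX_ALIGNMENT applies the same
   measurement to the widest integer and floating-point types above, so
   every odd-sized bucket is rounded up to an alignment that is safe for
   any object stored in it.  */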
203 /* The Ith entry is the number of objects on a page of order I. */
205 static unsigned objects_per_page_table[NUM_ORDERS];
207 /* The Ith entry is the size of an object on a page of order I. */
209 static size_t object_size_table[NUM_ORDERS];
211 /* The Ith entry is a pair of numbers (mult, shift) such that
212 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
213 for all k evenly divisible by OBJECT_SIZE(I). */
215 static struct
216 {
217 unsigned int mult;
218 unsigned int shift;
219 }
220 inverse_table[NUM_ORDERS];
222 /* A page_entry records the status of an allocation page. This
223 structure is dynamically sized to fit the bitmap in_use_p. */
224 typedef struct page_entry
225 {
226 /* The next page-entry with objects of the same size, or NULL if
227 this is the last page-entry. */
228 struct page_entry *next;
230 /* The number of bytes allocated. (This will always be a multiple
231 of the host system page size.) */
232 size_t bytes;
234 /* The address at which the memory is allocated. */
235 char *page;
237 #ifdef USING_MALLOC_PAGE_GROUPS
238 /* Back pointer to the page group this page came from. */
239 struct page_group *group;
240 #endif
242 /* Saved in-use bit vector for pages that aren't in the topmost
243 context during collection. */
244 unsigned long *save_in_use_p;
246 /* Context depth of this page. */
247 unsigned short context_depth;
249 /* The number of free objects remaining on this page. */
250 unsigned short num_free_objects;
252 /* A likely candidate for the bit position of a free object for the
253 next allocation from this page. */
254 unsigned short next_bit_hint;
256 /* The lg of the size of objects allocated from this page. */
257 unsigned char order;
259 /* A bit vector indicating whether or not objects are in use. The
260 Nth bit is one if the Nth object on this page is allocated. This
261 array is dynamically sized. */
262 unsigned long in_use_p[1];
263 } page_entry;
265 #ifdef USING_MALLOC_PAGE_GROUPS
266 /* A page_group describes a large allocation from malloc, from which
267 we parcel out aligned pages. */
268 typedef struct page_group
269 {
270 /* A linked list of all extant page groups. */
271 struct page_group *next;
273 /* The address we received from malloc. */
274 char *allocation;
276 /* The size of the block. */
277 size_t alloc_size;
279 /* A bitmask of pages in use. */
280 unsigned int in_use;
281 } page_group;
282 #endif
284 #if HOST_BITS_PER_PTR <= 32
286 /* On 32-bit hosts, we use a two level page table, as pictured above. */
287 typedef page_entry **page_table[PAGE_L1_SIZE];
289 #else
291 /* On 64-bit hosts, we use the same two level page tables plus a linked
292 list that disambiguates the top 32-bits. There will almost always be
293 exactly one entry in the list. */
294 typedef struct page_table_chain
295 {
296 struct page_table_chain *next;
297 size_t high_bits;
298 page_entry **table[PAGE_L1_SIZE];
299 } *page_table;
301 #endif
303 /* The rest of the global variables. */
304 static struct globals
305 {
306 /* The Nth element in this array is a page with objects of size 2^N.
307 If there are any pages with free objects, they will be at the
308 head of the list. NULL if there are no page-entries for this
309 object size. */
310 page_entry *pages[NUM_ORDERS];
312 /* The Nth element in this array is the last page with objects of
313 size 2^N. NULL if there are no page-entries for this object
314 size. */
315 page_entry *page_tails[NUM_ORDERS];
317 /* Lookup table for associating allocation pages with object addresses. */
318 page_table lookup;
320 /* The system's page size. */
321 size_t pagesize;
322 size_t lg_pagesize;
324 /* Bytes currently allocated. */
325 size_t allocated;
327 /* Bytes currently allocated at the end of the last collection. */
328 size_t allocated_last_gc;
330 /* Total amount of memory mapped. */
331 size_t bytes_mapped;
333 /* The current depth in the context stack. */
334 unsigned short context_depth;
336 /* A file descriptor open to /dev/zero for reading. */
337 #if defined (HAVE_MMAP_DEV_ZERO)
338 int dev_zero_fd;
339 #endif
341 /* A cache of free system pages. */
342 page_entry *free_pages;
344 #ifdef USING_MALLOC_PAGE_GROUPS
345 page_group *page_groups;
346 #endif
348 /* The file descriptor for debugging output. */
349 FILE *debug_file;
350 } G;
352 /* The size in bytes required to maintain a bitmap for the objects
353 on a page-entry. */
354 #define BITMAP_SIZE(Num_objects) \
355 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
357 /* Allocate pages in chunks of this size, to throttle calls to memory
358 allocation routines. The first page is used, the rest go onto the
359 free list. This cannot be larger than HOST_BITS_PER_INT for the
360 in_use bitmask for page_group. */
361 #define GGC_QUIRE_SIZE 16
363 static int ggc_allocated_p PARAMS ((const void *));
364 static page_entry *lookup_page_table_entry PARAMS ((const void *));
365 static void set_page_table_entry PARAMS ((void *, page_entry *));
366 #ifdef USING_MMAP
367 static char *alloc_anon PARAMS ((char *, size_t));
368 #endif
369 #ifdef USING_MALLOC_PAGE_GROUPS
370 static size_t page_group_index PARAMS ((char *, char *));
371 static void set_page_group_in_use PARAMS ((page_group *, char *));
372 static void clear_page_group_in_use PARAMS ((page_group *, char *));
373 #endif
374 static struct page_entry * alloc_page PARAMS ((unsigned));
375 static void free_page PARAMS ((struct page_entry *));
376 static void release_pages PARAMS ((void));
377 static void clear_marks PARAMS ((void));
378 static void sweep_pages PARAMS ((void));
379 static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
380 static void compute_inverse PARAMS ((unsigned));
382 #ifdef ENABLE_GC_CHECKING
383 static void poison_pages PARAMS ((void));
384 #endif
386 void debug_print_page_list PARAMS ((int));
388 /* Returns nonzero if P was allocated in GC'able memory. */
390 static inline int
391 ggc_allocated_p (p)
392 const void *p;
394 page_entry ***base;
395 size_t L1, L2;
397 #if HOST_BITS_PER_PTR <= 32
398 base = &G.lookup[0];
399 #else
400 page_table table = G.lookup;
401 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
402 while (1)
404 if (table == NULL)
405 return 0;
406 if (table->high_bits == high_bits)
407 break;
408 table = table->next;
410 base = &table->table[0];
411 #endif
413 /* Extract the level 1 and 2 indices. */
414 L1 = LOOKUP_L1 (p);
415 L2 = LOOKUP_L2 (p);
417 return base[L1] && base[L1][L2];
420 /* Traverse the page table and find the entry for a page.
421 Die (probably) if the object wasn't allocated via GC. */
423 static inline page_entry *
424 lookup_page_table_entry(p)
425 const void *p;
427 page_entry ***base;
428 size_t L1, L2;
430 #if HOST_BITS_PER_PTR <= 32
431 base = &G.lookup[0];
432 #else
433 page_table table = G.lookup;
434 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
435 while (table->high_bits != high_bits)
436 table = table->next;
437 base = &table->table[0];
438 #endif
440 /* Extract the level 1 and 2 indices. */
441 L1 = LOOKUP_L1 (p);
442 L2 = LOOKUP_L2 (p);
444 return base[L1][L2];
447 /* Set the page table entry for a page. */
449 static void
450 set_page_table_entry(p, entry)
451 void *p;
452 page_entry *entry;
454 page_entry ***base;
455 size_t L1, L2;
457 #if HOST_BITS_PER_PTR <= 32
458 base = &G.lookup[0];
459 #else
460 page_table table;
461 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
462 for (table = G.lookup; table; table = table->next)
463 if (table->high_bits == high_bits)
464 goto found;
466 /* Not found -- allocate a new table. */
467 table = (page_table) xcalloc (1, sizeof(*table));
468 table->next = G.lookup;
469 table->high_bits = high_bits;
470 G.lookup = table;
471 found:
472 base = &table->table[0];
473 #endif
475 /* Extract the level 1 and 2 indices. */
476 L1 = LOOKUP_L1 (p);
477 L2 = LOOKUP_L2 (p);
479 if (base[L1] == NULL)
480 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
482 base[L1][L2] = entry;
485 /* Prints the page-entry for object size ORDER, for debugging. */
487 void
488 debug_print_page_list (order)
489 int order;
491 page_entry *p;
492 printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
493 (PTR) G.page_tails[order]);
494 p = G.pages[order];
495 while (p != NULL)
497 printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
498 p->num_free_objects);
499 p = p->next;
501 printf ("NULL\n");
502 fflush (stdout);
505 #ifdef USING_MMAP
506 /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
507 (if non-null). The ifdef structure here is intended to cause a
508 compile error unless exactly one of the HAVE_* is defined. */
510 static inline char *
511 alloc_anon (pref, size)
512 char *pref ATTRIBUTE_UNUSED;
513 size_t size;
515 #ifdef HAVE_MMAP_ANON
516 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
517 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
518 #endif
519 #ifdef HAVE_MMAP_DEV_ZERO
520 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
521 MAP_PRIVATE, G.dev_zero_fd, 0);
522 #endif
524 if (page == (char *) MAP_FAILED)
526 perror ("virtual memory exhausted");
527 exit (FATAL_EXIT_CODE);
530 /* Remember that we allocated this memory. */
531 G.bytes_mapped += size;
533 /* Pretend we don't have access to the allocated pages. We'll enable
534 access to smaller pieces of the area in ggc_alloc. Discard the
535 handle to avoid handle leak. */
536 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
538 return page;
540 #endif
541 #ifdef USING_MALLOC_PAGE_GROUPS
542 /* Compute the index for this page into the page group. */
544 static inline size_t
545 page_group_index (allocation, page)
546 char *allocation, *page;
548 return (size_t) (page - allocation) >> G.lg_pagesize;
551 /* Set and clear the in_use bit for this page in the page group. */
553 static inline void
554 set_page_group_in_use (group, page)
555 page_group *group;
556 char *page;
558 group->in_use |= 1 << page_group_index (group->allocation, page);
561 static inline void
562 clear_page_group_in_use (group, page)
563 page_group *group;
564 char *page;
566 group->in_use &= ~(1 << page_group_index (group->allocation, page));
568 #endif
570 /* Allocate a new page for allocating objects of size 2^ORDER,
571 and return an entry for it. The entry is not added to the
572 appropriate page_table list. */
574 static inline struct page_entry *
575 alloc_page (order)
576 unsigned order;
578 struct page_entry *entry, *p, **pp;
579 char *page;
580 size_t num_objects;
581 size_t bitmap_size;
582 size_t page_entry_size;
583 size_t entry_size;
584 #ifdef USING_MALLOC_PAGE_GROUPS
585 page_group *group;
586 #endif
588 num_objects = OBJECTS_PER_PAGE (order);
589 bitmap_size = BITMAP_SIZE (num_objects + 1);
590 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
591 entry_size = num_objects * OBJECT_SIZE (order);
592 if (entry_size < G.pagesize)
593 entry_size = G.pagesize;
595 entry = NULL;
596 page = NULL;
598 /* Check the list of free pages for one we can use. */
599 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
600 if (p->bytes == entry_size)
601 break;
603 if (p != NULL)
605 /* Recycle the allocated memory from this page ... */
606 *pp = p->next;
607 page = p->page;
609 #ifdef USING_MALLOC_PAGE_GROUPS
610 group = p->group;
611 #endif
613 /* ... and, if possible, the page entry itself. */
614 if (p->order == order)
616 entry = p;
617 memset (entry, 0, page_entry_size);
619 else
620 free (p);
622 #ifdef USING_MMAP
623 else if (entry_size == G.pagesize)
625 /* We want just one page. Allocate a bunch of them and put the
626 extras on the freelist. (Can only do this optimization with
627 mmap for backing store.) */
628 struct page_entry *e, *f = G.free_pages;
629 int i;
631 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
633 /* This loop counts down so that the chain will be in ascending
634 memory order. */
635 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
637 e = (struct page_entry *) xcalloc (1, page_entry_size);
638 e->order = order;
639 e->bytes = G.pagesize;
640 e->page = page + (i << G.lg_pagesize);
641 e->next = f;
642 f = e;
645 G.free_pages = f;
647 else
648 page = alloc_anon (NULL, entry_size);
649 #endif
650 #ifdef USING_MALLOC_PAGE_GROUPS
651 else
653 /* Allocate a large block of memory and serve out the aligned
654 pages therein. This results in much less memory wastage
655 than the traditional implementation of valloc. */
657 char *allocation, *a, *enda;
658 size_t alloc_size, head_slop, tail_slop;
659 int multiple_pages = (entry_size == G.pagesize);
661 if (multiple_pages)
662 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
663 else
664 alloc_size = entry_size + G.pagesize - 1;
665 allocation = xmalloc (alloc_size);
667 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
668 head_slop = page - allocation;
669 if (multiple_pages)
670 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
671 else
672 tail_slop = alloc_size - entry_size - head_slop;
673 enda = allocation + alloc_size - tail_slop;
675 /* We allocated N pages, which are likely not aligned, leaving
676 us with N-1 usable pages. We plan to place the page_group
677 structure somewhere in the slop. */
678 if (head_slop >= sizeof (page_group))
679 group = (page_group *)page - 1;
680 else
682 /* We magically got an aligned allocation. Too bad, we have
683 to waste a page anyway. */
684 if (tail_slop == 0)
686 enda -= G.pagesize;
687 tail_slop += G.pagesize;
689 if (tail_slop < sizeof (page_group))
690 abort ();
691 group = (page_group *)enda;
692 tail_slop -= sizeof (page_group);
695 /* Remember that we allocated this memory. */
696 group->next = G.page_groups;
697 group->allocation = allocation;
698 group->alloc_size = alloc_size;
699 group->in_use = 0;
700 G.page_groups = group;
701 G.bytes_mapped += alloc_size;
703 /* If we allocated multiple pages, put the rest on the free list. */
704 if (multiple_pages)
706 struct page_entry *e, *f = G.free_pages;
707 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
709 e = (struct page_entry *) xcalloc (1, page_entry_size);
710 e->order = order;
711 e->bytes = G.pagesize;
712 e->page = a;
713 e->group = group;
714 e->next = f;
715 f = e;
717 G.free_pages = f;
720 #endif
722 if (entry == NULL)
723 entry = (struct page_entry *) xcalloc (1, page_entry_size);
725 entry->bytes = entry_size;
726 entry->page = page;
727 entry->context_depth = G.context_depth;
728 entry->order = order;
729 entry->num_free_objects = num_objects;
730 entry->next_bit_hint = 1;
732 #ifdef USING_MALLOC_PAGE_GROUPS
733 entry->group = group;
734 set_page_group_in_use (group, page);
735 #endif
737 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
738 increment the hint. */
739 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
740 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
742 set_page_table_entry (page, entry);
744 if (GGC_DEBUG_LEVEL >= 2)
745 fprintf (G.debug_file,
746 "Allocating page at %p, object size=%lu, data %p-%p\n",
747 (PTR) entry, (unsigned long) OBJECT_SIZE (order), page,
748 page + entry_size - 1);
750 return entry;
753 /* For a page that is no longer needed, put it on the free page list. */
755 static inline void
756 free_page (entry)
757 page_entry *entry;
759 if (GGC_DEBUG_LEVEL >= 2)
760 fprintf (G.debug_file,
761 "Deallocating page at %p, data %p-%p\n", (PTR) entry,
762 entry->page, entry->page + entry->bytes - 1);
764 /* Mark the page as inaccessible. Discard the handle to avoid handle
765 leak. */
766 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
768 set_page_table_entry (entry->page, NULL);
770 #ifdef USING_MALLOC_PAGE_GROUPS
771 clear_page_group_in_use (entry->group, entry->page);
772 #endif
774 entry->next = G.free_pages;
775 G.free_pages = entry;
778 /* Release the free page cache to the system. */
780 static void
781 release_pages ()
783 #ifdef USING_MMAP
784 page_entry *p, *next;
785 char *start;
786 size_t len;
788 /* Gather up adjacent pages so they are unmapped together. */
789 p = G.free_pages;
791 while (p)
793 start = p->page;
794 next = p->next;
795 len = p->bytes;
796 free (p);
797 p = next;
799 while (p && p->page == start + len)
801 next = p->next;
802 len += p->bytes;
803 free (p);
804 p = next;
807 munmap (start, len);
808 G.bytes_mapped -= len;
811 G.free_pages = NULL;
812 #endif
813 #ifdef USING_MALLOC_PAGE_GROUPS
814 page_entry **pp, *p;
815 page_group **gp, *g;
817 /* Remove all pages from free page groups from the list. */
818 pp = &G.free_pages;
819 while ((p = *pp) != NULL)
820 if (p->group->in_use == 0)
822 *pp = p->next;
823 free (p);
825 else
826 pp = &p->next;
828 /* Remove all free page groups, and release the storage. */
829 gp = &G.page_groups;
830 while ((g = *gp) != NULL)
831 if (g->in_use == 0)
833 *gp = g->next;
834 G.bytes_mapped -= g->alloc_size;
835 free (g->allocation);
837 else
838 gp = &g->next;
839 #endif
842 /* This table provides a fast way to determine ceil(log_2(size)) for
843 allocation requests. The minimum allocation size is eight bytes. */
845 static unsigned char size_lookup[257] =
846 {
847 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
848 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
849 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
850 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
851 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
852 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
853 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
854 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
855 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
856 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
857 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
858 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
859 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
860 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
861 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
862 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
863 8
864 };
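/* Editorial sketch, not part of the original GCC source: the computation
   the table above precomputes.  For sizes up to 256 bytes, size_lookup
   starts out as ceil(log2(size)) clamped to a minimum of 3 (eight bytes);
   init_ggc later retargets some entries at the extra, non-power-of-two
   orders.  The function name is hypothetical.  */

static unsigned
example_size_lookup (size)
     size_t size;
{
  unsigned order = 3;		/* the minimum allocation is eight bytes */

  while (((size_t) 1 << order) < size)
    order++;
  return order;			/* e.g. a 9-byte request -> order 4 (16 bytes) */
}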
866 /* Allocate a chunk of memory of SIZE bytes. Its contents are
867 undefined. */
869 void *
870 ggc_alloc (size)
871 size_t size;
873 unsigned order, word, bit, object_offset;
874 struct page_entry *entry;
875 void *result;
877 if (size <= 256)
878 order = size_lookup[size];
879 else
881 order = 9;
882 while (size > OBJECT_SIZE (order))
883 order++;
886 /* If there are non-full pages for this size allocation, they are at
887 the head of the list. */
888 entry = G.pages[order];
890 /* If there is no page for this object size, or all pages in this
891 context are full, allocate a new page. */
892 if (entry == NULL || entry->num_free_objects == 0)
894 struct page_entry *new_entry;
895 new_entry = alloc_page (order);
897 /* If this is the only entry, it's also the tail. */
898 if (entry == NULL)
899 G.page_tails[order] = new_entry;
901 /* Put new pages at the head of the page list. */
902 new_entry->next = entry;
903 entry = new_entry;
904 G.pages[order] = new_entry;
906 /* For a new page, we know the word and bit positions (in the
907 in_use bitmap) of the first available object -- they're zero. */
908 new_entry->next_bit_hint = 1;
909 word = 0;
910 bit = 0;
911 object_offset = 0;
913 else
915 /* First try to use the hint left from the previous allocation
916 to locate a clear bit in the in-use bitmap. We've made sure
917 that the one-past-the-end bit is always set, so if the hint
918 has run over, this test will fail. */
919 unsigned hint = entry->next_bit_hint;
920 word = hint / HOST_BITS_PER_LONG;
921 bit = hint % HOST_BITS_PER_LONG;
923 /* If the hint didn't work, scan the bitmap from the beginning. */
924 if ((entry->in_use_p[word] >> bit) & 1)
926 word = bit = 0;
927 while (~entry->in_use_p[word] == 0)
928 ++word;
929 while ((entry->in_use_p[word] >> bit) & 1)
930 ++bit;
931 hint = word * HOST_BITS_PER_LONG + bit;
934 /* Next time, try the next bit. */
935 entry->next_bit_hint = hint + 1;
937 object_offset = hint * OBJECT_SIZE (order);
940 /* Set the in-use bit. */
941 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
943 /* Keep a running total of the number of free objects. If this page
944 fills up, we may have to move it to the end of the list if the
945 next page isn't full. If the next page is full, all subsequent
946 pages are full, so there's no need to move it. */
947 if (--entry->num_free_objects == 0
948 && entry->next != NULL
949 && entry->next->num_free_objects > 0)
951 G.pages[order] = entry->next;
952 entry->next = NULL;
953 G.page_tails[order]->next = entry;
954 G.page_tails[order] = entry;
957 /* Calculate the object's address. */
958 result = entry->page + object_offset;
960 #ifdef ENABLE_GC_CHECKING
961 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
962 exact same semantics in presence of memory bugs, regardless of
963 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
964 handle to avoid handle leak. */
965 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
967 /* `Poison' the entire allocated object, including any padding at
968 the end. */
969 memset (result, 0xaf, OBJECT_SIZE (order));
971 /* Make the bytes after the end of the object inaccessible. Discard the
972 handle to avoid handle leak. */
973 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
974 OBJECT_SIZE (order) - size));
975 #endif
977 /* Tell Valgrind that the memory is there, but its content isn't
978 defined. The bytes at the end of the object are still marked
979 inaccessible. */
980 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
982 /* Keep track of how many bytes are being allocated. This
983 information is used in deciding when to collect. */
984 G.allocated += OBJECT_SIZE (order);
986 if (GGC_DEBUG_LEVEL >= 3)
987 fprintf (G.debug_file,
988 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
989 (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
990 (PTR) entry);
992 return result;
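/* Editorial usage sketch, not part of the original GCC source: a caller
   obtains GC-managed memory with ggc_alloc and must keep it reachable from
   something the mark phase visits (a registered root or an already-marked
   object), or the next ggc_collect may reclaim it.  The structure, variable
   and function names here are hypothetical; example_root stands for a
   pointer that has been registered as a GC root.  */

struct example_node { struct example_node *next; int value; };
static struct example_node *example_root;

static struct example_node *
example_push (value)
     int value;
{
  struct example_node *n
    = (struct example_node *) ggc_alloc (sizeof (struct example_node));

  n->value = value;
  n->next = example_root;
  example_root = n;		/* keeps the whole chain reachable */
  return n;
}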
995 /* If P is not marked, mark it and return false. Otherwise return true.
996 P must have been allocated by the GC allocator; it mustn't point to
997 static objects, stack variables, or memory allocated with malloc. */
999 int
1000 ggc_set_mark (p)
1001 const void *p;
1003 page_entry *entry;
1004 unsigned bit, word;
1005 unsigned long mask;
1007 /* Look up the page on which the object is alloced. If the object
1008 wasn't allocated by the collector, we'll probably die. */
1009 entry = lookup_page_table_entry (p);
1010 #ifdef ENABLE_CHECKING
1011 if (entry == NULL)
1012 abort ();
1013 #endif
1015 /* Calculate the index of the object on the page; this is its bit
1016 position in the in_use_p bitmap. */
1017 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1018 word = bit / HOST_BITS_PER_LONG;
1019 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1021 /* If the bit was previously set, skip it. */
1022 if (entry->in_use_p[word] & mask)
1023 return 1;
1025 /* Otherwise set it, and decrement the free object count. */
1026 entry->in_use_p[word] |= mask;
1027 entry->num_free_objects -= 1;
1029 if (GGC_DEBUG_LEVEL >= 4)
1030 fprintf (G.debug_file, "Marking %p\n", p);
1032 return 0;
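/* Editorial sketch, not part of the original GCC source: how a mark
   routine typically uses the return value above.  Because ggc_set_mark
   returns nonzero for an object that was already marked, a marker walking
   a chain of GC-allocated nodes (the node type here is hypothetical) can
   stop at the first node it has visited before, which keeps the mark
   phase linear even when chains share tails.  */

struct example_chain { struct example_chain *next; };

static void
example_mark_chain (p)
     struct example_chain *p;
{
  while (p != NULL && !ggc_set_mark (p))
    p = p->next;
}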
1035 /* Return 1 if P has been marked, zero otherwise.
1036 P must have been allocated by the GC allocator; it mustn't point to
1037 static objects, stack variables, or memory allocated with malloc. */
1039 int
1040 ggc_marked_p (p)
1041 const void *p;
1043 page_entry *entry;
1044 unsigned bit, word;
1045 unsigned long mask;
1047 /* Look up the page on which the object is alloced. If the object
1048 wasn't allocated by the collector, we'll probably die. */
1049 entry = lookup_page_table_entry (p);
1050 #ifdef ENABLE_CHECKING
1051 if (entry == NULL)
1052 abort ();
1053 #endif
1055 /* Calculate the index of the object on the page; this is its bit
1056 position in the in_use_p bitmap. */
1057 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1058 word = bit / HOST_BITS_PER_LONG;
1059 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1061 return (entry->in_use_p[word] & mask) != 0;
1064 /* Return the size of the gc-able object P. */
1066 size_t
1067 ggc_get_size (p)
1068 const void *p;
1070 page_entry *pe = lookup_page_table_entry (p);
1071 return OBJECT_SIZE (pe->order);
1074 /* Subroutine of init_ggc which computes the pair of numbers used to
1075 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1077 This algorithm is taken from Granlund and Montgomery's paper
1078 "Division by Invariant Integers using Multiplication"
1079 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1080 constants). */
1082 static void
1083 compute_inverse (order)
1084 unsigned order;
1086 unsigned size, inv, e;
1088 /* There can be only one object per "page" in a bucket for sizes
1089 larger than half a machine page; it will always have offset zero. */
1090 if (OBJECT_SIZE (order) > G.pagesize/2)
1092 if (OBJECTS_PER_PAGE (order) != 1)
1093 abort ();
1095 DIV_MULT (order) = 1;
1096 DIV_SHIFT (order) = 0;
1097 return;
1100 size = OBJECT_SIZE (order);
1101 e = 0;
1102 while (size % 2 == 0)
1104 e++;
1105 size >>= 1;
1108 inv = size;
1109 while (inv * size != 1)
1110 inv = inv * (2 - inv*size);
1112 DIV_MULT (order) = inv;
1113 DIV_SHIFT (order) = e;
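/* Editorial sketch, not part of the original GCC source: the same
   exact-division construction as compute_inverse, written as a
   self-contained check.  Assuming 32-bit unsigned int and that
   SIZE * MULTIPLE does not overflow, the Newton iteration below finds the
   inverse of the odd part of SIZE modulo 2^32, after which
   (offset * inv) >> e recovers offset / SIZE for any offset that is a
   multiple of SIZE.  The function name is hypothetical.  */

static int
example_check_inverse (size, multiple)
     unsigned int size, multiple;
{
  unsigned int e = 0, inv;
  unsigned int offset = size * multiple;	/* divisible by SIZE */

  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv * size);

  return ((offset * inv) >> e) == multiple;	/* expected to be true */
}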
1116 /* Initialize the ggc-mmap allocator. */
1117 void
1118 init_ggc ()
1120 unsigned order;
1122 G.pagesize = getpagesize();
1123 G.lg_pagesize = exact_log2 (G.pagesize);
1125 #ifdef HAVE_MMAP_DEV_ZERO
1126 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1127 if (G.dev_zero_fd == -1)
1128 abort ();
1129 #endif
1131 #if 0
1132 G.debug_file = fopen ("ggc-mmap.debug", "w");
1133 #else
1134 G.debug_file = stdout;
1135 #endif
1137 #ifdef USING_MMAP
1138 /* StunOS has an amazing off-by-one error for the first mmap allocation
1139 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1140 believe, is an unaligned page allocation, which would cause us to
1141 hork badly if we tried to use it. */
1143 char *p = alloc_anon (NULL, G.pagesize);
1144 struct page_entry *e;
1145 if ((size_t)p & (G.pagesize - 1))
1147 /* How losing. Discard this one and try another. If we still
1148 can't get something useful, give up. */
1150 p = alloc_anon (NULL, G.pagesize);
1151 if ((size_t)p & (G.pagesize - 1))
1152 abort ();
1155 /* We have a good page, might as well hold onto it... */
1156 e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
1157 e->bytes = G.pagesize;
1158 e->page = p;
1159 e->next = G.free_pages;
1160 G.free_pages = e;
1162 #endif
1164 /* Initialize the object size table. */
1165 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1166 object_size_table[order] = (size_t) 1 << order;
1167 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1169 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1171 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1172 so that we're sure of getting aligned memory. */
1173 s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
1174 object_size_table[order] = s;
1177 /* Initialize the objects-per-page and inverse tables. */
1178 for (order = 0; order < NUM_ORDERS; ++order)
1180 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1181 if (objects_per_page_table[order] == 0)
1182 objects_per_page_table[order] = 1;
1183 compute_inverse (order);
1186 /* Reset the size_lookup array to put appropriately sized objects in
1187 the special orders. All objects bigger than the previous power
1188 of two, but no greater than the special size, should go in the
1189 new order. */
1190 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1192 int o;
1193 int i;
1195 o = size_lookup[OBJECT_SIZE (order)];
1196 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1197 size_lookup[i] = order;
1201 /* Increment the `GC context'. Objects allocated in an outer context
1202 are never freed, eliminating the need to register their roots. */
1204 void
1205 ggc_push_context ()
1207 ++G.context_depth;
1209 /* Die on wrap. */
1210 if (G.context_depth == 0)
1211 abort ();
1214 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1215 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1217 static void
1218 ggc_recalculate_in_use_p (p)
1219 page_entry *p;
1221 unsigned int i;
1222 size_t num_objects;
1224 /* Because the past-the-end bit in in_use_p is always set, we
1225 pretend there is one additional object. */
1226 num_objects = OBJECTS_PER_PAGE (p->order) + 1;
1228 /* Reset the free object count. */
1229 p->num_free_objects = num_objects;
1231 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1232 for (i = 0;
1233 i < CEIL (BITMAP_SIZE (num_objects),
1234 sizeof (*p->in_use_p));
1235 ++i)
1237 unsigned long j;
1239 /* Something is in use if it is marked, or if it was in use in a
1240 context further down the context stack. */
1241 p->in_use_p[i] |= p->save_in_use_p[i];
1243 /* Decrement the free object count for every object allocated. */
1244 for (j = p->in_use_p[i]; j; j >>= 1)
1245 p->num_free_objects -= (j & 1);
1248 if (p->num_free_objects >= num_objects)
1249 abort ();
1252 /* Decrement the `GC context'. All objects allocated since the
1253 previous ggc_push_context are migrated to the outer context. */
1255 void
1256 ggc_pop_context ()
1258 unsigned order, depth;
1260 depth = --G.context_depth;
1262 /* Any remaining pages in the popped context are lowered to the new
1263 current context; i.e. objects allocated in the popped context and
1264 left over are imported into the previous context. */
1265 for (order = 2; order < NUM_ORDERS; order++)
1267 page_entry *p;
1269 for (p = G.pages[order]; p != NULL; p = p->next)
1271 if (p->context_depth > depth)
1272 p->context_depth = depth;
1274 /* If this page is now in the topmost context, and we'd
1275 saved its allocation state, restore it. */
1276 else if (p->context_depth == depth && p->save_in_use_p)
1278 ggc_recalculate_in_use_p (p);
1279 free (p->save_in_use_p);
1280 p->save_in_use_p = 0;
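/* Editorial usage sketch, not part of the original GCC source: the
   intended pairing of ggc_push_context and ggc_pop_context.  Pushing a
   context protects everything already allocated (now in an outer context)
   from collections performed during the nested work; only objects
   allocated after the push are candidates for collection, and whatever
   survives is merged back into the enclosing context by the pop.  The
   function name is hypothetical.  */

static void
example_nested_work ()
{
  ggc_push_context ();	/* existing objects no longer need registered roots */

  /* ... allocate, and possibly call ggc_collect, while doing the nested
     work; only objects allocated since the push can be reclaimed ...  */

  ggc_pop_context ();	/* surviving objects join the enclosing context */
}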
1286 /* Unmark all objects. */
1288 static inline void
1289 clear_marks ()
1291 unsigned order;
1293 for (order = 2; order < NUM_ORDERS; order++)
1295 size_t num_objects = OBJECTS_PER_PAGE (order);
1296 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1297 page_entry *p;
1299 for (p = G.pages[order]; p != NULL; p = p->next)
1301 #ifdef ENABLE_CHECKING
1302 /* The data should be page-aligned. */
1303 if ((size_t) p->page & (G.pagesize - 1))
1304 abort ();
1305 #endif
1307 /* Pages that aren't in the topmost context are not collected;
1308 nevertheless, we need their in-use bit vectors to store GC
1309 marks. So, back them up first. */
1310 if (p->context_depth < G.context_depth)
1312 if (! p->save_in_use_p)
1313 p->save_in_use_p = xmalloc (bitmap_size);
1314 memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
1317 /* Reset the number of free objects and clear the
1318 in-use bits. These will be adjusted by mark_obj. */
1319 p->num_free_objects = num_objects;
1320 memset (p->in_use_p, 0, bitmap_size);
1322 /* Make sure the one-past-the-end bit is always set. */
1323 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1324 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1329 /* Free all empty pages. Partially empty pages need no attention
1330 because the `mark' bit doubles as an `unused' bit. */
1332 static inline void
1333 sweep_pages ()
1335 unsigned order;
1337 for (order = 2; order < NUM_ORDERS; order++)
1339 /* The last page-entry to consider, regardless of entries
1340 placed at the end of the list. */
1341 page_entry * const last = G.page_tails[order];
1343 size_t num_objects = OBJECTS_PER_PAGE (order);
1344 size_t live_objects;
1345 page_entry *p, *previous;
1346 int done;
1348 p = G.pages[order];
1349 if (p == NULL)
1350 continue;
1352 previous = NULL;
1355 page_entry *next = p->next;
1357 /* Loop until all entries have been examined. */
1358 done = (p == last);
1360 /* Add all live objects on this page to the count of
1361 allocated memory. */
1362 live_objects = num_objects - p->num_free_objects;
1364 G.allocated += OBJECT_SIZE (order) * live_objects;
1366 /* Only objects on pages in the topmost context should get
1367 collected. */
1368 if (p->context_depth < G.context_depth)
1371 /* Remove the page if it's empty. */
1372 else if (live_objects == 0)
1374 if (! previous)
1375 G.pages[order] = next;
1376 else
1377 previous->next = next;
1379 /* Are we removing the last element? */
1380 if (p == G.page_tails[order])
1381 G.page_tails[order] = previous;
1382 free_page (p);
1383 p = previous;
1386 /* If the page is full, move it to the end. */
1387 else if (p->num_free_objects == 0)
1389 /* Don't move it if it's already at the end. */
1390 if (p != G.page_tails[order])
1392 /* Move p to the end of the list. */
1393 p->next = NULL;
1394 G.page_tails[order]->next = p;
1396 /* Update the tail pointer... */
1397 G.page_tails[order] = p;
1399 /* ... and the head pointer, if necessary. */
1400 if (! previous)
1401 G.pages[order] = next;
1402 else
1403 previous->next = next;
1404 p = previous;
1408 /* If we've fallen through to here, it's a page in the
1409 topmost context that is neither full nor empty. Such a
1410 page must precede pages at lesser context depth in the
1411 list, so move it to the head. */
1412 else if (p != G.pages[order])
1414 previous->next = p->next;
1415 p->next = G.pages[order];
1416 G.pages[order] = p;
1417 /* Are we moving the last element? */
1418 if (G.page_tails[order] == p)
1419 G.page_tails[order] = previous;
1420 p = previous;
1423 previous = p;
1424 p = next;
1426 while (! done);
1428 /* Now, restore the in_use_p vectors for any pages from contexts
1429 other than the current one. */
1430 for (p = G.pages[order]; p; p = p->next)
1431 if (p->context_depth != G.context_depth)
1432 ggc_recalculate_in_use_p (p);
1436 #ifdef ENABLE_GC_CHECKING
1437 /* Clobber all free objects. */
1439 static inline void
1440 poison_pages ()
1442 unsigned order;
1444 for (order = 2; order < NUM_ORDERS; order++)
1446 size_t num_objects = OBJECTS_PER_PAGE (order);
1447 size_t size = OBJECT_SIZE (order);
1448 page_entry *p;
1450 for (p = G.pages[order]; p != NULL; p = p->next)
1452 size_t i;
1454 if (p->context_depth != G.context_depth)
1455 /* Since we don't do any collection for pages in pushed
1456 contexts, there's no need to do any poisoning. And
1457 besides, the IN_USE_P array isn't valid until we pop
1458 contexts. */
1459 continue;
1461 for (i = 0; i < num_objects; i++)
1463 size_t word, bit;
1464 word = i / HOST_BITS_PER_LONG;
1465 bit = i % HOST_BITS_PER_LONG;
1466 if (((p->in_use_p[word] >> bit) & 1) == 0)
1468 char *object = p->page + i * size;
1470 /* Keep poison-by-write when we expect to use Valgrind,
1471 so the exact same memory semantics is kept, in case
1472 there are memory errors. We override this request
1473 below. */
1474 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
1475 memset (object, 0xa5, size);
1477 /* Drop the handle to avoid handle leak. */
1478 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
1484 #endif
1486 /* Top level mark-and-sweep routine. */
1488 void
1489 ggc_collect ()
1491 /* Avoid frequent unnecessary work by skipping collection if the
1492 total allocations haven't expanded much since the last
1493 collection. */
1494 size_t allocated_last_gc =
1495 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1497 size_t min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1499 if (G.allocated < allocated_last_gc + min_expand)
1500 return;
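/* Editorial worked example, not part of the original GCC source: with
   --param ggc-min-expand=30 and --param ggc-min-heapsize=4096 (KB), a
   compilation whose previous collection left 10 MB live gets
   allocated_last_gc == 10 MB (the 4 MB floor does not apply) and
   min_expand == 3 MB, so collections are skipped until G.allocated
   exceeds 13 MB.  */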
1502 timevar_push (TV_GC);
1503 if (!quiet_flag)
1504 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1506 /* Zero the total allocated bytes. This will be recalculated in the
1507 sweep phase. */
1508 G.allocated = 0;
1510 /* Release the pages we freed the last time we collected, but didn't
1511 reuse in the interim. */
1512 release_pages ();
1514 clear_marks ();
1515 ggc_mark_roots ();
1517 #ifdef ENABLE_GC_CHECKING
1518 poison_pages ();
1519 #endif
1521 sweep_pages ();
1523 G.allocated_last_gc = G.allocated;
1525 timevar_pop (TV_GC);
1527 if (!quiet_flag)
1528 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1531 /* Print allocation statistics. */
1532 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1533 ? (x) \
1534 : ((x) < 1024*1024*10 \
1535 ? (x) / 1024 \
1536 : (x) / (1024*1024))))
1537 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
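/* Editorial worked example, not part of the original GCC source: values
   below 10K print unscaled with a blank label, values below 10M print in
   kilobytes, and anything larger prints in megabytes; e.g. 15000 bytes
   prints as "14k" and 20971520 bytes prints as "20M".  */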
1539 void
1540 ggc_print_statistics ()
1542 struct ggc_statistics stats;
1543 unsigned int i;
1544 size_t total_overhead = 0;
1546 /* Clear the statistics. */
1547 memset (&stats, 0, sizeof (stats));
1549 /* Make sure collection will really occur. */
1550 G.allocated_last_gc = 0;
1552 /* Collect and print the statistics common across collectors. */
1553 ggc_print_common_statistics (stderr, &stats);
1555 /* Release free pages so that we will not count the bytes allocated
1556 there as part of the total allocated memory. */
1557 release_pages ();
1559 /* Collect some information about the various sizes of
1560 allocation. */
1561 fprintf (stderr, "\n%-5s %10s %10s %10s\n",
1562 "Size", "Allocated", "Used", "Overhead");
1563 for (i = 0; i < NUM_ORDERS; ++i)
1565 page_entry *p;
1566 size_t allocated;
1567 size_t in_use;
1568 size_t overhead;
1570 /* Skip empty entries. */
1571 if (!G.pages[i])
1572 continue;
1574 overhead = allocated = in_use = 0;
1576 /* Figure out the total number of bytes allocated for objects of
1577 this size, and how many of them are actually in use. Also figure
1578 out how much memory the page table is using. */
1579 for (p = G.pages[i]; p; p = p->next)
1581 allocated += p->bytes;
1582 in_use +=
1583 (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);
1585 overhead += (sizeof (page_entry) - sizeof (long)
1586 + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
1588 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
1589 (unsigned long) OBJECT_SIZE (i),
1590 SCALE (allocated), LABEL (allocated),
1591 SCALE (in_use), LABEL (in_use),
1592 SCALE (overhead), LABEL (overhead));
1593 total_overhead += overhead;
1595 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
1596 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1597 SCALE (G.allocated), LABEL(G.allocated),
1598 SCALE (total_overhead), LABEL (total_overhead));