1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "toplev.h"
29 #include "varray.h"
30 #include "flags.h"
31 #include "ggc.h"
32 #include "timevar.h"
33 #include "params.h"
34 #ifdef ENABLE_VALGRIND_CHECKING
35 #include <valgrind.h>
36 #else
37 /* Avoid #ifdef:s when we can help it. */
38 #define VALGRIND_DISCARD(x)
39 #endif
41 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
42 file open. Prefer either to valloc. */
43 #ifdef HAVE_MMAP_ANON
44 # undef HAVE_MMAP_DEV_ZERO
46 # include <sys/mman.h>
47 # ifndef MAP_FAILED
48 # define MAP_FAILED -1
49 # endif
50 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
51 # define MAP_ANONYMOUS MAP_ANON
52 # endif
53 # define USING_MMAP
55 #endif
57 #ifdef HAVE_MMAP_DEV_ZERO
59 # include <sys/mman.h>
60 # ifndef MAP_FAILED
61 # define MAP_FAILED -1
62 # endif
63 # define USING_MMAP
65 #endif
67 #ifndef USING_MMAP
68 #define USING_MALLOC_PAGE_GROUPS
69 #endif
71 /* Strategy:
73 This garbage-collecting allocator allocates objects on one of a set
74 of pages. Each page can allocate objects of a single size only;
75 available sizes are powers of two starting at four bytes. The size
76 of an allocation request is rounded up to the next power of two
77 (`order'), and satisfied from the appropriate page.
79 Each page is recorded in a page-entry, which also maintains an
80 in-use bitmap of object positions on the page. This allows the
81 allocation state of a particular object to be flipped without
82 touching the page itself.
84 Each page-entry also has a context depth, which is used to track
85 pushing and popping of allocation contexts. Only objects allocated
86 in the current (highest-numbered) context may be collected.
88 Page entries are arranged in an array of singly-linked lists. The
89 array is indexed by the allocation size, in bits, of the pages on
90 it; i.e. all pages on a list allocate objects of the same size.
91 Pages are ordered on the list such that all non-full pages precede
92 all full pages, with non-full pages arranged in order of decreasing
93 context depth.
95 Empty pages (of all orders) are kept on a single page cache list,
96 and are considered first when new pages are required; they are
97 deallocated at the start of the next collection if they haven't
98 been recycled by then. */
100 /* Define GGC_DEBUG_LEVEL to print debugging information.
101 0: No debugging output.
102 1: GC statistics only.
103 2: Page-entry allocations/deallocations as well.
104 3: Object allocations as well.
105 4: Object marks as well. */
106 #define GGC_DEBUG_LEVEL (0)
108 #ifndef HOST_BITS_PER_PTR
109 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
110 #endif
113 /* A two-level tree is used to look up the page-entry for a given
114 pointer. Two chunks of the pointer's bits are extracted to index
115 the first and second levels of the tree, as follows:
117                                    HOST_PAGE_SIZE_BITS
118                          32           |      |
119      msb +----------------+----+------+------+ lsb
120                           |    |
121                        PAGE_L1_BITS
122                                  |
123                               PAGE_L2_BITS
125 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
126 pages are aligned on system page boundaries. The next most
127 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
128 index values in the lookup table, respectively.
130 For 32-bit architectures and the settings below, there are no
131 leftover bits. For architectures with wider pointers, the lookup
132 tree points to a list of pages, which must be scanned to find the
133 correct one. */
135 #define PAGE_L1_BITS (8)
136 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
137 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
138 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
140 #define LOOKUP_L1(p) \
141 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
143 #define LOOKUP_L2(p) \
144 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
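/* A worked example (an editorial sketch, assuming a 4096-byte system page,
   i.e. G.lg_pagesize == 12, so PAGE_L2_BITS == 32 - 8 - 12 == 12):

     p = 0xA1B2C3D4
     LOOKUP_L1 (p) == (0xA1B2C3D4 >> 24) & 0xFF  == 0xA1
     LOOKUP_L2 (p) == (0xA1B2C3D4 >> 12) & 0xFFF == 0xB2C

   The remaining low 12 bits (0x3D4) are the object's offset within its
   page and are not used by the lookup at all.  */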
146 /* The number of objects per allocation page, for objects on a page of
147 the indicated ORDER. */
148 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
150 /* The number of objects in P. */
151 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
153 /* The size of an object on a page of the indicated ORDER. */
154 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
156 /* For speed, we avoid doing a general integer divide to locate the
157 offset in the allocation bitmap, by precalculating numbers M, S
158 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
159 within the page which is evenly divisible by the object size Z. */
160 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
161 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
162 #define OFFSET_TO_BIT(OFFSET, ORDER) \
163 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
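/* For example, for an order whose OBJECT_SIZE is 8, compute_inverse (below)
   produces mult == 1 and shift == 3, so

     OFFSET_TO_BIT (24, order) == (24 * 1) >> 3 == 3 == 24 / 8

   i.e. a 24-byte offset corresponds to object index 3 on the page.  */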
165 /* The number of extra orders, not corresponding to power-of-two sized
166 objects. */
168 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
170 #define RTL_SIZE(NSLOTS) \
171 (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
173 /* The Ith entry is the maximum size of an object to be stored in the
174 Ith extra order. Adding a new entry to this array is the *only*
175 thing you need to do to add a new special allocation size. */
177 static const size_t extra_order_size_table[] = {
178 sizeof (struct tree_decl),
179 sizeof (struct tree_list),
180 RTL_SIZE (2), /* REG, MEM, PLUS, etc. */
181 RTL_SIZE (10), /* INSN, CALL_INSN, JUMP_INSN */
182 };
184 /* The total number of orders. */
186 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
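/* For example, on a host with 32-bit pointers and the four entries in
   extra_order_size_table above, NUM_ORDERS == 32 + 4 == 36.  */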
188 /* We use this structure to determine the alignment required for
189 allocations. For power-of-two sized allocations, that's not a
190 problem, but it does matter for odd-sized allocations. */
192 struct max_alignment {
193 char c;
194 union {
195 HOST_WIDEST_INT i;
196 #ifdef HAVE_LONG_DOUBLE
197 long double d;
198 #else
199 double d;
200 #endif
201 } u;
202 };
204 /* The biggest alignment required. */
206 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
208 /* Compute the smallest nonnegative number which when added to X gives
209 a multiple of F. */
211 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
213 /* Compute the smallest multiple of F that is >= X. */
215 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
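/* For example, with f == 8:

     ROUND_UP (20, 8)       == CEIL (20, 8) * 8   == 3 * 8 == 24
     ROUND_UP_VALUE (20, 8) == 7 - ((7 + 20) % 8) == 7 - 3 == 4

   and indeed 20 + 4 == 24 is the next multiple of 8.  */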
217 /* The Ith entry is the number of objects on a page of order I. */
219 static unsigned objects_per_page_table[NUM_ORDERS];
221 /* The Ith entry is the size of an object on a page of order I. */
223 static size_t object_size_table[NUM_ORDERS];
225 /* The Ith entry is a pair of numbers (mult, shift) such that
226 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
227 for all k evenly divisible by OBJECT_SIZE(I). */
229 static struct
230 {
231 unsigned int mult;
232 unsigned int shift;
233 }
234 inverse_table[NUM_ORDERS];
236 /* A page_entry records the status of an allocation page. This
237 structure is dynamically sized to fit the bitmap in_use_p. */
238 typedef struct page_entry
239 {
240 /* The next page-entry with objects of the same size, or NULL if
241 this is the last page-entry. */
242 struct page_entry *next;
244 /* The number of bytes allocated. (This will always be a multiple
245 of the host system page size.) */
246 size_t bytes;
248 /* The address at which the memory is allocated. */
249 char *page;
251 #ifdef USING_MALLOC_PAGE_GROUPS
252 /* Back pointer to the page group this page came from. */
253 struct page_group *group;
254 #endif
256 /* Saved in-use bit vector for pages that aren't in the topmost
257 context during collection. */
258 unsigned long *save_in_use_p;
260 /* Context depth of this page. */
261 unsigned short context_depth;
263 /* The number of free objects remaining on this page. */
264 unsigned short num_free_objects;
266 /* A likely candidate for the bit position of a free object for the
267 next allocation from this page. */
268 unsigned short next_bit_hint;
270 /* The lg of size of objects allocated from this page. */
271 unsigned char order;
273 /* A bit vector indicating whether or not objects are in use. The
274 Nth bit is one if the Nth object on this page is allocated. This
275 array is dynamically sized. */
276 unsigned long in_use_p[1];
277 } page_entry;
279 #ifdef USING_MALLOC_PAGE_GROUPS
280 /* A page_group describes a large allocation from malloc, from which
281 we parcel out aligned pages. */
282 typedef struct page_group
283 {
284 /* A linked list of all extant page groups. */
285 struct page_group *next;
287 /* The address we received from malloc. */
288 char *allocation;
290 /* The size of the block. */
291 size_t alloc_size;
293 /* A bitmask of pages in use. */
294 unsigned int in_use;
295 } page_group;
296 #endif
298 #if HOST_BITS_PER_PTR <= 32
300 /* On 32-bit hosts, we use a two level page table, as pictured above. */
301 typedef page_entry **page_table[PAGE_L1_SIZE];
303 #else
305 /* On 64-bit hosts, we use the same two level page tables plus a linked
306 list that disambiguates the top 32-bits. There will almost always be
307 exactly one entry in the list. */
308 typedef struct page_table_chain
309 {
310 struct page_table_chain *next;
311 size_t high_bits;
312 page_entry **table[PAGE_L1_SIZE];
313 } *page_table;
315 #endif
317 /* The rest of the global variables. */
318 static struct globals
319 {
320 /* The Nth element in this array is a page with objects of size 2^N.
321 If there are any pages with free objects, they will be at the
322 head of the list. NULL if there are no page-entries for this
323 object size. */
324 page_entry *pages[NUM_ORDERS];
326 /* The Nth element in this array is the last page with objects of
327 size 2^N. NULL if there are no page-entries for this object
328 size. */
329 page_entry *page_tails[NUM_ORDERS];
331 /* Lookup table for associating allocation pages with object addresses. */
332 page_table lookup;
334 /* The system's page size. */
335 size_t pagesize;
336 size_t lg_pagesize;
338 /* Bytes currently allocated. */
339 size_t allocated;
341 /* Bytes currently allocated at the end of the last collection. */
342 size_t allocated_last_gc;
344 /* Total amount of memory mapped. */
345 size_t bytes_mapped;
347 /* The current depth in the context stack. */
348 unsigned short context_depth;
350 /* A file descriptor open to /dev/zero for reading. */
351 #if defined (HAVE_MMAP_DEV_ZERO)
352 int dev_zero_fd;
353 #endif
355 /* A cache of free system pages. */
356 page_entry *free_pages;
358 #ifdef USING_MALLOC_PAGE_GROUPS
359 page_group *page_groups;
360 #endif
362 /* The file descriptor for debugging output. */
363 FILE *debug_file;
364 } G;
366 /* The size in bytes required to maintain a bitmap for the objects
367 on a page-entry. */
368 #define BITMAP_SIZE(Num_objects) \
369 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
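/* For example, on a host where HOST_BITS_PER_LONG == 32 and sizeof (long)
   == 4, a page holding 512 objects needs BITMAP_SIZE (512 + 1)
   == CEIL (513, 32) * 4 == 68 bytes; the extra bit is the
   one-past-the-end sentinel set in alloc_page.  */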
371 /* Allocate pages in chunks of this size, to throttle calls to memory
372 allocation routines. The first page is used, the rest go onto the
373 free list. This cannot be larger than HOST_BITS_PER_INT for the
374 in_use bitmask for page_group. */
375 #define GGC_QUIRE_SIZE 16
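/* So, assuming a 4096-byte page size, each trip to the OS grabs
   16 * 4096 == 64K at a time; the first page satisfies the current
   request and the other fifteen are queued on G.free_pages.  */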
377 static int ggc_allocated_p PARAMS ((const void *));
378 static page_entry *lookup_page_table_entry PARAMS ((const void *));
379 static void set_page_table_entry PARAMS ((void *, page_entry *));
380 #ifdef USING_MMAP
381 static char *alloc_anon PARAMS ((char *, size_t));
382 #endif
383 #ifdef USING_MALLOC_PAGE_GROUPS
384 static size_t page_group_index PARAMS ((char *, char *));
385 static void set_page_group_in_use PARAMS ((page_group *, char *));
386 static void clear_page_group_in_use PARAMS ((page_group *, char *));
387 #endif
388 static struct page_entry * alloc_page PARAMS ((unsigned));
389 static void free_page PARAMS ((struct page_entry *));
390 static void release_pages PARAMS ((void));
391 static void clear_marks PARAMS ((void));
392 static void sweep_pages PARAMS ((void));
393 static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
394 static void compute_inverse PARAMS ((unsigned));
396 #ifdef ENABLE_GC_CHECKING
397 static void poison_pages PARAMS ((void));
398 #endif
400 void debug_print_page_list PARAMS ((int));
402 /* Returns nonzero if P was allocated in GC'able memory. */
404 static inline int
405 ggc_allocated_p (p)
406 const void *p;
408 page_entry ***base;
409 size_t L1, L2;
411 #if HOST_BITS_PER_PTR <= 32
412 base = &G.lookup[0];
413 #else
414 page_table table = G.lookup;
415 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
416 while (1)
418 if (table == NULL)
419 return 0;
420 if (table->high_bits == high_bits)
421 break;
422 table = table->next;
424 base = &table->table[0];
425 #endif
427 /* Extract the level 1 and 2 indices. */
428 L1 = LOOKUP_L1 (p);
429 L2 = LOOKUP_L2 (p);
431 return base[L1] && base[L1][L2];
434 /* Traverse the page table and find the entry for a page.
435 Die (probably) if the object wasn't allocated via GC. */
437 static inline page_entry *
438 lookup_page_table_entry (p)
439 const void *p;
441 page_entry ***base;
442 size_t L1, L2;
444 #if HOST_BITS_PER_PTR <= 32
445 base = &G.lookup[0];
446 #else
447 page_table table = G.lookup;
448 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
449 while (table->high_bits != high_bits)
450 table = table->next;
451 base = &table->table[0];
452 #endif
454 /* Extract the level 1 and 2 indices. */
455 L1 = LOOKUP_L1 (p);
456 L2 = LOOKUP_L2 (p);
458 return base[L1][L2];
461 /* Set the page table entry for a page. */
463 static void
464 set_page_table_entry (p, entry)
465 void *p;
466 page_entry *entry;
468 page_entry ***base;
469 size_t L1, L2;
471 #if HOST_BITS_PER_PTR <= 32
472 base = &G.lookup[0];
473 #else
474 page_table table;
475 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
476 for (table = G.lookup; table; table = table->next)
477 if (table->high_bits == high_bits)
478 goto found;
480 /* Not found -- allocate a new table. */
481 table = (page_table) xcalloc (1, sizeof(*table));
482 table->next = G.lookup;
483 table->high_bits = high_bits;
484 G.lookup = table;
485 found:
486 base = &table->table[0];
487 #endif
489 /* Extract the level 1 and 2 indices. */
490 L1 = LOOKUP_L1 (p);
491 L2 = LOOKUP_L2 (p);
493 if (base[L1] == NULL)
494 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
496 base[L1][L2] = entry;
499 /* Prints the page-entry for object size ORDER, for debugging. */
501 void
502 debug_print_page_list (order)
503 int order;
505 page_entry *p;
506 printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
507 (PTR) G.page_tails[order]);
508 p = G.pages[order];
509 while (p != NULL)
511 printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
512 p->num_free_objects);
513 p = p->next;
515 printf ("NULL\n");
516 fflush (stdout);
519 #ifdef USING_MMAP
520 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
521 (if non-null). The ifdef structure here is intended to cause a
522 compile error unless exactly one of the HAVE_* is defined. */
524 static inline char *
525 alloc_anon (pref, size)
526 char *pref ATTRIBUTE_UNUSED;
527 size_t size;
529 #ifdef HAVE_MMAP_ANON
530 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
531 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
532 #endif
533 #ifdef HAVE_MMAP_DEV_ZERO
534 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
535 MAP_PRIVATE, G.dev_zero_fd, 0);
536 #endif
538 if (page == (char *) MAP_FAILED)
540 perror ("virtual memory exhausted");
541 exit (FATAL_EXIT_CODE);
544 /* Remember that we allocated this memory. */
545 G.bytes_mapped += size;
547 /* Pretend we don't have access to the allocated pages. We'll enable
548 access to smaller pieces of the area in ggc_alloc. Discard the
549 handle to avoid handle leak. */
550 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
552 return page;
554 #endif
555 #ifdef USING_MALLOC_PAGE_GROUPS
556 /* Compute the index for this page into the page group. */
558 static inline size_t
559 page_group_index (allocation, page)
560 char *allocation, *page;
562 return (size_t) (page - allocation) >> G.lg_pagesize;
565 /* Set and clear the in_use bit for this page in the page group. */
567 static inline void
568 set_page_group_in_use (group, page)
569 page_group *group;
570 char *page;
572 group->in_use |= 1 << page_group_index (group->allocation, page);
575 static inline void
576 clear_page_group_in_use (group, page)
577 page_group *group;
578 char *page;
580 group->in_use &= ~(1 << page_group_index (group->allocation, page));
582 #endif
584 /* Allocate a new page for allocating objects of size 2^ORDER,
585 and return an entry for it. The entry is not added to the
586 appropriate page_table list. */
588 static inline struct page_entry *
589 alloc_page (order)
590 unsigned order;
592 struct page_entry *entry, *p, **pp;
593 char *page;
594 size_t num_objects;
595 size_t bitmap_size;
596 size_t page_entry_size;
597 size_t entry_size;
598 #ifdef USING_MALLOC_PAGE_GROUPS
599 page_group *group;
600 #endif
602 num_objects = OBJECTS_PER_PAGE (order);
603 bitmap_size = BITMAP_SIZE (num_objects + 1);
604 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
605 entry_size = num_objects * OBJECT_SIZE (order);
606 if (entry_size < G.pagesize)
607 entry_size = G.pagesize;
609 entry = NULL;
610 page = NULL;
612 /* Check the list of free pages for one we can use. */
613 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
614 if (p->bytes == entry_size)
615 break;
617 if (p != NULL)
619 /* Recycle the allocated memory from this page ... */
620 *pp = p->next;
621 page = p->page;
623 #ifdef USING_MALLOC_PAGE_GROUPS
624 group = p->group;
625 #endif
627 /* ... and, if possible, the page entry itself. */
628 if (p->order == order)
630 entry = p;
631 memset (entry, 0, page_entry_size);
633 else
634 free (p);
636 #ifdef USING_MMAP
637 else if (entry_size == G.pagesize)
639 /* We want just one page. Allocate a bunch of them and put the
640 extras on the freelist. (Can only do this optimization with
641 mmap for backing store.) */
642 struct page_entry *e, *f = G.free_pages;
643 int i;
645 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
647 /* This loop counts down so that the chain will be in ascending
648 memory order. */
649 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
651 e = (struct page_entry *) xcalloc (1, page_entry_size);
652 e->order = order;
653 e->bytes = G.pagesize;
654 e->page = page + (i << G.lg_pagesize);
655 e->next = f;
656 f = e;
659 G.free_pages = f;
661 else
662 page = alloc_anon (NULL, entry_size);
663 #endif
664 #ifdef USING_MALLOC_PAGE_GROUPS
665 else
667 /* Allocate a large block of memory and serve out the aligned
668 pages therein. This results in much less memory wastage
669 than the traditional implementation of valloc. */
671 char *allocation, *a, *enda;
672 size_t alloc_size, head_slop, tail_slop;
673 int multiple_pages = (entry_size == G.pagesize);
675 if (multiple_pages)
676 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
677 else
678 alloc_size = entry_size + G.pagesize - 1;
679 allocation = xmalloc (alloc_size);
681 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
682 head_slop = page - allocation;
683 if (multiple_pages)
684 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
685 else
686 tail_slop = alloc_size - entry_size - head_slop;
687 enda = allocation + alloc_size - tail_slop;
689 /* We allocated N pages, which are likely not aligned, leaving
690 us with N-1 usable pages. We plan to place the page_group
691 structure somewhere in the slop. */
692 if (head_slop >= sizeof (page_group))
693 group = (page_group *)page - 1;
694 else
696 /* We magically got an aligned allocation. Too bad, we have
697 to waste a page anyway. */
698 if (tail_slop == 0)
700 enda -= G.pagesize;
701 tail_slop += G.pagesize;
703 if (tail_slop < sizeof (page_group))
704 abort ();
705 group = (page_group *)enda;
706 tail_slop -= sizeof (page_group);
709 /* Remember that we allocated this memory. */
710 group->next = G.page_groups;
711 group->allocation = allocation;
712 group->alloc_size = alloc_size;
713 group->in_use = 0;
714 G.page_groups = group;
715 G.bytes_mapped += alloc_size;
717 /* If we allocated multiple pages, put the rest on the free list. */
718 if (multiple_pages)
720 struct page_entry *e, *f = G.free_pages;
721 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
723 e = (struct page_entry *) xcalloc (1, page_entry_size);
724 e->order = order;
725 e->bytes = G.pagesize;
726 e->page = a;
727 e->group = group;
728 e->next = f;
729 f = e;
731 G.free_pages = f;
734 #endif
736 if (entry == NULL)
737 entry = (struct page_entry *) xcalloc (1, page_entry_size);
739 entry->bytes = entry_size;
740 entry->page = page;
741 entry->context_depth = G.context_depth;
742 entry->order = order;
743 entry->num_free_objects = num_objects;
744 entry->next_bit_hint = 1;
746 #ifdef USING_MALLOC_PAGE_GROUPS
747 entry->group = group;
748 set_page_group_in_use (group, page);
749 #endif
751 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
752 increment the hint. */
753 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
754 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
756 set_page_table_entry (page, entry);
758 if (GGC_DEBUG_LEVEL >= 2)
759 fprintf (G.debug_file,
760 "Allocating page at %p, object size=%lu, data %p-%p\n",
761 (PTR) entry, (unsigned long) OBJECT_SIZE (order), page,
762 page + entry_size - 1);
764 return entry;
767 /* For a page that is no longer needed, put it on the free page list. */
769 static inline void
770 free_page (entry)
771 page_entry *entry;
773 if (GGC_DEBUG_LEVEL >= 2)
774 fprintf (G.debug_file,
775 "Deallocating page at %p, data %p-%p\n", (PTR) entry,
776 entry->page, entry->page + entry->bytes - 1);
778 /* Mark the page as inaccessible. Discard the handle to avoid handle
779 leak. */
780 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
782 set_page_table_entry (entry->page, NULL);
784 #ifdef USING_MALLOC_PAGE_GROUPS
785 clear_page_group_in_use (entry->group, entry->page);
786 #endif
788 entry->next = G.free_pages;
789 G.free_pages = entry;
792 /* Release the free page cache to the system. */
794 static void
795 release_pages ()
797 #ifdef USING_MMAP
798 page_entry *p, *next;
799 char *start;
800 size_t len;
802 /* Gather up adjacent pages so they are unmapped together. */
803 p = G.free_pages;
805 while (p)
807 start = p->page;
808 next = p->next;
809 len = p->bytes;
810 free (p);
811 p = next;
813 while (p && p->page == start + len)
815 next = p->next;
816 len += p->bytes;
817 free (p);
818 p = next;
821 munmap (start, len);
822 G.bytes_mapped -= len;
825 G.free_pages = NULL;
826 #endif
827 #ifdef USING_MALLOC_PAGE_GROUPS
828 page_entry **pp, *p;
829 page_group **gp, *g;
831 /* Remove all pages from free page groups from the list. */
832 pp = &G.free_pages;
833 while ((p = *pp) != NULL)
834 if (p->group->in_use == 0)
836 *pp = p->next;
837 free (p);
839 else
840 pp = &p->next;
842 /* Remove all free page groups, and release the storage. */
843 gp = &G.page_groups;
844 while ((g = *gp) != NULL)
845 if (g->in_use == 0)
847 *gp = g->next;
848 G.bytes_mapped -= g->alloc_size;
849 free (g->allocation);
851 else
852 gp = &g->next;
853 #endif
856 /* This table provides a fast way to determine ceil(log_2(size)) for
857 allocation requests. The minimum allocation size is eight bytes. */
859 static unsigned char size_lookup[257] =
860 {
861 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
862 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
863 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
864 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
865 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
866 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
867 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
868 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
869 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
870 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
871 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
872 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
873 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
874 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
875 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
876 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
877 8
878 };
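/* For example, size_lookup[20] == 5, so a 20-byte request is served from an
   order-5 (32-byte) bucket.  Note that init_ggc below rewrites parts of this
   table so that requests which fit one of the extra_order_size_table sizes
   use those tighter, non-power-of-two orders instead.  */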
880 /* Allocate a chunk of memory of SIZE bytes.  Its contents are initially
881 undefined; the memory is not zeroed.  */
883 void *
884 ggc_alloc (size)
885 size_t size;
887 unsigned order, word, bit, object_offset;
888 struct page_entry *entry;
889 void *result;
891 if (size <= 256)
892 order = size_lookup[size];
893 else
895 order = 9;
896 while (size > OBJECT_SIZE (order))
897 order++;
900 /* If there are non-full pages for this size allocation, they are at
901 the head of the list. */
902 entry = G.pages[order];
904 /* If there is no page for this object size, or all pages in this
905 context are full, allocate a new page. */
906 if (entry == NULL || entry->num_free_objects == 0)
908 struct page_entry *new_entry;
909 new_entry = alloc_page (order);
911 /* If this is the only entry, it's also the tail. */
912 if (entry == NULL)
913 G.page_tails[order] = new_entry;
915 /* Put new pages at the head of the page list. */
916 new_entry->next = entry;
917 entry = new_entry;
918 G.pages[order] = new_entry;
920 /* For a new page, we know the word and bit positions (in the
921 in_use bitmap) of the first available object -- they're zero. */
922 new_entry->next_bit_hint = 1;
923 word = 0;
924 bit = 0;
925 object_offset = 0;
927 else
929 /* First try to use the hint left from the previous allocation
930 to locate a clear bit in the in-use bitmap. We've made sure
931 that the one-past-the-end bit is always set, so if the hint
932 has run over, this test will fail. */
933 unsigned hint = entry->next_bit_hint;
934 word = hint / HOST_BITS_PER_LONG;
935 bit = hint % HOST_BITS_PER_LONG;
937 /* If the hint didn't work, scan the bitmap from the beginning. */
938 if ((entry->in_use_p[word] >> bit) & 1)
940 word = bit = 0;
941 while (~entry->in_use_p[word] == 0)
942 ++word;
943 while ((entry->in_use_p[word] >> bit) & 1)
944 ++bit;
945 hint = word * HOST_BITS_PER_LONG + bit;
948 /* Next time, try the next bit. */
949 entry->next_bit_hint = hint + 1;
951 object_offset = hint * OBJECT_SIZE (order);
954 /* Set the in-use bit. */
955 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
957 /* Keep a running total of the number of free objects. If this page
958 fills up, we may have to move it to the end of the list if the
959 next page isn't full. If the next page is full, all subsequent
960 pages are full, so there's no need to move it. */
961 if (--entry->num_free_objects == 0
962 && entry->next != NULL
963 && entry->next->num_free_objects > 0)
965 G.pages[order] = entry->next;
966 entry->next = NULL;
967 G.page_tails[order]->next = entry;
968 G.page_tails[order] = entry;
971 /* Calculate the object's address. */
972 result = entry->page + object_offset;
974 #ifdef ENABLE_GC_CHECKING
975 /* Keep poisoning the object by writing 0xaf, so that the exact same
976 semantics are kept in the presence of memory bugs, regardless of
977 ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
978 handle to avoid a handle leak. */
979 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
981 /* `Poison' the entire allocated object, including any padding at
982 the end. */
983 memset (result, 0xaf, OBJECT_SIZE (order));
985 /* Make the bytes after the end of the object inaccessible.  Discard the
986 handle to avoid handle leak. */
987 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
988 OBJECT_SIZE (order) - size));
989 #endif
991 /* Tell Valgrind that the memory is there, but its content isn't
992 defined. The bytes at the end of the object are still marked
993 inaccessible. */
994 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
996 /* Keep track of how many bytes are being allocated. This
997 information is used in deciding when to collect. */
998 G.allocated += OBJECT_SIZE (order);
1000 if (GGC_DEBUG_LEVEL >= 3)
1001 fprintf (G.debug_file,
1002 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1003 (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
1004 (PTR) entry);
1006 return result;
1009 /* If P is not marked, mark it and return false.  Otherwise return true.
1010 P must have been allocated by the GC allocator; it mustn't point to
1011 static objects, stack variables, or memory allocated with malloc. */
1013 int
1014 ggc_set_mark (p)
1015 const void *p;
1017 page_entry *entry;
1018 unsigned bit, word;
1019 unsigned long mask;
1021 /* Look up the page on which the object is alloced. If the object
1022 wasn't allocated by the collector, we'll probably die. */
1023 entry = lookup_page_table_entry (p);
1024 #ifdef ENABLE_CHECKING
1025 if (entry == NULL)
1026 abort ();
1027 #endif
1029 /* Calculate the index of the object on the page; this is its bit
1030 position in the in_use_p bitmap. */
1031 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1032 word = bit / HOST_BITS_PER_LONG;
1033 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1035 /* If the bit was previously set, skip it. */
1036 if (entry->in_use_p[word] & mask)
1037 return 1;
1039 /* Otherwise set it, and decrement the free object count. */
1040 entry->in_use_p[word] |= mask;
1041 entry->num_free_objects -= 1;
1043 if (GGC_DEBUG_LEVEL >= 4)
1044 fprintf (G.debug_file, "Marking %p\n", p);
1046 return 0;
1049 /* Return 1 if P has been marked, zero otherwise.
1050 P must have been allocated by the GC allocator; it mustn't point to
1051 static objects, stack variables, or memory allocated with malloc. */
1053 int
1054 ggc_marked_p (p)
1055 const void *p;
1057 page_entry *entry;
1058 unsigned bit, word;
1059 unsigned long mask;
1061 /* Look up the page on which the object is alloced. If the object
1062 wasn't allocated by the collector, we'll probably die. */
1063 entry = lookup_page_table_entry (p);
1064 #ifdef ENABLE_CHECKING
1065 if (entry == NULL)
1066 abort ();
1067 #endif
1069 /* Calculate the index of the object on the page; this is its bit
1070 position in the in_use_p bitmap. */
1071 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1072 word = bit / HOST_BITS_PER_LONG;
1073 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1075 return (entry->in_use_p[word] & mask) != 0;
1078 /* Return the size of the gc-able object P. */
1080 size_t
1081 ggc_get_size (p)
1082 const void *p;
1084 page_entry *pe = lookup_page_table_entry (p);
1085 return OBJECT_SIZE (pe->order);
1088 /* Subroutine of init_ggc which computes the pair of numbers used to
1089 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1091 This algorithm is taken from Granlund and Montgomery's paper
1092 "Division by Invariant Integers using Multiplication"
1093 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1094 constants). */
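/* Worked example: for OBJECT_SIZE (order) == 24 == 8 * 3, the loop below
   strips three factors of two (e == 3, leaving size == 3), and the Newton
   iteration converges on inv == 0xAAAAAAAB, the inverse of 3 modulo 2^32
   (3 * 0xAAAAAAAB == 0x200000001).  With DIV_MULT == 0xAAAAAAAB and
   DIV_SHIFT == 3, we get (48 * 0xAAAAAAAB) mod 2^32 == 16 and
   16 >> 3 == 2 == 48 / 24, so OFFSET_TO_BIT (48, order) == 2 as required.  */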
1096 static void
1097 compute_inverse (order)
1098 unsigned order;
1100 unsigned size, inv, e;
1102 /* There can be only one object per "page" in a bucket for sizes
1103 larger than half a machine page; it will always have offset zero. */
1104 if (OBJECT_SIZE (order) > G.pagesize/2)
1106 if (OBJECTS_PER_PAGE (order) != 1)
1107 abort ();
1109 DIV_MULT (order) = 1;
1110 DIV_SHIFT (order) = 0;
1111 return;
1114 size = OBJECT_SIZE (order);
1115 e = 0;
1116 while (size % 2 == 0)
1118 e++;
1119 size >>= 1;
1122 inv = size;
1123 while (inv * size != 1)
1124 inv = inv * (2 - inv*size);
1126 DIV_MULT (order) = inv;
1127 DIV_SHIFT (order) = e;
1130 /* Initialize the ggc-mmap allocator. */
1131 void
1132 init_ggc ()
1134 unsigned order;
1136 G.pagesize = getpagesize();
1137 G.lg_pagesize = exact_log2 (G.pagesize);
1139 #ifdef HAVE_MMAP_DEV_ZERO
1140 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1141 if (G.dev_zero_fd == -1)
1142 abort ();
1143 #endif
1145 #if 0
1146 G.debug_file = fopen ("ggc-mmap.debug", "w");
1147 #else
1148 G.debug_file = stdout;
1149 #endif
1151 #ifdef USING_MMAP
1152 /* StunOS has an amazing off-by-one error for the first mmap allocation
1153 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1154 believe, is an unaligned page allocation, which would cause us to
1155 hork badly if we tried to use it. */
1157 char *p = alloc_anon (NULL, G.pagesize);
1158 struct page_entry *e;
1159 if ((size_t)p & (G.pagesize - 1))
1161 /* How losing. Discard this one and try another. If we still
1162 can't get something useful, give up. */
1164 p = alloc_anon (NULL, G.pagesize);
1165 if ((size_t)p & (G.pagesize - 1))
1166 abort ();
1169 /* We have a good page, might as well hold onto it... */
1170 e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
1171 e->bytes = G.pagesize;
1172 e->page = p;
1173 e->next = G.free_pages;
1174 G.free_pages = e;
1176 #endif
1178 /* Initialize the object size table. */
1179 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1180 object_size_table[order] = (size_t) 1 << order;
1181 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1183 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1185 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1186 so that we're sure of getting aligned memory. */
1187 s = ROUND_UP (s, MAX_ALIGNMENT);
1188 object_size_table[order] = s;
1191 /* Initialize the objects-per-page and inverse tables. */
1192 for (order = 0; order < NUM_ORDERS; ++order)
1194 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1195 if (objects_per_page_table[order] == 0)
1196 objects_per_page_table[order] = 1;
1197 compute_inverse (order);
1200 /* Reset the size_lookup array to put appropriately sized objects in
1201 the special orders. All objects bigger than the previous power
1202 of two, but no greater than the special size, should go in the
1203 new order. */
1204 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1206 int o;
1207 int i;
1209 o = size_lookup[OBJECT_SIZE (order)];
1210 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1211 size_lookup[i] = order;
1215 /* Increment the `GC context'. Objects allocated in an outer context
1216 are never freed, eliminating the need to register their roots. */
1218 void
1219 ggc_push_context ()
1221 ++G.context_depth;
1223 /* Die on wrap. */
1224 if (G.context_depth == 0)
1225 abort ();
1228 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1229 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1231 static void
1232 ggc_recalculate_in_use_p (p)
1233 page_entry *p;
1235 unsigned int i;
1236 size_t num_objects;
1238 /* Because the past-the-end bit in in_use_p is always set, we
1239 pretend there is one additional object. */
1240 num_objects = OBJECTS_IN_PAGE (p) + 1;
1242 /* Reset the free object count. */
1243 p->num_free_objects = num_objects;
1245 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1246 for (i = 0;
1247 i < CEIL (BITMAP_SIZE (num_objects),
1248 sizeof (*p->in_use_p));
1249 ++i)
1251 unsigned long j;
1253 /* Something is in use if it is marked, or if it was in use in a
1254 context further down the context stack. */
1255 p->in_use_p[i] |= p->save_in_use_p[i];
1257 /* Decrement the free object count for every object allocated. */
1258 for (j = p->in_use_p[i]; j; j >>= 1)
1259 p->num_free_objects -= (j & 1);
1262 if (p->num_free_objects >= num_objects)
1263 abort ();
1266 /* Decrement the `GC context'. All objects allocated since the
1267 previous ggc_push_context are migrated to the outer context. */
1269 void
1270 ggc_pop_context ()
1272 unsigned order, depth;
1274 depth = --G.context_depth;
1276 /* Any remaining pages in the popped context are lowered to the new
1277 current context; i.e. objects allocated in the popped context and
1278 left over are imported into the previous context. */
1279 for (order = 2; order < NUM_ORDERS; order++)
1281 page_entry *p;
1283 for (p = G.pages[order]; p != NULL; p = p->next)
1285 if (p->context_depth > depth)
1286 p->context_depth = depth;
1288 /* If this page is now in the topmost context, and we'd
1289 saved its allocation state, restore it. */
1290 else if (p->context_depth == depth && p->save_in_use_p)
1292 ggc_recalculate_in_use_p (p);
1293 free (p->save_in_use_p);
1294 p->save_in_use_p = 0;
1300 /* Unmark all objects. */
1302 static inline void
1303 clear_marks ()
1305 unsigned order;
1307 for (order = 2; order < NUM_ORDERS; order++)
1309 page_entry *p;
1311 for (p = G.pages[order]; p != NULL; p = p->next)
1313 size_t num_objects = OBJECTS_IN_PAGE (p);
1314 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1316 #ifdef ENABLE_CHECKING
1317 /* The data should be page-aligned. */
1318 if ((size_t) p->page & (G.pagesize - 1))
1319 abort ();
1320 #endif
1322 /* Pages that aren't in the topmost context are not collected;
1323 nevertheless, we need their in-use bit vectors to store GC
1324 marks. So, back them up first. */
1325 if (p->context_depth < G.context_depth)
1327 if (! p->save_in_use_p)
1328 p->save_in_use_p = xmalloc (bitmap_size);
1329 memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
1332 /* Reset the number of free objects and clear the
1333 in-use bits.  These will be adjusted by mark_obj. */
1334 p->num_free_objects = num_objects;
1335 memset (p->in_use_p, 0, bitmap_size);
1337 /* Make sure the one-past-the-end bit is always set. */
1338 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1339 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1344 /* Free all empty pages. Partially empty pages need no attention
1345 because the `mark' bit doubles as an `unused' bit. */
1347 static inline void
1348 sweep_pages ()
1350 unsigned order;
1352 for (order = 2; order < NUM_ORDERS; order++)
1354 /* The last page-entry to consider, regardless of entries
1355 placed at the end of the list. */
1356 page_entry * const last = G.page_tails[order];
1358 size_t num_objects;
1359 size_t live_objects;
1360 page_entry *p, *previous;
1361 int done;
1363 p = G.pages[order];
1364 if (p == NULL)
1365 continue;
1367 previous = NULL;
1370 page_entry *next = p->next;
1372 /* Loop until all entries have been examined. */
1373 done = (p == last);
1375 num_objects = OBJECTS_IN_PAGE (p);
1377 /* Add all live objects on this page to the count of
1378 allocated memory. */
1379 live_objects = num_objects - p->num_free_objects;
1381 G.allocated += OBJECT_SIZE (order) * live_objects;
1383 /* Only objects on pages in the topmost context should get
1384 collected. */
1385 if (p->context_depth < G.context_depth)
1388 /* Remove the page if it's empty. */
1389 else if (live_objects == 0)
1391 if (! previous)
1392 G.pages[order] = next;
1393 else
1394 previous->next = next;
1396 /* Are we removing the last element? */
1397 if (p == G.page_tails[order])
1398 G.page_tails[order] = previous;
1399 free_page (p);
1400 p = previous;
1403 /* If the page is full, move it to the end. */
1404 else if (p->num_free_objects == 0)
1406 /* Don't move it if it's already at the end. */
1407 if (p != G.page_tails[order])
1409 /* Move p to the end of the list. */
1410 p->next = NULL;
1411 G.page_tails[order]->next = p;
1413 /* Update the tail pointer... */
1414 G.page_tails[order] = p;
1416 /* ... and the head pointer, if necessary. */
1417 if (! previous)
1418 G.pages[order] = next;
1419 else
1420 previous->next = next;
1421 p = previous;
1425 /* If we've fallen through to here, it's a page in the
1426 topmost context that is neither full nor empty. Such a
1427 page must precede pages at lesser context depth in the
1428 list, so move it to the head. */
1429 else if (p != G.pages[order])
1431 previous->next = p->next;
1432 p->next = G.pages[order];
1433 G.pages[order] = p;
1434 /* Are we moving the last element? */
1435 if (G.page_tails[order] == p)
1436 G.page_tails[order] = previous;
1437 p = previous;
1440 previous = p;
1441 p = next;
1443 while (! done);
1445 /* Now, restore the in_use_p vectors for any pages from contexts
1446 other than the current one. */
1447 for (p = G.pages[order]; p; p = p->next)
1448 if (p->context_depth != G.context_depth)
1449 ggc_recalculate_in_use_p (p);
1453 #ifdef ENABLE_GC_CHECKING
1454 /* Clobber all free objects. */
1456 static inline void
1457 poison_pages ()
1459 unsigned order;
1461 for (order = 2; order < NUM_ORDERS; order++)
1463 size_t size = OBJECT_SIZE (order);
1464 page_entry *p;
1466 for (p = G.pages[order]; p != NULL; p = p->next)
1468 size_t num_objects;
1469 size_t i;
1471 if (p->context_depth != G.context_depth)
1472 /* Since we don't do any collection for pages in pushed
1473 contexts, there's no need to do any poisoning. And
1474 besides, the IN_USE_P array isn't valid until we pop
1475 contexts. */
1476 continue;
1478 num_objects = OBJECTS_IN_PAGE (p);
1479 for (i = 0; i < num_objects; i++)
1481 size_t word, bit;
1482 word = i / HOST_BITS_PER_LONG;
1483 bit = i % HOST_BITS_PER_LONG;
1484 if (((p->in_use_p[word] >> bit) & 1) == 0)
1486 char *object = p->page + i * size;
1488 /* Keep poison-by-write when we expect to use Valgrind,
1489 so the exact same memory semantics are kept, in case
1490 there are memory errors. We override this request
1491 below. */
1492 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
1493 memset (object, 0xa5, size);
1495 /* Drop the handle to avoid handle leak. */
1496 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
1502 #endif
1504 /* Top level mark-and-sweep routine. */
1506 void
1507 ggc_collect ()
1509 /* Avoid frequent unnecessary work by skipping collection if the
1510 total allocations haven't expanded much since the last
1511 collection. */
1512 float allocated_last_gc =
1513 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1515 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1517 if (G.allocated < allocated_last_gc + min_expand)
1518 return;
1520 timevar_push (TV_GC);
1521 if (!quiet_flag)
1522 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1524 /* Zero the total allocated bytes. This will be recalculated in the
1525 sweep phase. */
1526 G.allocated = 0;
1528 /* Release the pages we freed the last time we collected, but didn't
1529 reuse in the interim. */
1530 release_pages ();
1532 clear_marks ();
1533 ggc_mark_roots ();
1535 #ifdef ENABLE_GC_CHECKING
1536 poison_pages ();
1537 #endif
1539 sweep_pages ();
1541 G.allocated_last_gc = G.allocated;
1543 timevar_pop (TV_GC);
1545 if (!quiet_flag)
1546 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1549 /* Print allocation statistics. */
1550 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1551 ? (x) \
1552 : ((x) < 1024*1024*10 \
1553 ? (x) / 1024 \
1554 : (x) / (1024*1024))))
1555 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
1557 void
1558 ggc_print_statistics ()
1560 struct ggc_statistics stats;
1561 unsigned int i;
1562 size_t total_overhead = 0;
1564 /* Clear the statistics. */
1565 memset (&stats, 0, sizeof (stats));
1567 /* Make sure collection will really occur. */
1568 G.allocated_last_gc = 0;
1570 /* Collect and print the statistics common across collectors. */
1571 ggc_print_common_statistics (stderr, &stats);
1573 /* Release free pages so that we will not count the bytes allocated
1574 there as part of the total allocated memory. */
1575 release_pages ();
1577 /* Collect some information about the various sizes of
1578 allocation. */
1579 fprintf (stderr, "\n%-5s %10s %10s %10s\n",
1580 "Size", "Allocated", "Used", "Overhead");
1581 for (i = 0; i < NUM_ORDERS; ++i)
1583 page_entry *p;
1584 size_t allocated;
1585 size_t in_use;
1586 size_t overhead;
1588 /* Skip empty entries. */
1589 if (!G.pages[i])
1590 continue;
1592 overhead = allocated = in_use = 0;
1594 /* Figure out the total number of bytes allocated for objects of
1595 this size, and how many of them are actually in use. Also figure
1596 out how much memory the page table is using. */
1597 for (p = G.pages[i]; p; p = p->next)
1599 allocated += p->bytes;
1600 in_use +=
1601 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
1603 overhead += (sizeof (page_entry) - sizeof (long)
1604 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
1606 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
1607 (unsigned long) OBJECT_SIZE (i),
1608 SCALE (allocated), LABEL (allocated),
1609 SCALE (in_use), LABEL (in_use),
1610 SCALE (overhead), LABEL (overhead));
1611 total_overhead += overhead;
1613 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
1614 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1615 SCALE (G.allocated), LABEL(G.allocated),
1616 SCALE (total_overhead), LABEL (total_overhead));
1619 struct ggc_pch_data
1620 {
1621 struct ggc_pch_ondisk
1622 {
1623 unsigned totals[NUM_ORDERS];
1624 } d;
1625 size_t base[NUM_ORDERS];
1626 size_t written[NUM_ORDERS];
1627 };
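/* A sketch of the calling sequence this interface implies for the PCH
   writer (which lives elsewhere in GCC, not in this file); this is an
   editorial summary inferred from the functions below, not a verbatim
   copy of the caller:

     d = init_ggc_pch ();
     for each live object X:  ggc_pch_count_object (d, X, size of X);
     total = ggc_pch_total_size (d);    then map `total' bytes at some ADDR
     ggc_pch_this_base (d, ADDR);
     for each live object X:  new address = ggc_pch_alloc_object (d, X, size);
     ggc_pch_prepare_write (d, f);
     for each live object X:  ggc_pch_write_object (d, f, X, new address, size);
     ggc_pch_finish (d, f);  */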
1629 struct ggc_pch_data *
1630 init_ggc_pch ()
1632 return xcalloc (sizeof (struct ggc_pch_data), 1);
1635 void
1636 ggc_pch_count_object (d, x, size)
1637 struct ggc_pch_data *d;
1638 void *x ATTRIBUTE_UNUSED;
1639 size_t size;
1641 unsigned order;
1643 if (size <= 256)
1644 order = size_lookup[size];
1645 else
1647 order = 9;
1648 while (size > OBJECT_SIZE (order))
1649 order++;
1652 d->d.totals[order]++;
1655 size_t
1656 ggc_pch_total_size (d)
1657 struct ggc_pch_data *d;
1659 size_t a = 0;
1660 unsigned i;
1662 for (i = 0; i < NUM_ORDERS; i++)
1663 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1664 return a;
1667 void
1668 ggc_pch_this_base (d, base)
1669 struct ggc_pch_data *d;
1670 void *base;
1672 size_t a = (size_t) base;
1673 unsigned i;
1675 for (i = 0; i < NUM_ORDERS; i++)
1677 d->base[i] = a;
1678 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1683 char *
1684 ggc_pch_alloc_object (d, x, size)
1685 struct ggc_pch_data *d;
1686 void *x ATTRIBUTE_UNUSED;
1687 size_t size;
1689 unsigned order;
1690 char *result;
1692 if (size <= 256)
1693 order = size_lookup[size];
1694 else
1696 order = 9;
1697 while (size > OBJECT_SIZE (order))
1698 order++;
1701 result = (char *) d->base[order];
1702 d->base[order] += OBJECT_SIZE (order);
1703 return result;
1706 void
1707 ggc_pch_prepare_write (d, f)
1708 struct ggc_pch_data * d ATTRIBUTE_UNUSED;
1709 FILE * f ATTRIBUTE_UNUSED;
1711 /* Nothing to do. */
1714 void
1715 ggc_pch_write_object (d, f, x, newx, size)
1716 struct ggc_pch_data * d ATTRIBUTE_UNUSED;
1717 FILE *f;
1718 void *x;
1719 void *newx ATTRIBUTE_UNUSED;
1720 size_t size;
1722 unsigned order;
1724 if (size <= 256)
1725 order = size_lookup[size];
1726 else
1728 order = 9;
1729 while (size > OBJECT_SIZE (order))
1730 order++;
1733 if (fwrite (x, size, 1, f) != 1)
1734 fatal_io_error ("can't write PCH file");
1736 /* In the current implementation, SIZE is always equal to
1737 OBJECT_SIZE (order) and so the fseek is never executed. */
1738 if (size != OBJECT_SIZE (order)
1739 && fseek (f, OBJECT_SIZE (order) - size, SEEK_CUR) != 0)
1740 fatal_io_error ("can't write PCH file");
1742 d->written[order]++;
1743 if (d->written[order] == d->d.totals[order]
1744 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
1745 G.pagesize),
1746 SEEK_CUR) != 0)
1747 fatal_io_error ("can't write PCH file");
1750 void
1751 ggc_pch_finish (d, f)
1752 struct ggc_pch_data * d;
1753 FILE *f;
1755 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
1756 fatal_io_error ("can't write PCH file");
1757 free (d);
1760 void
1761 ggc_pch_read (f, addr)
1762 FILE *f;
1763 void *addr;
1765 struct ggc_pch_ondisk d;
1766 unsigned i;
1767 char *offs = addr;
1769 /* We've just read in a PCH file. So, every object that used to be allocated
1770 is now free. */
1771 clear_marks ();
1772 #ifdef ENABLE_GC_CHECKING
1773 poison_pages ();
1774 #endif
1776 /* No object read from a PCH file should ever be freed. So, set the
1777 context depth to 1, and set the depth of all the currently-allocated
1778 pages to be 1 too. PCH pages will have depth 0. */
1779 if (G.context_depth != 0)
1780 abort ();
1781 G.context_depth = 1;
1782 for (i = 0; i < NUM_ORDERS; i++)
1784 page_entry *p;
1785 for (p = G.pages[i]; p != NULL; p = p->next)
1786 p->context_depth = G.context_depth;
1789 /* Allocate the appropriate page-table entries for the pages read from
1790 the PCH file. */
1791 if (fread (&d, sizeof (d), 1, f) != 1)
1792 fatal_io_error ("can't read PCH file");
1794 for (i = 0; i < NUM_ORDERS; i++)
1796 struct page_entry *entry;
1797 char *pte;
1798 size_t bytes;
1799 size_t num_objs;
1800 size_t j;
1802 if (d.totals[i] == 0)
1803 continue;
1805 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1806 num_objs = bytes / OBJECT_SIZE (i);
1807 entry = xcalloc (1, (sizeof (struct page_entry)
1808 - sizeof (long)
1809 + BITMAP_SIZE (num_objs + 1)));
1810 entry->bytes = bytes;
1811 entry->page = offs;
1812 entry->context_depth = 0;
1813 offs += bytes;
1814 entry->num_free_objects = 0;
1815 entry->order = i;
1817 for (j = 0;
1818 j + HOST_BITS_PER_LONG <= num_objs + 1;
1819 j += HOST_BITS_PER_LONG)
1820 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
1821 for (; j < num_objs + 1; j++)
1822 entry->in_use_p[j / HOST_BITS_PER_LONG]
1823 |= 1L << (j % HOST_BITS_PER_LONG);
1825 for (pte = entry->page;
1826 pte < entry->page + entry->bytes;
1827 pte += G.pagesize)
1828 set_page_table_entry (pte, entry);
1830 if (G.page_tails[i] != NULL)
1831 G.page_tails[i]->next = entry;
1832 else
1833 G.pages[i] = entry;
1834 G.page_tails[i] = entry;
1837 /* Update the statistics. */
1838 G.allocated = G.allocated_last_gc = offs - (char *)addr;