gcc/ggc-page.c
1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "toplev.h"
29 #include "flags.h"
30 #include "ggc.h"
31 #include "timevar.h"
32 #include "params.h"
33 #ifdef ENABLE_VALGRIND_CHECKING
34 # ifdef HAVE_MEMCHECK_H
35 # include <memcheck.h>
36 # else
37 # include <valgrind.h>
38 # endif
39 #else
40 /* Avoid #ifdef:s when we can help it. */
41 #define VALGRIND_DISCARD(x)
42 #endif
44 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
45 file open. Prefer either to valloc. */
46 #ifdef HAVE_MMAP_ANON
47 # undef HAVE_MMAP_DEV_ZERO
49 # include <sys/mman.h>
50 # ifndef MAP_FAILED
51 # define MAP_FAILED -1
52 # endif
53 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
54 # define MAP_ANONYMOUS MAP_ANON
55 # endif
56 # define USING_MMAP
58 #endif
60 #ifdef HAVE_MMAP_DEV_ZERO
62 # include <sys/mman.h>
63 # ifndef MAP_FAILED
64 # define MAP_FAILED -1
65 # endif
66 # define USING_MMAP
68 #endif
70 #ifndef USING_MMAP
71 #define USING_MALLOC_PAGE_GROUPS
72 #endif
 74 /* Strategy:
76 This garbage-collecting allocator allocates objects on one of a set
77 of pages. Each page can allocate objects of a single size only;
78 available sizes are powers of two starting at four bytes. The size
79 of an allocation request is rounded up to the next power of two
80 (`order'), and satisfied from the appropriate page.
82 Each page is recorded in a page-entry, which also maintains an
83 in-use bitmap of object positions on the page. This allows the
84 allocation state of a particular object to be flipped without
85 touching the page itself.
87 Each page-entry also has a context depth, which is used to track
88 pushing and popping of allocation contexts. Only objects allocated
89 in the current (highest-numbered) context may be collected.
91 Page entries are arranged in an array of singly-linked lists. The
92 array is indexed by the allocation size, in bits, of the pages on
93 it; i.e. all pages on a list allocate objects of the same size.
94 Pages are ordered on the list such that all non-full pages precede
95 all full pages, with non-full pages arranged in order of decreasing
96 context depth.
98 Empty pages (of all orders) are kept on a single page cache list,
99 and are considered first when new pages are required; they are
100 deallocated at the start of the next collection if they haven't
101 been recycled by then. */
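/* Illustrative example (not part of the original comment; sizes assume a
   4096-byte page and ignore the extra orders defined below): a 20-byte
   ggc_alloc request is rounded up to 32 bytes, i.e. order 5, and is
   carved out of a page that holds only 32-byte objects (128 per page),
   tracked by the in-use bitmap in that page's page_entry.  */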
103 /* Define GGC_DEBUG_LEVEL to print debugging information.
104 0: No debugging output.
105 1: GC statistics only.
106 2: Page-entry allocations/deallocations as well.
107 3: Object allocations as well.
108 4: Object marks as well. */
109 #define GGC_DEBUG_LEVEL (0)
111 #ifndef HOST_BITS_PER_PTR
112 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
113 #endif
116 /* A two-level tree is used to look up the page-entry for a given
117 pointer. Two chunks of the pointer's bits are extracted to index
118 the first and second levels of the tree, as follows:
 120                            HOST_PAGE_SIZE_BITS
 121              32                             |      |
 122     msb +----------------+----+------+------+ lsb
 123                            |     |
 124                 PAGE_L1_BITS     |
 126                        PAGE_L2_BITS
128 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
129 pages are aligned on system page boundaries. The next most
130 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
131 index values in the lookup table, respectively.
133 For 32-bit architectures and the settings below, there are no
134 leftover bits. For architectures with wider pointers, the lookup
135 tree points to a list of pages, which must be scanned to find the
136 correct one. */
138 #define PAGE_L1_BITS (8)
139 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
140 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
141 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
143 #define LOOKUP_L1(p) \
144 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
146 #define LOOKUP_L2(p) \
147 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
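/* Worked example (hypothetical host, not from the original source): on a
   32-bit host with 4096-byte pages, G.lg_pagesize is 12, so PAGE_L2_BITS
   is 32 - 8 - 12 = 12.  A pointer p == 0xdeadbeef then splits as
     LOOKUP_L1 (p) == (0xdeadbeef >> 24) & 0xff  == 0xde
     LOOKUP_L2 (p) == (0xdeadbeef >> 12) & 0xfff == 0xadb
   and its page_entry, if any, is found at G.lookup[0xde][0xadb].  */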
149 /* The number of objects per allocation page, for objects on a page of
150 the indicated ORDER. */
151 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
153 /* The number of objects in P. */
154 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
156 /* The size of an object on a page of the indicated ORDER. */
157 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
159 /* For speed, we avoid doing a general integer divide to locate the
160 offset in the allocation bitmap, by precalculating numbers M, S
161 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
162 within the page which is evenly divisible by the object size Z. */
163 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
164 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
165 #define OFFSET_TO_BIT(OFFSET, ORDER) \
166 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
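/* Sketch of the trick with illustrative numbers (not from the original
   source): if OBJECT_SIZE (ORDER) were 24, compute_inverse below would
   record mult == 0xaaaaaaab and shift == 3, so for OFFSET == 48 we get
   (48 * 0xaaaaaaab) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24.  */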
168 /* The number of extra orders, not corresponding to power-of-two sized
169 objects. */
171 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
173 #define RTL_SIZE(NSLOTS) \
174 (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
176 #define TREE_EXP_SIZE(OPS) \
177 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
179 /* The Ith entry is the maximum size of an object to be stored in the
180 Ith extra order. Adding a new entry to this array is the *only*
181 thing you need to do to add a new special allocation size. */
183 static const size_t extra_order_size_table[] = {
184 sizeof (struct tree_decl),
185 sizeof (struct tree_list),
186 TREE_EXP_SIZE (2),
187 RTL_SIZE (2), /* REG, MEM, PLUS, etc. */
188 RTL_SIZE (10), /* INSN, CALL_INSN, JUMP_INSN */
191 /* The total number of orders. */
193 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
195 /* We use this structure to determine the alignment required for
196 allocations. For power-of-two sized allocations, that's not a
197 problem, but it does matter for odd-sized allocations. */
199 struct max_alignment {
200 char c;
201 union {
202 HOST_WIDEST_INT i;
203 long double d;
204 } u;
207 /* The biggest alignment required. */
209 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
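/* Sketch of why this works (alignment values are illustrative): the
   union u is placed at the first offset after the single char c that
   satisfies the strictest alignment among its members, so if long double
   requires 16-byte alignment, MAX_ALIGNMENT evaluates to 16.  */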
211 /* Compute the smallest nonnegative number which when added to X gives
212 a multiple of F. */
214 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
216 /* Compute the smallest multiple of F that is >= X. */
218 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
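/* For example (illustrative values): ROUND_UP_VALUE (20, 8) is
   7 - ((7 + 20) % 8) == 7 - 3 == 4, and ROUND_UP (20, 8) is
   CEIL (20, 8) * 8 == 3 * 8 == 24.  */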
 220 /* The Ith entry is the number of objects on a page of order I. */
222 static unsigned objects_per_page_table[NUM_ORDERS];
224 /* The Ith entry is the size of an object on a page of order I. */
226 static size_t object_size_table[NUM_ORDERS];
228 /* The Ith entry is a pair of numbers (mult, shift) such that
229 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
230 for all k evenly divisible by OBJECT_SIZE(I). */
232 static struct
234 size_t mult;
235 unsigned int shift;
237 inverse_table[NUM_ORDERS];
239 /* A page_entry records the status of an allocation page. This
240 structure is dynamically sized to fit the bitmap in_use_p. */
241 typedef struct page_entry
243 /* The next page-entry with objects of the same size, or NULL if
244 this is the last page-entry. */
245 struct page_entry *next;
247 /* The number of bytes allocated. (This will always be a multiple
248 of the host system page size.) */
249 size_t bytes;
251 /* The address at which the memory is allocated. */
252 char *page;
254 #ifdef USING_MALLOC_PAGE_GROUPS
255 /* Back pointer to the page group this page came from. */
256 struct page_group *group;
257 #endif
259 /* This is the index in the by_depth varray where this page table
260 can be found. */
261 unsigned long index_by_depth;
263 /* Context depth of this page. */
264 unsigned short context_depth;
266 /* The number of free objects remaining on this page. */
267 unsigned short num_free_objects;
269 /* A likely candidate for the bit position of a free object for the
270 next allocation from this page. */
271 unsigned short next_bit_hint;
273 /* The lg of size of objects allocated from this page. */
274 unsigned char order;
276 /* A bit vector indicating whether or not objects are in use. The
277 Nth bit is one if the Nth object on this page is allocated. This
278 array is dynamically sized. */
279 unsigned long in_use_p[1];
280 } page_entry;
282 #ifdef USING_MALLOC_PAGE_GROUPS
283 /* A page_group describes a large allocation from malloc, from which
284 we parcel out aligned pages. */
285 typedef struct page_group
287 /* A linked list of all extant page groups. */
288 struct page_group *next;
290 /* The address we received from malloc. */
291 char *allocation;
293 /* The size of the block. */
294 size_t alloc_size;
296 /* A bitmask of pages in use. */
297 unsigned int in_use;
298 } page_group;
299 #endif
301 #if HOST_BITS_PER_PTR <= 32
303 /* On 32-bit hosts, we use a two level page table, as pictured above. */
304 typedef page_entry **page_table[PAGE_L1_SIZE];
306 #else
308 /* On 64-bit hosts, we use the same two level page tables plus a linked
309 list that disambiguates the top 32-bits. There will almost always be
310 exactly one entry in the list. */
311 typedef struct page_table_chain
313 struct page_table_chain *next;
314 size_t high_bits;
315 page_entry **table[PAGE_L1_SIZE];
316 } *page_table;
318 #endif
320 /* The rest of the global variables. */
321 static struct globals
323 /* The Nth element in this array is a page with objects of size 2^N.
324 If there are any pages with free objects, they will be at the
325 head of the list. NULL if there are no page-entries for this
326 object size. */
327 page_entry *pages[NUM_ORDERS];
329 /* The Nth element in this array is the last page with objects of
330 size 2^N. NULL if there are no page-entries for this object
331 size. */
332 page_entry *page_tails[NUM_ORDERS];
334 /* Lookup table for associating allocation pages with object addresses. */
335 page_table lookup;
337 /* The system's page size. */
338 size_t pagesize;
339 size_t lg_pagesize;
341 /* Bytes currently allocated. */
342 size_t allocated;
344 /* Bytes currently allocated at the end of the last collection. */
345 size_t allocated_last_gc;
347 /* Total amount of memory mapped. */
348 size_t bytes_mapped;
350 /* Bit N set if any allocations have been done at context depth N. */
351 unsigned long context_depth_allocations;
353 /* Bit N set if any collections have been done at context depth N. */
354 unsigned long context_depth_collections;
356 /* The current depth in the context stack. */
357 unsigned short context_depth;
359 /* A file descriptor open to /dev/zero for reading. */
360 #if defined (HAVE_MMAP_DEV_ZERO)
361 int dev_zero_fd;
362 #endif
364 /* A cache of free system pages. */
365 page_entry *free_pages;
367 #ifdef USING_MALLOC_PAGE_GROUPS
368 page_group *page_groups;
369 #endif
 371 /* The stream used for debugging output. */
372 FILE *debug_file;
374 /* Current number of elements in use in depth below. */
375 unsigned int depth_in_use;
377 /* Maximum number of elements that can be used before resizing. */
378 unsigned int depth_max;
 380 Each element of this array is an index into by_depth where the given
 381 depth starts. This structure is indexed by the context depth we
 382 are interested in. */
383 unsigned int *depth;
385 /* Current number of elements in use in by_depth below. */
386 unsigned int by_depth_in_use;
388 /* Maximum number of elements that can be used before resizing. */
389 unsigned int by_depth_max;
391 /* Each element of this array is a pointer to a page_entry, all
392 page_entries can be found in here by increasing depth.
393 index_by_depth in the page_entry is the index into this data
394 structure where that page_entry can be found. This is used to
395 speed up finding all page_entries at a particular depth. */
396 page_entry **by_depth;
398 /* Each element is a pointer to the saved in_use_p bits, if any,
399 zero otherwise. We allocate them all together, to enable a
400 better runtime data access pattern. */
401 unsigned long **save_in_use;
403 } G;
405 /* The size in bytes required to maintain a bitmap for the objects
406 on a page-entry. */
407 #define BITMAP_SIZE(Num_objects) \
408 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
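/* For instance (assuming 32-bit longs; values are illustrative): a page
   holding 128 objects needs BITMAP_SIZE (128 + 1) == CEIL (129, 32) * 4
   == 20 bytes, the extra slot covering the one-past-the-end sentinel
   bit that alloc_page sets below.  */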
410 /* Allocate pages in chunks of this size, to throttle calls to memory
411 allocation routines. The first page is used, the rest go onto the
412 free list. This cannot be larger than HOST_BITS_PER_INT for the
413 in_use bitmask for page_group. */
414 #define GGC_QUIRE_SIZE 16
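/* Concretely (assuming 4096-byte pages and mmap as the backing store):
   a single-page request in alloc_page maps GGC_QUIRE_SIZE * 4096 == 64K
   in one go, uses the first page, and threads the remaining 15 pages
   onto G.free_pages.  */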
416 /* Initial guess as to how many page table entries we might need. */
417 #define INITIAL_PTE_COUNT 128
419 static int ggc_allocated_p (const void *);
420 static page_entry *lookup_page_table_entry (const void *);
421 static void set_page_table_entry (void *, page_entry *);
422 #ifdef USING_MMAP
423 static char *alloc_anon (char *, size_t);
424 #endif
425 #ifdef USING_MALLOC_PAGE_GROUPS
426 static size_t page_group_index (char *, char *);
427 static void set_page_group_in_use (page_group *, char *);
428 static void clear_page_group_in_use (page_group *, char *);
429 #endif
430 static struct page_entry * alloc_page (unsigned);
431 static void free_page (struct page_entry *);
432 static void release_pages (void);
433 static void clear_marks (void);
434 static void sweep_pages (void);
435 static void ggc_recalculate_in_use_p (page_entry *);
436 static void compute_inverse (unsigned);
437 static inline void adjust_depth (void);
438 static void move_ptes_to_front (int, int);
440 #ifdef ENABLE_GC_CHECKING
441 static void poison_pages (void);
442 #endif
444 void debug_print_page_list (int);
445 static void push_depth (unsigned int);
446 static void push_by_depth (page_entry *, unsigned long *);
448 /* Push an entry onto G.depth. */
450 inline static void
451 push_depth (unsigned int i)
453 if (G.depth_in_use >= G.depth_max)
455 G.depth_max *= 2;
456 G.depth = (unsigned int *) xrealloc ((char *) G.depth,
457 G.depth_max * sizeof (unsigned int));
459 G.depth[G.depth_in_use++] = i;
462 /* Push an entry onto G.by_depth and G.save_in_use. */
464 inline static void
465 push_by_depth (page_entry *p, unsigned long *s)
467 if (G.by_depth_in_use >= G.by_depth_max)
469 G.by_depth_max *= 2;
470 G.by_depth = (page_entry **) xrealloc ((char *) G.by_depth,
471 G.by_depth_max * sizeof (page_entry *));
472 G.save_in_use = (unsigned long **) xrealloc ((char *) G.save_in_use,
473 G.by_depth_max * sizeof (unsigned long *));
475 G.by_depth[G.by_depth_in_use] = p;
476 G.save_in_use[G.by_depth_in_use++] = s;
479 #if (GCC_VERSION < 3001)
480 #define prefetch(X) ((void) X)
481 #else
482 #define prefetch(X) __builtin_prefetch (X)
483 #endif
485 #define save_in_use_p_i(__i) \
486 (G.save_in_use[__i])
487 #define save_in_use_p(__p) \
488 (save_in_use_p_i (__p->index_by_depth))
490 /* Returns nonzero if P was allocated in GC'able memory. */
492 static inline int
493 ggc_allocated_p (const void *p)
495 page_entry ***base;
496 size_t L1, L2;
498 #if HOST_BITS_PER_PTR <= 32
499 base = &G.lookup[0];
500 #else
501 page_table table = G.lookup;
502 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
503 while (1)
505 if (table == NULL)
506 return 0;
507 if (table->high_bits == high_bits)
508 break;
509 table = table->next;
511 base = &table->table[0];
512 #endif
514 /* Extract the level 1 and 2 indices. */
515 L1 = LOOKUP_L1 (p);
516 L2 = LOOKUP_L2 (p);
518 return base[L1] && base[L1][L2];
521 /* Traverse the page table and find the entry for a page.
522 Die (probably) if the object wasn't allocated via GC. */
524 static inline page_entry *
525 lookup_page_table_entry (const void *p)
527 page_entry ***base;
528 size_t L1, L2;
530 #if HOST_BITS_PER_PTR <= 32
531 base = &G.lookup[0];
532 #else
533 page_table table = G.lookup;
534 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
535 while (table->high_bits != high_bits)
536 table = table->next;
537 base = &table->table[0];
538 #endif
540 /* Extract the level 1 and 2 indices. */
541 L1 = LOOKUP_L1 (p);
542 L2 = LOOKUP_L2 (p);
544 return base[L1][L2];
547 /* Set the page table entry for a page. */
549 static void
550 set_page_table_entry (void *p, page_entry *entry)
552 page_entry ***base;
553 size_t L1, L2;
555 #if HOST_BITS_PER_PTR <= 32
556 base = &G.lookup[0];
557 #else
558 page_table table;
559 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
560 for (table = G.lookup; table; table = table->next)
561 if (table->high_bits == high_bits)
562 goto found;
564 /* Not found -- allocate a new table. */
565 table = (page_table) xcalloc (1, sizeof(*table));
566 table->next = G.lookup;
567 table->high_bits = high_bits;
568 G.lookup = table;
569 found:
570 base = &table->table[0];
571 #endif
573 /* Extract the level 1 and 2 indices. */
574 L1 = LOOKUP_L1 (p);
575 L2 = LOOKUP_L2 (p);
577 if (base[L1] == NULL)
578 base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
580 base[L1][L2] = entry;
583 /* Prints the page-entry for object size ORDER, for debugging. */
585 void
586 debug_print_page_list (int order)
588 page_entry *p;
589 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
590 (void *) G.page_tails[order]);
591 p = G.pages[order];
592 while (p != NULL)
594 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
595 p->num_free_objects);
596 p = p->next;
598 printf ("NULL\n");
599 fflush (stdout);
602 #ifdef USING_MMAP
 603 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
 604 (if non-null). The ifdef structure here is intended to cause a
605 compile error unless exactly one of the HAVE_* is defined. */
607 static inline char *
608 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
610 #ifdef HAVE_MMAP_ANON
611 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
612 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
613 #endif
614 #ifdef HAVE_MMAP_DEV_ZERO
615 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
616 MAP_PRIVATE, G.dev_zero_fd, 0);
617 #endif
619 if (page == (char *) MAP_FAILED)
621 perror ("virtual memory exhausted");
622 exit (FATAL_EXIT_CODE);
625 /* Remember that we allocated this memory. */
626 G.bytes_mapped += size;
628 /* Pretend we don't have access to the allocated pages. We'll enable
629 access to smaller pieces of the area in ggc_alloc. Discard the
630 handle to avoid handle leak. */
631 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
633 return page;
635 #endif
636 #ifdef USING_MALLOC_PAGE_GROUPS
637 /* Compute the index for this page into the page group. */
639 static inline size_t
640 page_group_index (char *allocation, char *page)
642 return (size_t) (page - allocation) >> G.lg_pagesize;
645 /* Set and clear the in_use bit for this page in the page group. */
647 static inline void
648 set_page_group_in_use (page_group *group, char *page)
650 group->in_use |= 1 << page_group_index (group->allocation, page);
653 static inline void
654 clear_page_group_in_use (page_group *group, char *page)
656 group->in_use &= ~(1 << page_group_index (group->allocation, page));
658 #endif
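/* Small illustration (hypothetical addresses): with 4096-byte pages, a
   page at group->allocation + 3 * 4096 has page_group_index () == 3, so
   set_page_group_in_use sets bit 3 of group->in_use and
   clear_page_group_in_use clears it again when the page is freed.  */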
660 /* Allocate a new page for allocating objects of size 2^ORDER,
661 and return an entry for it. The entry is not added to the
662 appropriate page_table list. */
664 static inline struct page_entry *
665 alloc_page (unsigned order)
667 struct page_entry *entry, *p, **pp;
668 char *page;
669 size_t num_objects;
670 size_t bitmap_size;
671 size_t page_entry_size;
672 size_t entry_size;
673 #ifdef USING_MALLOC_PAGE_GROUPS
674 page_group *group;
675 #endif
677 num_objects = OBJECTS_PER_PAGE (order);
678 bitmap_size = BITMAP_SIZE (num_objects + 1);
679 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
680 entry_size = num_objects * OBJECT_SIZE (order);
681 if (entry_size < G.pagesize)
682 entry_size = G.pagesize;
684 entry = NULL;
685 page = NULL;
687 /* Check the list of free pages for one we can use. */
688 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
689 if (p->bytes == entry_size)
690 break;
692 if (p != NULL)
694 /* Recycle the allocated memory from this page ... */
695 *pp = p->next;
696 page = p->page;
698 #ifdef USING_MALLOC_PAGE_GROUPS
699 group = p->group;
700 #endif
702 /* ... and, if possible, the page entry itself. */
703 if (p->order == order)
705 entry = p;
706 memset (entry, 0, page_entry_size);
708 else
709 free (p);
711 #ifdef USING_MMAP
712 else if (entry_size == G.pagesize)
714 /* We want just one page. Allocate a bunch of them and put the
715 extras on the freelist. (Can only do this optimization with
716 mmap for backing store.) */
717 struct page_entry *e, *f = G.free_pages;
718 int i;
720 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
722 /* This loop counts down so that the chain will be in ascending
723 memory order. */
724 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
726 e = (struct page_entry *) xcalloc (1, page_entry_size);
727 e->order = order;
728 e->bytes = G.pagesize;
729 e->page = page + (i << G.lg_pagesize);
730 e->next = f;
731 f = e;
734 G.free_pages = f;
736 else
737 page = alloc_anon (NULL, entry_size);
738 #endif
739 #ifdef USING_MALLOC_PAGE_GROUPS
740 else
742 /* Allocate a large block of memory and serve out the aligned
743 pages therein. This results in much less memory wastage
744 than the traditional implementation of valloc. */
746 char *allocation, *a, *enda;
747 size_t alloc_size, head_slop, tail_slop;
748 int multiple_pages = (entry_size == G.pagesize);
750 if (multiple_pages)
751 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
752 else
753 alloc_size = entry_size + G.pagesize - 1;
754 allocation = xmalloc (alloc_size);
756 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
757 head_slop = page - allocation;
758 if (multiple_pages)
759 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
760 else
761 tail_slop = alloc_size - entry_size - head_slop;
762 enda = allocation + alloc_size - tail_slop;
764 /* We allocated N pages, which are likely not aligned, leaving
765 us with N-1 usable pages. We plan to place the page_group
766 structure somewhere in the slop. */
767 if (head_slop >= sizeof (page_group))
768 group = (page_group *)page - 1;
769 else
771 /* We magically got an aligned allocation. Too bad, we have
772 to waste a page anyway. */
773 if (tail_slop == 0)
775 enda -= G.pagesize;
776 tail_slop += G.pagesize;
778 if (tail_slop < sizeof (page_group))
779 abort ();
780 group = (page_group *)enda;
781 tail_slop -= sizeof (page_group);
784 /* Remember that we allocated this memory. */
785 group->next = G.page_groups;
786 group->allocation = allocation;
787 group->alloc_size = alloc_size;
788 group->in_use = 0;
789 G.page_groups = group;
790 G.bytes_mapped += alloc_size;
792 /* If we allocated multiple pages, put the rest on the free list. */
793 if (multiple_pages)
795 struct page_entry *e, *f = G.free_pages;
796 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
798 e = (struct page_entry *) xcalloc (1, page_entry_size);
799 e->order = order;
800 e->bytes = G.pagesize;
801 e->page = a;
802 e->group = group;
803 e->next = f;
804 f = e;
806 G.free_pages = f;
809 #endif
811 if (entry == NULL)
812 entry = (struct page_entry *) xcalloc (1, page_entry_size);
814 entry->bytes = entry_size;
815 entry->page = page;
816 entry->context_depth = G.context_depth;
817 entry->order = order;
818 entry->num_free_objects = num_objects;
819 entry->next_bit_hint = 1;
821 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
823 #ifdef USING_MALLOC_PAGE_GROUPS
824 entry->group = group;
825 set_page_group_in_use (group, page);
826 #endif
828 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
829 increment the hint. */
830 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
831 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
833 set_page_table_entry (page, entry);
835 if (GGC_DEBUG_LEVEL >= 2)
836 fprintf (G.debug_file,
837 "Allocating page at %p, object size=%lu, data %p-%p\n",
838 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
839 page + entry_size - 1);
841 return entry;
844 /* Adjust the size of G.depth so that no index greater than the one
 845 used by the top of G.by_depth is used. */
847 static inline void
848 adjust_depth (void)
850 page_entry *top;
852 if (G.by_depth_in_use)
854 top = G.by_depth[G.by_depth_in_use-1];
856 /* Peel back indices in depth that index into by_depth, so that
857 as new elements are added to by_depth, we note the indices
858 of those elements, if they are for new context depths. */
859 while (G.depth_in_use > (size_t)top->context_depth+1)
860 --G.depth_in_use;
864 /* For a page that is no longer needed, put it on the free page list. */
866 static inline void
867 free_page (page_entry *entry)
869 if (GGC_DEBUG_LEVEL >= 2)
870 fprintf (G.debug_file,
871 "Deallocating page at %p, data %p-%p\n", (void *) entry,
872 entry->page, entry->page + entry->bytes - 1);
874 /* Mark the page as inaccessible. Discard the handle to avoid handle
875 leak. */
876 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
878 set_page_table_entry (entry->page, NULL);
880 #ifdef USING_MALLOC_PAGE_GROUPS
881 clear_page_group_in_use (entry->group, entry->page);
882 #endif
884 if (G.by_depth_in_use > 1)
886 page_entry *top = G.by_depth[G.by_depth_in_use-1];
888 /* If they are at the same depth, put top element into freed
889 slot. */
890 if (entry->context_depth == top->context_depth)
892 int i = entry->index_by_depth;
893 G.by_depth[i] = top;
894 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
895 top->index_by_depth = i;
897 else
899 /* We cannot free a page from a context deeper than the
900 current one. */
901 abort ();
904 --G.by_depth_in_use;
906 adjust_depth ();
908 entry->next = G.free_pages;
909 G.free_pages = entry;
912 /* Release the free page cache to the system. */
914 static void
915 release_pages (void)
917 #ifdef USING_MMAP
918 page_entry *p, *next;
919 char *start;
920 size_t len;
922 /* Gather up adjacent pages so they are unmapped together. */
923 p = G.free_pages;
925 while (p)
927 start = p->page;
928 next = p->next;
929 len = p->bytes;
930 free (p);
931 p = next;
933 while (p && p->page == start + len)
935 next = p->next;
936 len += p->bytes;
937 free (p);
938 p = next;
941 munmap (start, len);
942 G.bytes_mapped -= len;
945 G.free_pages = NULL;
946 #endif
947 #ifdef USING_MALLOC_PAGE_GROUPS
948 page_entry **pp, *p;
949 page_group **gp, *g;
951 /* Remove all pages from free page groups from the list. */
952 pp = &G.free_pages;
953 while ((p = *pp) != NULL)
954 if (p->group->in_use == 0)
956 *pp = p->next;
957 free (p);
959 else
960 pp = &p->next;
962 /* Remove all free page groups, and release the storage. */
963 gp = &G.page_groups;
964 while ((g = *gp) != NULL)
965 if (g->in_use == 0)
967 *gp = g->next;
968 G.bytes_mapped -= g->alloc_size;
969 free (g->allocation);
971 else
972 gp = &g->next;
973 #endif
976 /* This table provides a fast way to determine ceil(log_2(size)) for
977 allocation requests. The minimum allocation size is eight bytes. */
979 static unsigned char size_lookup[257] =
981 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
982 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
983 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
984 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
985 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
986 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
987 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
988 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
989 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
990 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
991 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
992 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
993 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
994 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
995 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
996 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1000 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1002 void *
1003 ggc_alloc (size_t size)
1005 unsigned order, word, bit, object_offset;
1006 struct page_entry *entry;
1007 void *result;
1009 if (size <= 256)
1010 order = size_lookup[size];
1011 else
1013 order = 9;
1014 while (size > OBJECT_SIZE (order))
1015 order++;
1018 /* If there are non-full pages for this size allocation, they are at
1019 the head of the list. */
1020 entry = G.pages[order];
1022 /* If there is no page for this object size, or all pages in this
1023 context are full, allocate a new page. */
1024 if (entry == NULL || entry->num_free_objects == 0)
1026 struct page_entry *new_entry;
1027 new_entry = alloc_page (order);
1029 new_entry->index_by_depth = G.by_depth_in_use;
1030 push_by_depth (new_entry, 0);
 1032 /* We can skip context depths; if we do, make sure we go all the
1033 way to the new depth. */
1034 while (new_entry->context_depth >= G.depth_in_use)
1035 push_depth (G.by_depth_in_use-1);
1037 /* If this is the only entry, it's also the tail. */
1038 if (entry == NULL)
1039 G.page_tails[order] = new_entry;
1041 /* Put new pages at the head of the page list. */
1042 new_entry->next = entry;
1043 entry = new_entry;
1044 G.pages[order] = new_entry;
1046 /* For a new page, we know the word and bit positions (in the
1047 in_use bitmap) of the first available object -- they're zero. */
1048 new_entry->next_bit_hint = 1;
1049 word = 0;
1050 bit = 0;
1051 object_offset = 0;
1053 else
1055 /* First try to use the hint left from the previous allocation
1056 to locate a clear bit in the in-use bitmap. We've made sure
1057 that the one-past-the-end bit is always set, so if the hint
1058 has run over, this test will fail. */
1059 unsigned hint = entry->next_bit_hint;
1060 word = hint / HOST_BITS_PER_LONG;
1061 bit = hint % HOST_BITS_PER_LONG;
1063 /* If the hint didn't work, scan the bitmap from the beginning. */
1064 if ((entry->in_use_p[word] >> bit) & 1)
1066 word = bit = 0;
1067 while (~entry->in_use_p[word] == 0)
1068 ++word;
1069 while ((entry->in_use_p[word] >> bit) & 1)
1070 ++bit;
1071 hint = word * HOST_BITS_PER_LONG + bit;
1074 /* Next time, try the next bit. */
1075 entry->next_bit_hint = hint + 1;
1077 object_offset = hint * OBJECT_SIZE (order);
1080 /* Set the in-use bit. */
1081 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1083 /* Keep a running total of the number of free objects. If this page
1084 fills up, we may have to move it to the end of the list if the
1085 next page isn't full. If the next page is full, all subsequent
1086 pages are full, so there's no need to move it. */
1087 if (--entry->num_free_objects == 0
1088 && entry->next != NULL
1089 && entry->next->num_free_objects > 0)
1091 G.pages[order] = entry->next;
1092 entry->next = NULL;
1093 G.page_tails[order]->next = entry;
1094 G.page_tails[order] = entry;
1097 /* Calculate the object's address. */
1098 result = entry->page + object_offset;
1100 #ifdef ENABLE_GC_CHECKING
 1101 /* Keep poisoning the object by writing 0xaf, so that the semantics stay
 1102 exactly the same in the presence of memory bugs, regardless of
1103 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1104 handle to avoid handle leak. */
1105 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
1107 /* `Poison' the entire allocated object, including any padding at
1108 the end. */
1109 memset (result, 0xaf, OBJECT_SIZE (order));
 1111 /* Make the bytes after the end of the object inaccessible. Discard the
1112 handle to avoid handle leak. */
1113 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1114 OBJECT_SIZE (order) - size));
1115 #endif
1117 /* Tell Valgrind that the memory is there, but its content isn't
1118 defined. The bytes at the end of the object are still marked
 1119 inaccessible. */
1120 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1122 /* Keep track of how many bytes are being allocated. This
1123 information is used in deciding when to collect. */
1124 G.allocated += OBJECT_SIZE (order);
1126 if (GGC_DEBUG_LEVEL >= 3)
1127 fprintf (G.debug_file,
1128 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1129 (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
1130 (void *) entry);
1132 return result;
 1135 /* If P is not marked, mark it and return false. Otherwise return true.
1136 P must have been allocated by the GC allocator; it mustn't point to
1137 static objects, stack variables, or memory allocated with malloc. */
1140 ggc_set_mark (const void *p)
1142 page_entry *entry;
1143 unsigned bit, word;
1144 unsigned long mask;
1146 /* Look up the page on which the object is alloced. If the object
1147 wasn't allocated by the collector, we'll probably die. */
1148 entry = lookup_page_table_entry (p);
1149 #ifdef ENABLE_CHECKING
1150 if (entry == NULL)
1151 abort ();
1152 #endif
1154 /* Calculate the index of the object on the page; this is its bit
1155 position in the in_use_p bitmap. */
1156 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1157 word = bit / HOST_BITS_PER_LONG;
1158 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1160 /* If the bit was previously set, skip it. */
1161 if (entry->in_use_p[word] & mask)
1162 return 1;
1164 /* Otherwise set it, and decrement the free object count. */
1165 entry->in_use_p[word] |= mask;
1166 entry->num_free_objects -= 1;
1168 if (GGC_DEBUG_LEVEL >= 4)
1169 fprintf (G.debug_file, "Marking %p\n", p);
1171 return 0;
1174 /* Return 1 if P has been marked, zero otherwise.
1175 P must have been allocated by the GC allocator; it mustn't point to
1176 static objects, stack variables, or memory allocated with malloc. */
1179 ggc_marked_p (const void *p)
1181 page_entry *entry;
1182 unsigned bit, word;
1183 unsigned long mask;
1185 /* Look up the page on which the object is alloced. If the object
1186 wasn't allocated by the collector, we'll probably die. */
1187 entry = lookup_page_table_entry (p);
1188 #ifdef ENABLE_CHECKING
1189 if (entry == NULL)
1190 abort ();
1191 #endif
1193 /* Calculate the index of the object on the page; this is its bit
1194 position in the in_use_p bitmap. */
1195 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1196 word = bit / HOST_BITS_PER_LONG;
1197 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1199 return (entry->in_use_p[word] & mask) != 0;
1202 /* Return the size of the gc-able object P. */
1204 size_t
1205 ggc_get_size (const void *p)
1207 page_entry *pe = lookup_page_table_entry (p);
1208 return OBJECT_SIZE (pe->order);
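/* Hedged usage sketch (not part of this file): a client of this
   allocator typically does

     void *p = ggc_alloc (sizeof (struct tree_list));
     ...
     ggc_collect ();

   after which p is only still live if ggc_mark_roots reached it and
   called ggc_set_mark; ggc_marked_p and ggc_get_size let object walkers
   query an object's mark state and size.  */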
1211 /* Subroutine of init_ggc which computes the pair of numbers used to
1212 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1214 This algorithm is taken from Granlund and Montgomery's paper
1215 "Division by Invariant Integers using Multiplication"
1216 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1217 constants). */
1219 static void
1220 compute_inverse (unsigned order)
1222 size_t size, inv;
1223 unsigned int e;
1225 size = OBJECT_SIZE (order);
1226 e = 0;
1227 while (size % 2 == 0)
1229 e++;
1230 size >>= 1;
1233 inv = size;
1234 while (inv * size != 1)
1235 inv = inv * (2 - inv*size);
1237 DIV_MULT (order) = inv;
1238 DIV_SHIFT (order) = e;
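/* Worked example (illustrative size): for an object size of 24 == 8 * 3
   the loop above strips three factors of two, leaving size == 3 and
   e == 3; the Newton iteration then converges to inv == 0xaaaaaaab,
   since 3 * 0xaaaaaaab == 0x200000001 == 1 (mod 2^32).  OFFSET_TO_BIT
   can therefore divide any multiple of 24 with one multiply and one
   shift.  */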
1241 /* Initialize the ggc-mmap allocator. */
1242 void
1243 init_ggc (void)
1245 unsigned order;
1247 G.pagesize = getpagesize();
1248 G.lg_pagesize = exact_log2 (G.pagesize);
1250 #ifdef HAVE_MMAP_DEV_ZERO
1251 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1252 if (G.dev_zero_fd == -1)
1253 internal_error ("open /dev/zero: %m");
1254 #endif
1256 #if 0
1257 G.debug_file = fopen ("ggc-mmap.debug", "w");
1258 #else
1259 G.debug_file = stdout;
1260 #endif
1262 #ifdef USING_MMAP
1263 /* StunOS has an amazing off-by-one error for the first mmap allocation
1264 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1265 believe, is an unaligned page allocation, which would cause us to
1266 hork badly if we tried to use it. */
1268 char *p = alloc_anon (NULL, G.pagesize);
1269 struct page_entry *e;
1270 if ((size_t)p & (G.pagesize - 1))
1272 /* How losing. Discard this one and try another. If we still
1273 can't get something useful, give up. */
1275 p = alloc_anon (NULL, G.pagesize);
1276 if ((size_t)p & (G.pagesize - 1))
1277 abort ();
1280 /* We have a good page, might as well hold onto it... */
1281 e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
1282 e->bytes = G.pagesize;
1283 e->page = p;
1284 e->next = G.free_pages;
1285 G.free_pages = e;
1287 #endif
1289 /* Initialize the object size table. */
1290 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1291 object_size_table[order] = (size_t) 1 << order;
1292 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1294 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1296 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1297 so that we're sure of getting aligned memory. */
1298 s = ROUND_UP (s, MAX_ALIGNMENT);
1299 object_size_table[order] = s;
1302 /* Initialize the objects-per-page and inverse tables. */
1303 for (order = 0; order < NUM_ORDERS; ++order)
1305 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1306 if (objects_per_page_table[order] == 0)
1307 objects_per_page_table[order] = 1;
1308 compute_inverse (order);
1311 /* Reset the size_lookup array to put appropriately sized objects in
1312 the special orders. All objects bigger than the previous power
1313 of two, but no greater than the special size, should go in the
1314 new order. */
1315 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1317 int o;
1318 int i;
1320 o = size_lookup[OBJECT_SIZE (order)];
1321 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1322 size_lookup[i] = order;
1325 G.depth_in_use = 0;
1326 G.depth_max = 10;
1327 G.depth = (unsigned int *) xmalloc (G.depth_max * sizeof (unsigned int));
1329 G.by_depth_in_use = 0;
1330 G.by_depth_max = INITIAL_PTE_COUNT;
1331 G.by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
1332 G.save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
1335 /* Increment the `GC context'. Objects allocated in an outer context
1336 are never freed, eliminating the need to register their roots. */
1338 void
1339 ggc_push_context (void)
1341 ++G.context_depth;
1343 /* Die on wrap. */
1344 if (G.context_depth >= HOST_BITS_PER_LONG)
1345 abort ();
1348 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1349 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1351 static void
1352 ggc_recalculate_in_use_p (page_entry *p)
1354 unsigned int i;
1355 size_t num_objects;
1357 /* Because the past-the-end bit in in_use_p is always set, we
1358 pretend there is one additional object. */
1359 num_objects = OBJECTS_IN_PAGE (p) + 1;
1361 /* Reset the free object count. */
1362 p->num_free_objects = num_objects;
1364 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1365 for (i = 0;
1366 i < CEIL (BITMAP_SIZE (num_objects),
1367 sizeof (*p->in_use_p));
1368 ++i)
1370 unsigned long j;
1372 /* Something is in use if it is marked, or if it was in use in a
1373 context further down the context stack. */
1374 p->in_use_p[i] |= save_in_use_p (p)[i];
1376 /* Decrement the free object count for every object allocated. */
1377 for (j = p->in_use_p[i]; j; j >>= 1)
1378 p->num_free_objects -= (j & 1);
1381 if (p->num_free_objects >= num_objects)
1382 abort ();
1385 /* Decrement the `GC context'. All objects allocated since the
1386 previous ggc_push_context are migrated to the outer context. */
1388 void
1389 ggc_pop_context (void)
1391 unsigned long omask;
1392 unsigned int depth, i, e;
1393 #ifdef ENABLE_CHECKING
1394 unsigned int order;
1395 #endif
1397 depth = --G.context_depth;
1398 omask = (unsigned long)1 << (depth + 1);
1400 if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
1401 return;
1403 G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
1404 G.context_depth_allocations &= omask - 1;
1405 G.context_depth_collections &= omask - 1;
 1407 /* The G.depth array is shortened so that the last index is the
1408 context_depth of the top element of by_depth. */
1409 if (depth+1 < G.depth_in_use)
1410 e = G.depth[depth+1];
1411 else
1412 e = G.by_depth_in_use;
 1414 /* We might not have any PTEs of depth DEPTH. */
1415 if (depth < G.depth_in_use)
 1418 /* First we go through all the pages at depth DEPTH to
 1419 recalculate the in-use bits. */
1420 for (i = G.depth[depth]; i < e; ++i)
1422 page_entry *p;
1424 #ifdef ENABLE_CHECKING
1425 p = G.by_depth[i];
1427 /* Check that all of the pages really are at the depth that
1428 we expect. */
1429 if (p->context_depth != depth)
1430 abort ();
1431 if (p->index_by_depth != i)
1432 abort ();
1433 #endif
1435 prefetch (&save_in_use_p_i (i+8));
1436 prefetch (&save_in_use_p_i (i+16));
1437 if (save_in_use_p_i (i))
1439 p = G.by_depth[i];
1440 ggc_recalculate_in_use_p (p);
1441 free (save_in_use_p_i (i));
1442 save_in_use_p_i (i) = 0;
 1447 /* Then, we reset all page_entries with a depth greater than DEPTH
 1448 to be at DEPTH. */
1449 for (i = e; i < G.by_depth_in_use; ++i)
1451 page_entry *p = G.by_depth[i];
1453 /* Check that all of the pages really are at the depth we
1454 expect. */
1455 #ifdef ENABLE_CHECKING
1456 if (p->context_depth <= depth)
1457 abort ();
1458 if (p->index_by_depth != i)
1459 abort ();
1460 #endif
1461 p->context_depth = depth;
1464 adjust_depth ();
1466 #ifdef ENABLE_CHECKING
1467 for (order = 2; order < NUM_ORDERS; order++)
1469 page_entry *p;
1471 for (p = G.pages[order]; p != NULL; p = p->next)
1473 if (p->context_depth > depth)
1474 abort ();
1475 else if (p->context_depth == depth && save_in_use_p (p))
1476 abort ();
1479 #endif
1482 /* Unmark all objects. */
1484 static inline void
1485 clear_marks (void)
1487 unsigned order;
1489 for (order = 2; order < NUM_ORDERS; order++)
1491 page_entry *p;
1493 for (p = G.pages[order]; p != NULL; p = p->next)
1495 size_t num_objects = OBJECTS_IN_PAGE (p);
1496 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1498 #ifdef ENABLE_CHECKING
1499 /* The data should be page-aligned. */
1500 if ((size_t) p->page & (G.pagesize - 1))
1501 abort ();
1502 #endif
1504 /* Pages that aren't in the topmost context are not collected;
1505 nevertheless, we need their in-use bit vectors to store GC
1506 marks. So, back them up first. */
1507 if (p->context_depth < G.context_depth)
1509 if (! save_in_use_p (p))
1510 save_in_use_p (p) = xmalloc (bitmap_size);
1511 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
 1514 /* Reset the number of free objects and clear the
1515 in-use bits. These will be adjusted by mark_obj. */
1516 p->num_free_objects = num_objects;
1517 memset (p->in_use_p, 0, bitmap_size);
1519 /* Make sure the one-past-the-end bit is always set. */
1520 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1521 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1526 /* Free all empty pages. Partially empty pages need no attention
1527 because the `mark' bit doubles as an `unused' bit. */
1529 static inline void
1530 sweep_pages (void)
1532 unsigned order;
1534 for (order = 2; order < NUM_ORDERS; order++)
1536 /* The last page-entry to consider, regardless of entries
1537 placed at the end of the list. */
1538 page_entry * const last = G.page_tails[order];
1540 size_t num_objects;
1541 size_t live_objects;
1542 page_entry *p, *previous;
1543 int done;
1545 p = G.pages[order];
1546 if (p == NULL)
1547 continue;
1549 previous = NULL;
1552 page_entry *next = p->next;
1554 /* Loop until all entries have been examined. */
1555 done = (p == last);
1557 num_objects = OBJECTS_IN_PAGE (p);
1559 /* Add all live objects on this page to the count of
1560 allocated memory. */
1561 live_objects = num_objects - p->num_free_objects;
1563 G.allocated += OBJECT_SIZE (order) * live_objects;
1565 /* Only objects on pages in the topmost context should get
1566 collected. */
1567 if (p->context_depth < G.context_depth)
1570 /* Remove the page if it's empty. */
1571 else if (live_objects == 0)
1573 if (! previous)
1574 G.pages[order] = next;
1575 else
1576 previous->next = next;
1578 /* Are we removing the last element? */
1579 if (p == G.page_tails[order])
1580 G.page_tails[order] = previous;
1581 free_page (p);
1582 p = previous;
1585 /* If the page is full, move it to the end. */
1586 else if (p->num_free_objects == 0)
1588 /* Don't move it if it's already at the end. */
1589 if (p != G.page_tails[order])
1591 /* Move p to the end of the list. */
1592 p->next = NULL;
1593 G.page_tails[order]->next = p;
1595 /* Update the tail pointer... */
1596 G.page_tails[order] = p;
1598 /* ... and the head pointer, if necessary. */
1599 if (! previous)
1600 G.pages[order] = next;
1601 else
1602 previous->next = next;
1603 p = previous;
1607 /* If we've fallen through to here, it's a page in the
1608 topmost context that is neither full nor empty. Such a
1609 page must precede pages at lesser context depth in the
1610 list, so move it to the head. */
1611 else if (p != G.pages[order])
1613 previous->next = p->next;
1614 p->next = G.pages[order];
1615 G.pages[order] = p;
1616 /* Are we moving the last element? */
1617 if (G.page_tails[order] == p)
1618 G.page_tails[order] = previous;
1619 p = previous;
1622 previous = p;
1623 p = next;
1625 while (! done);
1627 /* Now, restore the in_use_p vectors for any pages from contexts
1628 other than the current one. */
1629 for (p = G.pages[order]; p; p = p->next)
1630 if (p->context_depth != G.context_depth)
1631 ggc_recalculate_in_use_p (p);
1635 #ifdef ENABLE_GC_CHECKING
1636 /* Clobber all free objects. */
1638 static inline void
1639 poison_pages (void)
1641 unsigned order;
1643 for (order = 2; order < NUM_ORDERS; order++)
1645 size_t size = OBJECT_SIZE (order);
1646 page_entry *p;
1648 for (p = G.pages[order]; p != NULL; p = p->next)
1650 size_t num_objects;
1651 size_t i;
1653 if (p->context_depth != G.context_depth)
1654 /* Since we don't do any collection for pages in pushed
1655 contexts, there's no need to do any poisoning. And
1656 besides, the IN_USE_P array isn't valid until we pop
1657 contexts. */
1658 continue;
1660 num_objects = OBJECTS_IN_PAGE (p);
1661 for (i = 0; i < num_objects; i++)
1663 size_t word, bit;
1664 word = i / HOST_BITS_PER_LONG;
1665 bit = i % HOST_BITS_PER_LONG;
1666 if (((p->in_use_p[word] >> bit) & 1) == 0)
1668 char *object = p->page + i * size;
1670 /* Keep poison-by-write when we expect to use Valgrind,
 1671 so exactly the same memory semantics are kept, in case
1672 there are memory errors. We override this request
1673 below. */
1674 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
1675 memset (object, 0xa5, size);
1677 /* Drop the handle to avoid handle leak. */
1678 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
1684 #endif
1686 /* Top level mark-and-sweep routine. */
1688 void
1689 ggc_collect (void)
1691 /* Avoid frequent unnecessary work by skipping collection if the
1692 total allocations haven't expanded much since the last
1693 collection. */
1694 float allocated_last_gc =
1695 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1697 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1699 if (G.allocated < allocated_last_gc + min_expand)
1700 return;
1702 timevar_push (TV_GC);
1703 if (!quiet_flag)
1704 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1706 /* Zero the total allocated bytes. This will be recalculated in the
1707 sweep phase. */
1708 G.allocated = 0;
1710 /* Release the pages we freed the last time we collected, but didn't
1711 reuse in the interim. */
1712 release_pages ();
1714 /* Indicate that we've seen collections at this context depth. */
1715 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1717 clear_marks ();
1718 ggc_mark_roots ();
1720 #ifdef ENABLE_GC_CHECKING
1721 poison_pages ();
1722 #endif
1724 sweep_pages ();
1726 G.allocated_last_gc = G.allocated;
1728 timevar_pop (TV_GC);
1730 if (!quiet_flag)
1731 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1734 /* Print allocation statistics. */
1735 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1736 ? (x) \
1737 : ((x) < 1024*1024*10 \
1738 ? (x) / 1024 \
1739 : (x) / (1024*1024))))
1740 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
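/* For example (illustrative sizes): SCALE (2000000) == 1953 with
   LABEL 'k', while SCALE (5000) == 5000 with LABEL ' ', so small
   totals print in bytes and larger ones in kilobytes or megabytes.  */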
1742 void
1743 ggc_print_statistics (void)
1745 struct ggc_statistics stats;
1746 unsigned int i;
1747 size_t total_overhead = 0;
1749 /* Clear the statistics. */
1750 memset (&stats, 0, sizeof (stats));
1752 /* Make sure collection will really occur. */
1753 G.allocated_last_gc = 0;
1755 /* Collect and print the statistics common across collectors. */
1756 ggc_print_common_statistics (stderr, &stats);
1758 /* Release free pages so that we will not count the bytes allocated
1759 there as part of the total allocated memory. */
1760 release_pages ();
1762 /* Collect some information about the various sizes of
1763 allocation. */
1764 fprintf (stderr, "\n%-5s %10s %10s %10s\n",
1765 "Size", "Allocated", "Used", "Overhead");
1766 for (i = 0; i < NUM_ORDERS; ++i)
1768 page_entry *p;
1769 size_t allocated;
1770 size_t in_use;
1771 size_t overhead;
1773 /* Skip empty entries. */
1774 if (!G.pages[i])
1775 continue;
1777 overhead = allocated = in_use = 0;
1779 /* Figure out the total number of bytes allocated for objects of
1780 this size, and how many of them are actually in use. Also figure
1781 out how much memory the page table is using. */
1782 for (p = G.pages[i]; p; p = p->next)
1784 allocated += p->bytes;
1785 in_use +=
1786 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
1788 overhead += (sizeof (page_entry) - sizeof (long)
1789 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
1791 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
1792 (unsigned long) OBJECT_SIZE (i),
1793 SCALE (allocated), LABEL (allocated),
1794 SCALE (in_use), LABEL (in_use),
1795 SCALE (overhead), LABEL (overhead));
1796 total_overhead += overhead;
1798 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
1799 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1800 SCALE (G.allocated), LABEL(G.allocated),
1801 SCALE (total_overhead), LABEL (total_overhead));
1804 struct ggc_pch_data
1806 struct ggc_pch_ondisk
1808 unsigned totals[NUM_ORDERS];
1809 } d;
1810 size_t base[NUM_ORDERS];
1811 size_t written[NUM_ORDERS];
1814 struct ggc_pch_data *
1815 init_ggc_pch (void)
1817 return xcalloc (sizeof (struct ggc_pch_data), 1);
1820 void
1821 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1822 size_t size)
1824 unsigned order;
1826 if (size <= 256)
1827 order = size_lookup[size];
1828 else
1830 order = 9;
1831 while (size > OBJECT_SIZE (order))
1832 order++;
1835 d->d.totals[order]++;
1838 size_t
1839 ggc_pch_total_size (struct ggc_pch_data *d)
1841 size_t a = 0;
1842 unsigned i;
1844 for (i = 0; i < NUM_ORDERS; i++)
1845 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1846 return a;
1849 void
1850 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
1852 size_t a = (size_t) base;
1853 unsigned i;
1855 for (i = 0; i < NUM_ORDERS; i++)
1857 d->base[i] = a;
1858 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
1863 char *
1864 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1865 size_t size)
1867 unsigned order;
1868 char *result;
1870 if (size <= 256)
1871 order = size_lookup[size];
1872 else
1874 order = 9;
1875 while (size > OBJECT_SIZE (order))
1876 order++;
1879 result = (char *) d->base[order];
1880 d->base[order] += OBJECT_SIZE (order);
1881 return result;
1884 void
1885 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1886 FILE *f ATTRIBUTE_UNUSED)
1888 /* Nothing to do. */
1891 void
1892 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1893 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
1894 size_t size)
1896 unsigned order;
1898 if (size <= 256)
1899 order = size_lookup[size];
1900 else
1902 order = 9;
1903 while (size > OBJECT_SIZE (order))
1904 order++;
1907 if (fwrite (x, size, 1, f) != 1)
1908 fatal_error ("can't write PCH file: %m");
1910 /* In the current implementation, SIZE is always equal to
1911 OBJECT_SIZE (order) and so the fseek is never executed. */
1912 if (size != OBJECT_SIZE (order)
1913 && fseek (f, OBJECT_SIZE (order) - size, SEEK_CUR) != 0)
1914 fatal_error ("can't write PCH file: %m");
1916 d->written[order]++;
1917 if (d->written[order] == d->d.totals[order]
1918 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
1919 G.pagesize),
1920 SEEK_CUR) != 0)
1921 fatal_error ("can't write PCH file: %m");
1924 void
1925 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
1927 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
1928 fatal_error ("can't write PCH file: %m");
1929 free (d);
 1932 /* Move the PCH PTE entries just added to the end of by_depth to the
1933 front. */
1935 static void
1936 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
1938 unsigned i;
1940 /* First, we swap the new entries to the front of the varrays. */
1941 page_entry **new_by_depth;
1942 unsigned long **new_save_in_use;
1944 new_by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
1945 new_save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
1947 memcpy (&new_by_depth[0],
1948 &G.by_depth[count_old_page_tables],
1949 count_new_page_tables * sizeof (void *));
1950 memcpy (&new_by_depth[count_new_page_tables],
1951 &G.by_depth[0],
1952 count_old_page_tables * sizeof (void *));
1953 memcpy (&new_save_in_use[0],
1954 &G.save_in_use[count_old_page_tables],
1955 count_new_page_tables * sizeof (void *));
1956 memcpy (&new_save_in_use[count_new_page_tables],
1957 &G.save_in_use[0],
1958 count_old_page_tables * sizeof (void *));
1960 free (G.by_depth);
1961 free (G.save_in_use);
1963 G.by_depth = new_by_depth;
1964 G.save_in_use = new_save_in_use;
1966 /* Now update all the index_by_depth fields. */
1967 for (i = G.by_depth_in_use; i > 0; --i)
1969 page_entry *p = G.by_depth[i-1];
1970 p->index_by_depth = i-1;
1973 /* And last, we update the depth pointers in G.depth. The first
1974 entry is already 0, and context 0 entries always start at index
1975 0, so there is nothing to update in the first slot. We need a
 1976 second slot only if we have old PTEs, and if we do, they start
1977 at index count_new_page_tables. */
1978 if (count_old_page_tables)
1979 push_depth (count_new_page_tables);
1982 void
1983 ggc_pch_read (FILE *f, void *addr)
1985 struct ggc_pch_ondisk d;
1986 unsigned i;
1987 char *offs = addr;
1988 unsigned long count_old_page_tables;
1989 unsigned long count_new_page_tables;
1991 count_old_page_tables = G.by_depth_in_use;
1993 /* We've just read in a PCH file. So, every object that used to be
1994 allocated is now free. */
1995 clear_marks ();
 1996 #ifdef ENABLE_GC_CHECKING
1997 poison_pages ();
1998 #endif
2000 /* No object read from a PCH file should ever be freed. So, set the
2001 context depth to 1, and set the depth of all the currently-allocated
2002 pages to be 1 too. PCH pages will have depth 0. */
2003 if (G.context_depth != 0)
2004 abort ();
2005 G.context_depth = 1;
2006 for (i = 0; i < NUM_ORDERS; i++)
2008 page_entry *p;
2009 for (p = G.pages[i]; p != NULL; p = p->next)
2010 p->context_depth = G.context_depth;
2013 /* Allocate the appropriate page-table entries for the pages read from
2014 the PCH file. */
2015 if (fread (&d, sizeof (d), 1, f) != 1)
2016 fatal_error ("can't read PCH file: %m");
2018 for (i = 0; i < NUM_ORDERS; i++)
2020 struct page_entry *entry;
2021 char *pte;
2022 size_t bytes;
2023 size_t num_objs;
2024 size_t j;
2026 if (d.totals[i] == 0)
2027 continue;
2029 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
2030 num_objs = bytes / OBJECT_SIZE (i);
2031 entry = xcalloc (1, (sizeof (struct page_entry)
2032 - sizeof (long)
2033 + BITMAP_SIZE (num_objs + 1)));
2034 entry->bytes = bytes;
2035 entry->page = offs;
2036 entry->context_depth = 0;
2037 offs += bytes;
2038 entry->num_free_objects = 0;
2039 entry->order = i;
2041 for (j = 0;
2042 j + HOST_BITS_PER_LONG <= num_objs + 1;
2043 j += HOST_BITS_PER_LONG)
2044 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2045 for (; j < num_objs + 1; j++)
2046 entry->in_use_p[j / HOST_BITS_PER_LONG]
2047 |= 1L << (j % HOST_BITS_PER_LONG);
2049 for (pte = entry->page;
2050 pte < entry->page + entry->bytes;
2051 pte += G.pagesize)
2052 set_page_table_entry (pte, entry);
2054 if (G.page_tails[i] != NULL)
2055 G.page_tails[i]->next = entry;
2056 else
2057 G.pages[i] = entry;
2058 G.page_tails[i] = entry;
2060 /* We start off by just adding all the new information to the
 2061 end of the varrays; later, we will move the new information
2062 to the front of the varrays, as the PCH page tables are at
2063 context 0. */
2064 push_by_depth (entry, 0);
2067 /* Now, we update the various data structures that speed page table
2068 handling. */
2069 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2071 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2073 /* Update the statistics. */
2074 G.allocated = G.allocated_last_gc = offs - (char *)addr;