1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "tree.h"
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "toplev.h"
30 #include "flags.h"
31 #include "ggc.h"
32 #include "timevar.h"
33 #include "params.h"
34 #ifdef ENABLE_VALGRIND_CHECKING
35 # ifdef HAVE_VALGRIND_MEMCHECK_H
36 # include <valgrind/memcheck.h>
37 # elif defined HAVE_MEMCHECK_H
38 # include <memcheck.h>
39 # else
40 # include <valgrind.h>
41 # endif
42 #else
43 /* Avoid #ifdef:s when we can help it. */
44 #define VALGRIND_DISCARD(x)
45 #endif
47 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
48 file open. Prefer either to valloc. */
49 #ifdef HAVE_MMAP_ANON
50 # undef HAVE_MMAP_DEV_ZERO
52 # include <sys/mman.h>
53 # ifndef MAP_FAILED
54 # define MAP_FAILED -1
55 # endif
56 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
57 # define MAP_ANONYMOUS MAP_ANON
58 # endif
59 # define USING_MMAP
61 #endif
63 #ifdef HAVE_MMAP_DEV_ZERO
65 # include <sys/mman.h>
66 # ifndef MAP_FAILED
67 # define MAP_FAILED -1
68 # endif
69 # define USING_MMAP
71 #endif
73 #ifndef USING_MMAP
74 #define USING_MALLOC_PAGE_GROUPS
75 #endif
77 /* Strategy:
79 This garbage-collecting allocator allocates objects on one of a set
80 of pages. Each page can allocate objects of a single size only;
81 available sizes are powers of two starting at four bytes. The size
82 of an allocation request is rounded up to the next power of two
83 (`order'), and satisfied from the appropriate page.
85 Each page is recorded in a page-entry, which also maintains an
86 in-use bitmap of object positions on the page. This allows the
87 allocation state of a particular object to be flipped without
88 touching the page itself.
90 Each page-entry also has a context depth, which is used to track
91 pushing and popping of allocation contexts. Only objects allocated
92 in the current (highest-numbered) context may be collected.
94 Page entries are arranged in an array of singly-linked lists. The
95 array is indexed by the allocation size, in bits, of the pages on
96 it; i.e. all pages on a list allocate objects of the same size.
97 Pages are ordered on the list such that all non-full pages precede
98 all full pages, with non-full pages arranged in order of decreasing
99 context depth.
101 Empty pages (of all orders) are kept on a single page cache list,
102 and are considered first when new pages are required; they are
103 deallocated at the start of the next collection if they haven't
104 been recycled by then. */
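/* Illustrative sketch, not part of the allocator: how a request size
   maps to an allocation order under the power-of-two scheme described
   above.  The helper name is hypothetical and the block is not
   compiled; the real mapping also uses the extra, non-power-of-two
   orders defined further down in this file.  */
#if 0
static unsigned
example_order_for_size (size_t size)
{
  unsigned order = 2;		/* Order 2 is the four-byte minimum.  */
  while (((size_t) 1 << order) < size)
    order++;
  return order;			/* example_order_for_size (24) == 5,
				   i.e. a 32-byte object.  */
}
#endif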
106 /* Define GGC_DEBUG_LEVEL to print debugging information.
107 0: No debugging output.
108 1: GC statistics only.
109 2: Page-entry allocations/deallocations as well.
110 3: Object allocations as well.
111 4: Object marks as well. */
112 #define GGC_DEBUG_LEVEL (0)
114 #ifndef HOST_BITS_PER_PTR
115 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
116 #endif
119 /* A two-level tree is used to look up the page-entry for a given
120 pointer. Two chunks of the pointer's bits are extracted to index
121 the first and second levels of the tree, as follows:
125 msb +-- PAGE_L1_BITS --+-- PAGE_L2_BITS --+-- HOST_PAGE_SIZE_BITS --+ lsb
131 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
132 pages are aligned on system page boundaries. The next most
133 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
134 index values in the lookup table, respectively.
136 For 32-bit architectures and the settings below, there are no
137 leftover bits. For architectures with wider pointers, the lookup
138 tree points to a list of pages, which must be scanned to find the
139 correct one. */
141 #define PAGE_L1_BITS (8)
142 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
143 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
144 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
146 #define LOOKUP_L1(p) \
147 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
149 #define LOOKUP_L2(p) \
150 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
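/* Worked example for the two macros above (illustrative assumption:
   4096-byte system pages, so G.lg_pagesize == 12 and PAGE_L2_BITS ==
   32 - 8 - 12 == 12).  For a pointer p == 0xa1b2c3d4, LOOKUP_L1 (p)
   extracts bits 31-24 == 0xa1 and LOOKUP_L2 (p) extracts bits 23-12
   == 0xb2c, so the page entry for p lives at G.lookup[0xa1][0xb2c];
   the low 12 bits are p's offset within its page and play no part in
   the lookup.  */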
152 /* The number of objects per allocation page, for objects on a page of
153 the indicated ORDER. */
154 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
156 /* The number of objects in P. */
157 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
159 /* The size of an object on a page of the indicated ORDER. */
160 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
162 /* For speed, we avoid doing a general integer divide to locate the
163 offset in the allocation bitmap, by precalculating numbers M, S
164 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
165 within the page which is evenly divisible by the object size Z. */
166 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
167 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
168 #define OFFSET_TO_BIT(OFFSET, ORDER) \
169 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
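/* Worked example (illustrative, assuming a 32-bit size_t): suppose
   OBJECT_SIZE (ORDER) == 24 == 2^3 * 3.  compute_inverse below stores
   DIV_SHIFT (ORDER) == 3 and DIV_MULT (ORDER) == 0xaaaaaaab, the
   inverse of 3 modulo 2^32 (3 * 0xaaaaaaab == 2^33 + 1).  Then for
   OFFSET == 48, the product 48 * 0xaaaaaaab wraps to 16, and
   16 >> 3 == 2 == 48 / 24, without executing a division.  */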
171 /* The number of extra orders, not corresponding to power-of-two sized
172 objects. */
174 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
176 #define RTL_SIZE(NSLOTS) \
177 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
179 #define TREE_EXP_SIZE(OPS) \
180 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
182 /* The Ith entry is the maximum size of an object to be stored in the
183 Ith extra order. Adding a new entry to this array is the *only*
184 thing you need to do to add a new special allocation size. */
186 static const size_t extra_order_size_table[] = {
187 sizeof (struct tree_decl),
188 sizeof (struct tree_list),
189 TREE_EXP_SIZE (2),
190 RTL_SIZE (2), /* MEM, PLUS, etc. */
191 RTL_SIZE (9), /* INSN */
192 };
194 /* The total number of orders. */
196 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
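/* For example, on a host with 32-bit pointers and the five-entry
   extra_order_size_table above, NUM_ORDERS is 32 + 5 == 37: orders 0
   through 31 cover the power-of-two sizes, orders 32 through 36 the
   special sizes.  */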
198 /* We use this structure to determine the alignment required for
199 allocations. For power-of-two sized allocations, that's not a
200 problem, but it does matter for odd-sized allocations. */
202 struct max_alignment {
203 char c;
204 union {
205 HOST_WIDEST_INT i;
206 long double d;
207 } u;
208 };
210 /* The biggest alignment required. */
212 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
214 /* Compute the smallest nonnegative number which when added to X gives
215 a multiple of F. */
217 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
219 /* Compute the smallest multiple of F that is >= X. */
221 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
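/* Quick sanity check of the two macros above: for x == 10 and f == 8,
   ROUND_UP_VALUE (10, 8) == 7 - ((7 + 10) % 8) == 7 - 1 == 6, and
   10 + 6 == 16 is the next multiple of 8; ROUND_UP (10, 8) ==
   CEIL (10, 8) * 8 == 2 * 8 == 16 yields that multiple directly.  */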
223 /* The Ith entry is the number of objects on a page of order I. */
225 static unsigned objects_per_page_table[NUM_ORDERS];
227 /* The Ith entry is the size of an object on a page of order I. */
229 static size_t object_size_table[NUM_ORDERS];
231 /* The Ith entry is a pair of numbers (mult, shift) such that
232 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
233 for all k evenly divisible by OBJECT_SIZE(I). */
235 static struct
236 {
237 size_t mult;
238 unsigned int shift;
239 }
240 inverse_table[NUM_ORDERS];
242 /* A page_entry records the status of an allocation page. This
243 structure is dynamically sized to fit the bitmap in_use_p. */
244 typedef struct page_entry
245 {
246 /* The next page-entry with objects of the same size, or NULL if
247 this is the last page-entry. */
248 struct page_entry *next;
250 /* The number of bytes allocated. (This will always be a multiple
251 of the host system page size.) */
252 size_t bytes;
254 /* The address at which the memory is allocated. */
255 char *page;
257 #ifdef USING_MALLOC_PAGE_GROUPS
258 /* Back pointer to the page group this page came from. */
259 struct page_group *group;
260 #endif
262 /* This is the index in the by_depth varray where this page entry
263 can be found. */
264 unsigned long index_by_depth;
266 /* Context depth of this page. */
267 unsigned short context_depth;
269 /* The number of free objects remaining on this page. */
270 unsigned short num_free_objects;
272 /* A likely candidate for the bit position of a free object for the
273 next allocation from this page. */
274 unsigned short next_bit_hint;
277 /* The lg of the size of objects allocated from this page. */
277 unsigned char order;
279 /* A bit vector indicating whether or not objects are in use. The
280 Nth bit is one if the Nth object on this page is allocated. This
281 array is dynamically sized. */
282 unsigned long in_use_p[1];
283 } page_entry;
285 #ifdef USING_MALLOC_PAGE_GROUPS
286 /* A page_group describes a large allocation from malloc, from which
287 we parcel out aligned pages. */
288 typedef struct page_group
289 {
290 /* A linked list of all extant page groups. */
291 struct page_group *next;
293 /* The address we received from malloc. */
294 char *allocation;
296 /* The size of the block. */
297 size_t alloc_size;
299 /* A bitmask of pages in use. */
300 unsigned int in_use;
301 } page_group;
302 #endif
304 #if HOST_BITS_PER_PTR <= 32
306 /* On 32-bit hosts, we use a two level page table, as pictured above. */
307 typedef page_entry **page_table[PAGE_L1_SIZE];
309 #else
311 /* On 64-bit hosts, we use the same two level page tables plus a linked
312 list that disambiguates the top 32-bits. There will almost always be
313 exactly one entry in the list. */
314 typedef struct page_table_chain
315 {
316 struct page_table_chain *next;
317 size_t high_bits;
318 page_entry **table[PAGE_L1_SIZE];
319 } *page_table;
321 #endif
323 /* The rest of the global variables. */
324 static struct globals
325 {
326 /* The Nth element in this array is a page with objects of size 2^N.
327 If there are any pages with free objects, they will be at the
328 head of the list. NULL if there are no page-entries for this
329 object size. */
330 page_entry *pages[NUM_ORDERS];
332 /* The Nth element in this array is the last page with objects of
333 size 2^N. NULL if there are no page-entries for this object
334 size. */
335 page_entry *page_tails[NUM_ORDERS];
337 /* Lookup table for associating allocation pages with object addresses. */
338 page_table lookup;
340 /* The system's page size. */
341 size_t pagesize;
342 size_t lg_pagesize;
344 /* Bytes currently allocated. */
345 size_t allocated;
347 /* Bytes currently allocated at the end of the last collection. */
348 size_t allocated_last_gc;
350 /* Total amount of memory mapped. */
351 size_t bytes_mapped;
353 /* Bit N set if any allocations have been done at context depth N. */
354 unsigned long context_depth_allocations;
356 /* Bit N set if any collections have been done at context depth N. */
357 unsigned long context_depth_collections;
359 /* The current depth in the context stack. */
360 unsigned short context_depth;
362 /* A file descriptor open to /dev/zero for reading. */
363 #if defined (HAVE_MMAP_DEV_ZERO)
364 int dev_zero_fd;
365 #endif
367 /* A cache of free system pages. */
368 page_entry *free_pages;
370 #ifdef USING_MALLOC_PAGE_GROUPS
371 page_group *page_groups;
372 #endif
374 /* The file descriptor for debugging output. */
375 FILE *debug_file;
377 /* Current number of elements in use in depth below. */
378 unsigned int depth_in_use;
380 /* Maximum number of elements that can be used before resizing. */
381 unsigned int depth_max;
383 /* Each element of this array is an index into by_depth where the given
384 depth starts. This array is indexed by the depth we are
385 interested in. */
386 unsigned int *depth;
388 /* Current number of elements in use in by_depth below. */
389 unsigned int by_depth_in_use;
391 /* Maximum number of elements that can be used before resizing. */
392 unsigned int by_depth_max;
394 /* Each element of this array is a pointer to a page_entry, all
395 page_entries can be found in here by increasing depth.
396 index_by_depth in the page_entry is the index into this data
397 structure where that page_entry can be found. This is used to
398 speed up finding all page_entries at a particular depth. */
399 page_entry **by_depth;
401 /* Each element is a pointer to the saved in_use_p bits, if any,
402 zero otherwise. We allocate them all together, to enable a
403 better runtime data access pattern. */
404 unsigned long **save_in_use;
406 #ifdef ENABLE_GC_ALWAYS_COLLECT
407 /* List of free objects to be verified as actually free on the
408 next collection. */
409 struct free_object
410 {
411 void *object;
412 struct free_object *next;
413 } *free_object_list;
414 #endif
416 #ifdef GATHER_STATISTICS
417 struct
418 {
419 /* Total memory allocated with ggc_alloc. */
420 unsigned long long total_allocated;
421 /* Total overhead for memory to be allocated with ggc_alloc. */
422 unsigned long long total_overhead;
424 /* Total allocations and overhead for sizes less than 32, 64 and 128.
425 These sizes are interesting because they are typical cache line
426 sizes. */
428 unsigned long long total_allocated_under32;
429 unsigned long long total_overhead_under32;
431 unsigned long long total_allocated_under64;
432 unsigned long long total_overhead_under64;
434 unsigned long long total_allocated_under128;
435 unsigned long long total_overhead_under128;
437 /* The allocations for each of the allocation orders. */
438 unsigned long long total_allocated_per_order[NUM_ORDERS];
440 /* The overhead for each of the allocation orders. */
441 unsigned long long total_overhead_per_order[NUM_ORDERS];
442 } stats;
443 #endif
444 } G;
446 /* The size in bytes required to maintain a bitmap for the objects
447 on a page-entry. */
448 #define BITMAP_SIZE(Num_objects) \
449 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
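/* For instance, on a host with 32-bit longs, a page holding 1024
   objects needs BITMAP_SIZE (1024 + 1) == CEIL (1025, 32) * 4 == 132
   bytes of in-use bits; the "+ 1" accounts for the one-past-the-end
   sentinel bit that the allocation hint relies on.  */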
451 /* Allocate pages in chunks of this size, to throttle calls to memory
452 allocation routines. The first page is used, the rest go onto the
453 free list. This cannot be larger than HOST_BITS_PER_INT for the
454 in_use bitmask for page_group. */
455 #define GGC_QUIRE_SIZE 16
457 /* Initial guess as to how many page table entries we might need. */
458 #define INITIAL_PTE_COUNT 128
460 static int ggc_allocated_p (const void *);
461 static page_entry *lookup_page_table_entry (const void *);
462 static void set_page_table_entry (void *, page_entry *);
463 #ifdef USING_MMAP
464 static char *alloc_anon (char *, size_t);
465 #endif
466 #ifdef USING_MALLOC_PAGE_GROUPS
467 static size_t page_group_index (char *, char *);
468 static void set_page_group_in_use (page_group *, char *);
469 static void clear_page_group_in_use (page_group *, char *);
470 #endif
471 static struct page_entry * alloc_page (unsigned);
472 static void free_page (struct page_entry *);
473 static void release_pages (void);
474 static void clear_marks (void);
475 static void sweep_pages (void);
476 static void ggc_recalculate_in_use_p (page_entry *);
477 static void compute_inverse (unsigned);
478 static inline void adjust_depth (void);
479 static void move_ptes_to_front (int, int);
481 void debug_print_page_list (int);
482 static void push_depth (unsigned int);
483 static void push_by_depth (page_entry *, unsigned long *);
484 struct alloc_zone *rtl_zone = NULL;
485 struct alloc_zone *tree_zone = NULL;
486 struct alloc_zone *garbage_zone = NULL;
488 /* Push an entry onto G.depth. */
490 inline static void
491 push_depth (unsigned int i)
493 if (G.depth_in_use >= G.depth_max)
495 G.depth_max *= 2;
496 G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
498 G.depth[G.depth_in_use++] = i;
501 /* Push an entry onto G.by_depth and G.save_in_use. */
503 inline static void
504 push_by_depth (page_entry *p, unsigned long *s)
506 if (G.by_depth_in_use >= G.by_depth_max)
508 G.by_depth_max *= 2;
509 G.by_depth = xrealloc (G.by_depth,
510 G.by_depth_max * sizeof (page_entry *));
511 G.save_in_use = xrealloc (G.save_in_use,
512 G.by_depth_max * sizeof (unsigned long *));
514 G.by_depth[G.by_depth_in_use] = p;
515 G.save_in_use[G.by_depth_in_use++] = s;
518 #if (GCC_VERSION < 3001)
519 #define prefetch(X) ((void) X)
520 #else
521 #define prefetch(X) __builtin_prefetch (X)
522 #endif
524 #define save_in_use_p_i(__i) \
525 (G.save_in_use[__i])
526 #define save_in_use_p(__p) \
527 (save_in_use_p_i (__p->index_by_depth))
529 /* Returns nonzero if P was allocated in GC'able memory. */
531 static inline int
532 ggc_allocated_p (const void *p)
534 page_entry ***base;
535 size_t L1, L2;
537 #if HOST_BITS_PER_PTR <= 32
538 base = &G.lookup[0];
539 #else
540 page_table table = G.lookup;
541 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
542 while (1)
544 if (table == NULL)
545 return 0;
546 if (table->high_bits == high_bits)
547 break;
548 table = table->next;
550 base = &table->table[0];
551 #endif
553 /* Extract the level 1 and 2 indices. */
554 L1 = LOOKUP_L1 (p);
555 L2 = LOOKUP_L2 (p);
557 return base[L1] && base[L1][L2];
560 /* Traverse the page table and find the entry for a page.
561 Die (probably) if the object wasn't allocated via GC. */
563 static inline page_entry *
564 lookup_page_table_entry (const void *p)
566 page_entry ***base;
567 size_t L1, L2;
569 #if HOST_BITS_PER_PTR <= 32
570 base = &G.lookup[0];
571 #else
572 page_table table = G.lookup;
573 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
574 while (table->high_bits != high_bits)
575 table = table->next;
576 base = &table->table[0];
577 #endif
579 /* Extract the level 1 and 2 indices. */
580 L1 = LOOKUP_L1 (p);
581 L2 = LOOKUP_L2 (p);
583 return base[L1][L2];
586 /* Set the page table entry for a page. */
588 static void
589 set_page_table_entry (void *p, page_entry *entry)
591 page_entry ***base;
592 size_t L1, L2;
594 #if HOST_BITS_PER_PTR <= 32
595 base = &G.lookup[0];
596 #else
597 page_table table;
598 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
599 for (table = G.lookup; table; table = table->next)
600 if (table->high_bits == high_bits)
601 goto found;
603 /* Not found -- allocate a new table. */
604 table = xcalloc (1, sizeof(*table));
605 table->next = G.lookup;
606 table->high_bits = high_bits;
607 G.lookup = table;
608 found:
609 base = &table->table[0];
610 #endif
612 /* Extract the level 1 and 2 indices. */
613 L1 = LOOKUP_L1 (p);
614 L2 = LOOKUP_L2 (p);
616 if (base[L1] == NULL)
617 base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
619 base[L1][L2] = entry;
622 /* Prints the page-entry for object size ORDER, for debugging. */
624 void
625 debug_print_page_list (int order)
627 page_entry *p;
628 printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
629 (void *) G.page_tails[order]);
630 p = G.pages[order];
631 while (p != NULL)
633 printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
634 p->num_free_objects);
635 p = p->next;
637 printf ("NULL\n");
638 fflush (stdout);
641 #ifdef USING_MMAP
642 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
643 (if non-null). The ifdef structure here is intended to cause a
644 compile error unless exactly one of the HAVE_* is defined. */
646 static inline char *
647 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
649 #ifdef HAVE_MMAP_ANON
650 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
651 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
652 #endif
653 #ifdef HAVE_MMAP_DEV_ZERO
654 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
655 MAP_PRIVATE, G.dev_zero_fd, 0);
656 #endif
658 if (page == (char *) MAP_FAILED)
660 perror ("virtual memory exhausted");
661 exit (FATAL_EXIT_CODE);
664 /* Remember that we allocated this memory. */
665 G.bytes_mapped += size;
667 /* Pretend we don't have access to the allocated pages. We'll enable
668 access to smaller pieces of the area in ggc_alloc. Discard the
669 handle to avoid handle leak. */
670 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
672 return page;
674 #endif
675 #ifdef USING_MALLOC_PAGE_GROUPS
676 /* Compute the index for this page into the page group. */
678 static inline size_t
679 page_group_index (char *allocation, char *page)
681 return (size_t) (page - allocation) >> G.lg_pagesize;
684 /* Set and clear the in_use bit for this page in the page group. */
686 static inline void
687 set_page_group_in_use (page_group *group, char *page)
689 group->in_use |= 1 << page_group_index (group->allocation, page);
692 static inline void
693 clear_page_group_in_use (page_group *group, char *page)
695 group->in_use &= ~(1 << page_group_index (group->allocation, page));
697 #endif
699 /* Allocate a new page for allocating objects of size 2^ORDER,
700 and return an entry for it. The entry is not added to the
701 appropriate page_table list. */
703 static inline struct page_entry *
704 alloc_page (unsigned order)
706 struct page_entry *entry, *p, **pp;
707 char *page;
708 size_t num_objects;
709 size_t bitmap_size;
710 size_t page_entry_size;
711 size_t entry_size;
712 #ifdef USING_MALLOC_PAGE_GROUPS
713 page_group *group;
714 #endif
716 num_objects = OBJECTS_PER_PAGE (order);
717 bitmap_size = BITMAP_SIZE (num_objects + 1);
718 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
719 entry_size = num_objects * OBJECT_SIZE (order);
720 if (entry_size < G.pagesize)
721 entry_size = G.pagesize;
723 entry = NULL;
724 page = NULL;
726 /* Check the list of free pages for one we can use. */
727 for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
728 if (p->bytes == entry_size)
729 break;
731 if (p != NULL)
733 /* Recycle the allocated memory from this page ... */
734 *pp = p->next;
735 page = p->page;
737 #ifdef USING_MALLOC_PAGE_GROUPS
738 group = p->group;
739 #endif
741 /* ... and, if possible, the page entry itself. */
742 if (p->order == order)
744 entry = p;
745 memset (entry, 0, page_entry_size);
747 else
748 free (p);
750 #ifdef USING_MMAP
751 else if (entry_size == G.pagesize)
753 /* We want just one page. Allocate a bunch of them and put the
754 extras on the freelist. (Can only do this optimization with
755 mmap for backing store.) */
756 struct page_entry *e, *f = G.free_pages;
757 int i;
759 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
761 /* This loop counts down so that the chain will be in ascending
762 memory order. */
763 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
765 e = xcalloc (1, page_entry_size);
766 e->order = order;
767 e->bytes = G.pagesize;
768 e->page = page + (i << G.lg_pagesize);
769 e->next = f;
770 f = e;
773 G.free_pages = f;
775 else
776 page = alloc_anon (NULL, entry_size);
777 #endif
778 #ifdef USING_MALLOC_PAGE_GROUPS
779 else
781 /* Allocate a large block of memory and serve out the aligned
782 pages therein. This results in much less memory wastage
783 than the traditional implementation of valloc. */
785 char *allocation, *a, *enda;
786 size_t alloc_size, head_slop, tail_slop;
787 int multiple_pages = (entry_size == G.pagesize);
789 if (multiple_pages)
790 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
791 else
792 alloc_size = entry_size + G.pagesize - 1;
793 allocation = xmalloc (alloc_size);
795 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
796 head_slop = page - allocation;
797 if (multiple_pages)
798 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
799 else
800 tail_slop = alloc_size - entry_size - head_slop;
801 enda = allocation + alloc_size - tail_slop;
803 /* We allocated N pages, which are likely not aligned, leaving
804 us with N-1 usable pages. We plan to place the page_group
805 structure somewhere in the slop. */
806 if (head_slop >= sizeof (page_group))
807 group = (page_group *)page - 1;
808 else
810 /* We magically got an aligned allocation. Too bad, we have
811 to waste a page anyway. */
812 if (tail_slop == 0)
814 enda -= G.pagesize;
815 tail_slop += G.pagesize;
817 if (tail_slop < sizeof (page_group))
818 abort ();
819 group = (page_group *)enda;
820 tail_slop -= sizeof (page_group);
823 /* Remember that we allocated this memory. */
824 group->next = G.page_groups;
825 group->allocation = allocation;
826 group->alloc_size = alloc_size;
827 group->in_use = 0;
828 G.page_groups = group;
829 G.bytes_mapped += alloc_size;
831 /* If we allocated multiple pages, put the rest on the free list. */
832 if (multiple_pages)
834 struct page_entry *e, *f = G.free_pages;
835 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
837 e = xcalloc (1, page_entry_size);
838 e->order = order;
839 e->bytes = G.pagesize;
840 e->page = a;
841 e->group = group;
842 e->next = f;
843 f = e;
845 G.free_pages = f;
848 #endif
850 if (entry == NULL)
851 entry = xcalloc (1, page_entry_size);
853 entry->bytes = entry_size;
854 entry->page = page;
855 entry->context_depth = G.context_depth;
856 entry->order = order;
857 entry->num_free_objects = num_objects;
858 entry->next_bit_hint = 1;
860 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
862 #ifdef USING_MALLOC_PAGE_GROUPS
863 entry->group = group;
864 set_page_group_in_use (group, page);
865 #endif
867 /* Set the one-past-the-end in-use bit. This acts as a sentinel as we
868 increment the hint. */
869 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
870 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
872 set_page_table_entry (page, entry);
874 if (GGC_DEBUG_LEVEL >= 2)
875 fprintf (G.debug_file,
876 "Allocating page at %p, object size=%lu, data %p-%p\n",
877 (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
878 page + entry_size - 1);
880 return entry;
883 /* Adjust the size of G.depth so that no index greater than the one
884 used by the top of the G.by_depth is used. */
886 static inline void
887 adjust_depth (void)
889 page_entry *top;
891 if (G.by_depth_in_use)
893 top = G.by_depth[G.by_depth_in_use-1];
895 /* Peel back indices in depth that index into by_depth, so that
896 as new elements are added to by_depth, we note the indices
897 of those elements, if they are for new context depths. */
898 while (G.depth_in_use > (size_t)top->context_depth+1)
899 --G.depth_in_use;
903 /* For a page that is no longer needed, put it on the free page list. */
905 static void
906 free_page (page_entry *entry)
908 if (GGC_DEBUG_LEVEL >= 2)
909 fprintf (G.debug_file,
910 "Deallocating page at %p, data %p-%p\n", (void *) entry,
911 entry->page, entry->page + entry->bytes - 1);
913 /* Mark the page as inaccessible. Discard the handle to avoid handle
914 leak. */
915 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
917 set_page_table_entry (entry->page, NULL);
919 #ifdef USING_MALLOC_PAGE_GROUPS
920 clear_page_group_in_use (entry->group, entry->page);
921 #endif
923 if (G.by_depth_in_use > 1)
925 page_entry *top = G.by_depth[G.by_depth_in_use-1];
927 /* If they are at the same depth, put top element into freed
928 slot. */
929 if (entry->context_depth == top->context_depth)
931 int i = entry->index_by_depth;
932 G.by_depth[i] = top;
933 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
934 top->index_by_depth = i;
936 else
938 /* We cannot free a page from a context deeper than the
939 current one. */
940 abort ();
943 --G.by_depth_in_use;
945 adjust_depth ();
947 entry->next = G.free_pages;
948 G.free_pages = entry;
951 /* Release the free page cache to the system. */
953 static void
954 release_pages (void)
956 #ifdef USING_MMAP
957 page_entry *p, *next;
958 char *start;
959 size_t len;
961 /* Gather up adjacent pages so they are unmapped together. */
962 p = G.free_pages;
964 while (p)
966 start = p->page;
967 next = p->next;
968 len = p->bytes;
969 free (p);
970 p = next;
972 while (p && p->page == start + len)
974 next = p->next;
975 len += p->bytes;
976 free (p);
977 p = next;
980 munmap (start, len);
981 G.bytes_mapped -= len;
984 G.free_pages = NULL;
985 #endif
986 #ifdef USING_MALLOC_PAGE_GROUPS
987 page_entry **pp, *p;
988 page_group **gp, *g;
990 /* Remove all pages from free page groups from the list. */
991 pp = &G.free_pages;
992 while ((p = *pp) != NULL)
993 if (p->group->in_use == 0)
995 *pp = p->next;
996 free (p);
998 else
999 pp = &p->next;
1001 /* Remove all free page groups, and release the storage. */
1002 gp = &G.page_groups;
1003 while ((g = *gp) != NULL)
1004 if (g->in_use == 0)
1006 *gp = g->next;
1007 G.bytes_mapped -= g->alloc_size;
1008 free (g->allocation);
1010 else
1011 gp = &g->next;
1012 #endif
1015 /* This table provides a fast way to determine ceil(log_2(size)) for
1016 allocation requests. The minimum allocation size is eight bytes. */
1018 static unsigned char size_lookup[257] =
1019 {
1020 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1021 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1022 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1023 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1024 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1025 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1026 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1027 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1028 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1029 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1030 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1031 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1032 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1033 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1034 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1035 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1036 8
1037 };
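/* A couple of sample entries, as a reading aid: size_lookup[24] == 5,
   so a 24-byte request is served from the 32-byte (order 5) pool,
   while size_lookup[16] == 4 because 16 is already a power of two.
   Note that init_ggc later rewrites parts of this table so that
   requests near the sizes in extra_order_size_table use the extra
   orders instead.  */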
1039 /* Typed allocation function. Does nothing special in this collector. */
1041 void *
1042 ggc_alloc_typed (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size)
1044 return ggc_alloc (size);
1047 /* Zone allocation function. Does nothing special in this collector. */
1049 void *
1050 ggc_alloc_zone (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED)
1052 return ggc_alloc (size);
1055 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1057 void *
1058 ggc_alloc (size_t size)
1060 size_t order, word, bit, object_offset, object_size;
1061 struct page_entry *entry;
1062 void *result;
1064 if (size <= 256)
1066 order = size_lookup[size];
1067 object_size = OBJECT_SIZE (order);
1069 else
1071 order = 9;
1072 while (size > (object_size = OBJECT_SIZE (order)))
1073 order++;
1076 /* If there are non-full pages for this size allocation, they are at
1077 the head of the list. */
1078 entry = G.pages[order];
1080 /* If there is no page for this object size, or all pages in this
1081 context are full, allocate a new page. */
1082 if (entry == NULL || entry->num_free_objects == 0)
1084 struct page_entry *new_entry;
1085 new_entry = alloc_page (order);
1087 new_entry->index_by_depth = G.by_depth_in_use;
1088 push_by_depth (new_entry, 0);
1090 /* We can skip context depths; if we do, make sure we go all the
1091 way to the new depth. */
1092 while (new_entry->context_depth >= G.depth_in_use)
1093 push_depth (G.by_depth_in_use-1);
1095 /* If this is the only entry, it's also the tail. */
1096 if (entry == NULL)
1097 G.page_tails[order] = new_entry;
1099 /* Put new pages at the head of the page list. */
1100 new_entry->next = entry;
1101 entry = new_entry;
1102 G.pages[order] = new_entry;
1104 /* For a new page, we know the word and bit positions (in the
1105 in_use bitmap) of the first available object -- they're zero. */
1106 new_entry->next_bit_hint = 1;
1107 word = 0;
1108 bit = 0;
1109 object_offset = 0;
1111 else
1113 /* First try to use the hint left from the previous allocation
1114 to locate a clear bit in the in-use bitmap. We've made sure
1115 that the one-past-the-end bit is always set, so if the hint
1116 has run over, this test will fail. */
1117 unsigned hint = entry->next_bit_hint;
1118 word = hint / HOST_BITS_PER_LONG;
1119 bit = hint % HOST_BITS_PER_LONG;
1121 /* If the hint didn't work, scan the bitmap from the beginning. */
1122 if ((entry->in_use_p[word] >> bit) & 1)
1124 word = bit = 0;
1125 while (~entry->in_use_p[word] == 0)
1126 ++word;
1127 while ((entry->in_use_p[word] >> bit) & 1)
1128 ++bit;
1129 hint = word * HOST_BITS_PER_LONG + bit;
1132 /* Next time, try the next bit. */
1133 entry->next_bit_hint = hint + 1;
1135 object_offset = hint * object_size;
1138 /* Set the in-use bit. */
1139 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1141 /* Keep a running total of the number of free objects. If this page
1142 fills up, we may have to move it to the end of the list if the
1143 next page isn't full. If the next page is full, all subsequent
1144 pages are full, so there's no need to move it. */
1145 if (--entry->num_free_objects == 0
1146 && entry->next != NULL
1147 && entry->next->num_free_objects > 0)
1149 G.pages[order] = entry->next;
1150 entry->next = NULL;
1151 G.page_tails[order]->next = entry;
1152 G.page_tails[order] = entry;
1155 /* Calculate the object's address. */
1156 result = entry->page + object_offset;
1158 #ifdef ENABLE_GC_CHECKING
1159 /* Keep poisoning the object by writing 0xaf, in an attempt to keep
1160 exactly the same semantics in the presence of memory bugs, regardless of
1161 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1162 handle to avoid handle leak. */
1163 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
1165 /* `Poison' the entire allocated object, including any padding at
1166 the end. */
1167 memset (result, 0xaf, object_size);
1169 /* Make the bytes after the end of the object inaccessible. Discard the
1170 handle to avoid handle leak. */
1171 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1172 object_size - size));
1173 #endif
1175 /* Tell Valgrind that the memory is there, but its content isn't
1176 defined. The bytes at the end of the object are still marked
1177 inaccessible. */
1178 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1180 /* Keep track of how many bytes are being allocated. This
1181 information is used in deciding when to collect. */
1182 G.allocated += object_size;
1184 #ifdef GATHER_STATISTICS
1186 size_t overhead = object_size - size;
1188 G.stats.total_overhead += overhead;
1189 G.stats.total_allocated += object_size;
1190 G.stats.total_overhead_per_order[order] += overhead;
1191 G.stats.total_allocated_per_order[order] += object_size;
1193 if (size <= 32)
1195 G.stats.total_overhead_under32 += overhead;
1196 G.stats.total_allocated_under32 += object_size;
1198 if (size <= 64)
1200 G.stats.total_overhead_under64 += overhead;
1201 G.stats.total_allocated_under64 += object_size;
1203 if (size <= 128)
1205 G.stats.total_overhead_under128 += overhead;
1206 G.stats.total_allocated_under128 += object_size;
1209 #endif
1211 if (GGC_DEBUG_LEVEL >= 3)
1212 fprintf (G.debug_file,
1213 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1214 (unsigned long) size, (unsigned long) object_size, result,
1215 (void *) entry);
1217 return result;
1220 /* If P is not marked, mark it and return false. Otherwise return true.
1221 P must have been allocated by the GC allocator; it mustn't point to
1222 static objects, stack variables, or memory allocated with malloc. */
1224 int
1225 ggc_set_mark (const void *p)
1227 page_entry *entry;
1228 unsigned bit, word;
1229 unsigned long mask;
1231 /* Look up the page on which the object is allocated. If the object
1232 wasn't allocated by the collector, we'll probably die. */
1233 entry = lookup_page_table_entry (p);
1234 #ifdef ENABLE_CHECKING
1235 if (entry == NULL)
1236 abort ();
1237 #endif
1239 /* Calculate the index of the object on the page; this is its bit
1240 position in the in_use_p bitmap. */
1241 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1242 word = bit / HOST_BITS_PER_LONG;
1243 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1245 /* If the bit was previously set, skip it. */
1246 if (entry->in_use_p[word] & mask)
1247 return 1;
1249 /* Otherwise set it, and decrement the free object count. */
1250 entry->in_use_p[word] |= mask;
1251 entry->num_free_objects -= 1;
1253 if (GGC_DEBUG_LEVEL >= 4)
1254 fprintf (G.debug_file, "Marking %p\n", p);
1256 return 0;
1259 /* Return 1 if P has been marked, zero otherwise.
1260 P must have been allocated by the GC allocator; it mustn't point to
1261 static objects, stack variables, or memory allocated with malloc. */
1263 int
1264 ggc_marked_p (const void *p)
1266 page_entry *entry;
1267 unsigned bit, word;
1268 unsigned long mask;
1270 /* Look up the page on which the object is allocated. If the object
1271 wasn't allocated by the collector, we'll probably die. */
1272 entry = lookup_page_table_entry (p);
1273 #ifdef ENABLE_CHECKING
1274 if (entry == NULL)
1275 abort ();
1276 #endif
1278 /* Calculate the index of the object on the page; this is its bit
1279 position in the in_use_p bitmap. */
1280 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1281 word = bit / HOST_BITS_PER_LONG;
1282 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1284 return (entry->in_use_p[word] & mask) != 0;
1287 /* Return the size of the gc-able object P. */
1289 size_t
1290 ggc_get_size (const void *p)
1292 page_entry *pe = lookup_page_table_entry (p);
1293 return OBJECT_SIZE (pe->order);
1296 /* Release the memory for object P. */
1298 void
1299 ggc_free (void *p)
1301 page_entry *pe = lookup_page_table_entry (p);
1302 size_t order = pe->order;
1303 size_t size = OBJECT_SIZE (order);
1305 if (GGC_DEBUG_LEVEL >= 3)
1306 fprintf (G.debug_file,
1307 "Freeing object, actual size=%lu, at %p on %p\n",
1308 (unsigned long) size, p, (void *) pe);
1310 #ifdef ENABLE_GC_CHECKING
1311 /* Poison the data, to indicate the data is garbage. */
1312 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
1313 memset (p, 0xa5, size);
1314 #endif
1315 /* Let valgrind know the object is free. */
1316 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
1318 #ifdef ENABLE_GC_ALWAYS_COLLECT
1319 /* In the completely-anal-checking mode, we do *not* immediately free
1320 the data, but instead verify that the data is *actually* not
1321 reachable the next time we collect. */
1323 struct free_object *fo = xmalloc (sizeof (struct free_object));
1324 fo->object = p;
1325 fo->next = G.free_object_list;
1326 G.free_object_list = fo;
1328 #else
1330 unsigned int bit_offset, word, bit;
1332 G.allocated -= size;
1334 /* Mark the object not-in-use. */
1335 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1336 word = bit_offset / HOST_BITS_PER_LONG;
1337 bit = bit_offset % HOST_BITS_PER_LONG;
1338 pe->in_use_p[word] &= ~(1UL << bit);
1340 if (pe->num_free_objects++ == 0)
1342 /* If the page is completely full, then it's supposed to
1343 be after all pages that aren't. Since we've freed one
1344 object from a page that was full, we need to move the
1345 page to the head of the list. */
1347 page_entry *p, *q;
1348 for (q = NULL, p = G.pages[order]; ; q = p, p = p->next)
1349 if (p == pe)
1350 break;
1351 if (q && q->num_free_objects == 0)
1353 p = pe->next;
1354 q->next = p;
1355 if (!p)
1356 G.page_tails[order] = q;
1357 pe->next = G.pages[order];
1358 G.pages[order] = pe;
1361 /* Reset the hint bit to point to the only free object. */
1362 pe->next_bit_hint = bit_offset;
1365 #endif
1368 /* Subroutine of init_ggc which computes the pair of numbers used to
1369 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1371 This algorithm is taken from Granlund and Montgomery's paper
1372 "Division by Invariant Integers using Multiplication"
1373 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1374 constants). */
1376 static void
1377 compute_inverse (unsigned order)
1379 size_t size, inv;
1380 unsigned int e;
1382 size = OBJECT_SIZE (order);
1383 e = 0;
1384 while (size % 2 == 0)
1386 e++;
1387 size >>= 1;
1390 inv = size;
1391 while (inv * size != 1)
1392 inv = inv * (2 - inv*size);
1394 DIV_MULT (order) = inv;
1395 DIV_SHIFT (order) = e;
1396 }
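/* Illustrative self-check, not built into the compiler: walk every
   in-page offset that is a multiple of the object size and confirm
   that the multiply-and-shift pair computed above agrees with a real
   division.  The helper name is hypothetical.  As a concrete data
   point, OBJECT_SIZE == 24 gives e == 3 and, on a 32-bit host,
   inv == 0xaaaaaaab, since 3 * 0xaaaaaaab == 1 modulo 2^32.  */
#if 0
static void
example_check_inverse (unsigned order)
{
  size_t z = OBJECT_SIZE (order);
  size_t o;

  for (o = 0; o + z <= G.pagesize; o += z)
    if (OFFSET_TO_BIT (o, order) != o / z)
      abort ();
}
#endif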
1398 /* Initialize the ggc-mmap allocator. */
1399 void
1400 init_ggc (void)
1402 unsigned order;
1404 G.pagesize = getpagesize();
1405 G.lg_pagesize = exact_log2 (G.pagesize);
1407 #ifdef HAVE_MMAP_DEV_ZERO
1408 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1409 if (G.dev_zero_fd == -1)
1410 internal_error ("open /dev/zero: %m");
1411 #endif
1413 #if 0
1414 G.debug_file = fopen ("ggc-mmap.debug", "w");
1415 #else
1416 G.debug_file = stdout;
1417 #endif
1419 #ifdef USING_MMAP
1420 /* StunOS has an amazing off-by-one error for the first mmap allocation
1421 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1422 believe, is an unaligned page allocation, which would cause us to
1423 hork badly if we tried to use it. */
1425 char *p = alloc_anon (NULL, G.pagesize);
1426 struct page_entry *e;
1427 if ((size_t)p & (G.pagesize - 1))
1429 /* How losing. Discard this one and try another. If we still
1430 can't get something useful, give up. */
1432 p = alloc_anon (NULL, G.pagesize);
1433 if ((size_t)p & (G.pagesize - 1))
1434 abort ();
1437 /* We have a good page, might as well hold onto it... */
1438 e = xcalloc (1, sizeof (struct page_entry));
1439 e->bytes = G.pagesize;
1440 e->page = p;
1441 e->next = G.free_pages;
1442 G.free_pages = e;
1444 #endif
1446 /* Initialize the object size table. */
1447 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1448 object_size_table[order] = (size_t) 1 << order;
1449 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1451 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1453 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1454 so that we're sure of getting aligned memory. */
1455 s = ROUND_UP (s, MAX_ALIGNMENT);
1456 object_size_table[order] = s;
1459 /* Initialize the objects-per-page and inverse tables. */
1460 for (order = 0; order < NUM_ORDERS; ++order)
1462 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1463 if (objects_per_page_table[order] == 0)
1464 objects_per_page_table[order] = 1;
1465 compute_inverse (order);
1468 /* Reset the size_lookup array to put appropriately sized objects in
1469 the special orders. All objects bigger than the previous power
1470 of two, but no greater than the special size, should go in the
1471 new order. */
1472 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1474 int o;
1475 int i;
1477 o = size_lookup[OBJECT_SIZE (order)];
1478 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1479 size_lookup[i] = order;
1482 G.depth_in_use = 0;
1483 G.depth_max = 10;
1484 G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
1486 G.by_depth_in_use = 0;
1487 G.by_depth_max = INITIAL_PTE_COUNT;
1488 G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
1489 G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
1492 /* Start a new GGC zone. */
1494 struct alloc_zone *
1495 new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
1497 return NULL;
1500 /* Destroy a GGC zone. */
1501 void
1502 destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
1506 /* Increment the `GC context'. Objects allocated in an outer context
1507 are never freed, eliminating the need to register their roots. */
1509 void
1510 ggc_push_context (void)
1512 ++G.context_depth;
1514 /* Die on wrap. */
1515 if (G.context_depth >= HOST_BITS_PER_LONG)
1516 abort ();
1519 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1520 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1522 static void
1523 ggc_recalculate_in_use_p (page_entry *p)
1525 unsigned int i;
1526 size_t num_objects;
1528 /* Because the past-the-end bit in in_use_p is always set, we
1529 pretend there is one additional object. */
1530 num_objects = OBJECTS_IN_PAGE (p) + 1;
1532 /* Reset the free object count. */
1533 p->num_free_objects = num_objects;
1535 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1536 for (i = 0;
1537 i < CEIL (BITMAP_SIZE (num_objects),
1538 sizeof (*p->in_use_p));
1539 ++i)
1541 unsigned long j;
1543 /* Something is in use if it is marked, or if it was in use in a
1544 context further down the context stack. */
1545 p->in_use_p[i] |= save_in_use_p (p)[i];
1547 /* Decrement the free object count for every object allocated. */
1548 for (j = p->in_use_p[i]; j; j >>= 1)
1549 p->num_free_objects -= (j & 1);
1552 if (p->num_free_objects >= num_objects)
1553 abort ();
1556 /* Decrement the `GC context'. All objects allocated since the
1557 previous ggc_push_context are migrated to the outer context. */
1559 void
1560 ggc_pop_context (void)
1562 unsigned long omask;
1563 unsigned int depth, i, e;
1564 #ifdef ENABLE_CHECKING
1565 unsigned int order;
1566 #endif
1568 depth = --G.context_depth;
1569 omask = (unsigned long)1 << (depth + 1);
1571 if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
1572 return;
1574 G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
1575 G.context_depth_allocations &= omask - 1;
1576 G.context_depth_collections &= omask - 1;
1578 /* The G.depth array is shortened so that the last index is the
1579 context_depth of the top element of by_depth. */
1580 if (depth+1 < G.depth_in_use)
1581 e = G.depth[depth+1];
1582 else
1583 e = G.by_depth_in_use;
1585 /* We might not have any PTEs of depth DEPTH. */
1586 if (depth < G.depth_in_use)
1589 /* First we go through all the pages at depth DEPTH to
1590 recalculate the in-use bits. */
1591 for (i = G.depth[depth]; i < e; ++i)
1593 page_entry *p;
1595 #ifdef ENABLE_CHECKING
1596 p = G.by_depth[i];
1598 /* Check that all of the pages really are at the depth that
1599 we expect. */
1600 if (p->context_depth != depth)
1601 abort ();
1602 if (p->index_by_depth != i)
1603 abort ();
1604 #endif
1606 prefetch (&save_in_use_p_i (i+8));
1607 prefetch (&save_in_use_p_i (i+16));
1608 if (save_in_use_p_i (i))
1610 p = G.by_depth[i];
1611 ggc_recalculate_in_use_p (p);
1612 free (save_in_use_p_i (i));
1613 save_in_use_p_i (i) = 0;
1618 /* Then, we reset all page_entries with a depth greater than DEPTH
1619 to be at DEPTH. */
1620 for (i = e; i < G.by_depth_in_use; ++i)
1622 page_entry *p = G.by_depth[i];
1624 /* Check that all of the pages really are at the depth we
1625 expect. */
1626 #ifdef ENABLE_CHECKING
1627 if (p->context_depth <= depth)
1628 abort ();
1629 if (p->index_by_depth != i)
1630 abort ();
1631 #endif
1632 p->context_depth = depth;
1635 adjust_depth ();
1637 #ifdef ENABLE_CHECKING
1638 for (order = 2; order < NUM_ORDERS; order++)
1640 page_entry *p;
1642 for (p = G.pages[order]; p != NULL; p = p->next)
1644 if (p->context_depth > depth)
1645 abort ();
1646 else if (p->context_depth == depth && save_in_use_p (p))
1647 abort ();
1650 #endif
1653 /* Unmark all objects. */
1655 static void
1656 clear_marks (void)
1658 unsigned order;
1660 for (order = 2; order < NUM_ORDERS; order++)
1662 page_entry *p;
1664 for (p = G.pages[order]; p != NULL; p = p->next)
1666 size_t num_objects = OBJECTS_IN_PAGE (p);
1667 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1669 #ifdef ENABLE_CHECKING
1670 /* The data should be page-aligned. */
1671 if ((size_t) p->page & (G.pagesize - 1))
1672 abort ();
1673 #endif
1675 /* Pages that aren't in the topmost context are not collected;
1676 nevertheless, we need their in-use bit vectors to store GC
1677 marks. So, back them up first. */
1678 if (p->context_depth < G.context_depth)
1680 if (! save_in_use_p (p))
1681 save_in_use_p (p) = xmalloc (bitmap_size);
1682 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1685 /* Reset the number of free objects and clear the
1686 in-use bits. These will be adjusted by mark_obj. */
1687 p->num_free_objects = num_objects;
1688 memset (p->in_use_p, 0, bitmap_size);
1690 /* Make sure the one-past-the-end bit is always set. */
1691 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1692 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1697 /* Free all empty pages. Partially empty pages need no attention
1698 because the `mark' bit doubles as an `unused' bit. */
1700 static void
1701 sweep_pages (void)
1703 unsigned order;
1705 for (order = 2; order < NUM_ORDERS; order++)
1707 /* The last page-entry to consider, regardless of entries
1708 placed at the end of the list. */
1709 page_entry * const last = G.page_tails[order];
1711 size_t num_objects;
1712 size_t live_objects;
1713 page_entry *p, *previous;
1714 int done;
1716 p = G.pages[order];
1717 if (p == NULL)
1718 continue;
1720 previous = NULL;
1723 page_entry *next = p->next;
1725 /* Loop until all entries have been examined. */
1726 done = (p == last);
1728 num_objects = OBJECTS_IN_PAGE (p);
1730 /* Add all live objects on this page to the count of
1731 allocated memory. */
1732 live_objects = num_objects - p->num_free_objects;
1734 G.allocated += OBJECT_SIZE (order) * live_objects;
1736 /* Only objects on pages in the topmost context should get
1737 collected. */
1738 if (p->context_depth < G.context_depth)
1741 /* Remove the page if it's empty. */
1742 else if (live_objects == 0)
1744 if (! previous)
1745 G.pages[order] = next;
1746 else
1747 previous->next = next;
1749 /* Are we removing the last element? */
1750 if (p == G.page_tails[order])
1751 G.page_tails[order] = previous;
1752 free_page (p);
1753 p = previous;
1756 /* If the page is full, move it to the end. */
1757 else if (p->num_free_objects == 0)
1759 /* Don't move it if it's already at the end. */
1760 if (p != G.page_tails[order])
1762 /* Move p to the end of the list. */
1763 p->next = NULL;
1764 G.page_tails[order]->next = p;
1766 /* Update the tail pointer... */
1767 G.page_tails[order] = p;
1769 /* ... and the head pointer, if necessary. */
1770 if (! previous)
1771 G.pages[order] = next;
1772 else
1773 previous->next = next;
1774 p = previous;
1778 /* If we've fallen through to here, it's a page in the
1779 topmost context that is neither full nor empty. Such a
1780 page must precede pages at lesser context depth in the
1781 list, so move it to the head. */
1782 else if (p != G.pages[order])
1784 previous->next = p->next;
1785 p->next = G.pages[order];
1786 G.pages[order] = p;
1787 /* Are we moving the last element? */
1788 if (G.page_tails[order] == p)
1789 G.page_tails[order] = previous;
1790 p = previous;
1793 previous = p;
1794 p = next;
1796 while (! done);
1798 /* Now, restore the in_use_p vectors for any pages from contexts
1799 other than the current one. */
1800 for (p = G.pages[order]; p; p = p->next)
1801 if (p->context_depth != G.context_depth)
1802 ggc_recalculate_in_use_p (p);
1806 #ifdef ENABLE_GC_CHECKING
1807 /* Clobber all free objects. */
1809 static void
1810 poison_pages (void)
1812 unsigned order;
1814 for (order = 2; order < NUM_ORDERS; order++)
1816 size_t size = OBJECT_SIZE (order);
1817 page_entry *p;
1819 for (p = G.pages[order]; p != NULL; p = p->next)
1821 size_t num_objects;
1822 size_t i;
1824 if (p->context_depth != G.context_depth)
1825 /* Since we don't do any collection for pages in pushed
1826 contexts, there's no need to do any poisoning. And
1827 besides, the IN_USE_P array isn't valid until we pop
1828 contexts. */
1829 continue;
1831 num_objects = OBJECTS_IN_PAGE (p);
1832 for (i = 0; i < num_objects; i++)
1834 size_t word, bit;
1835 word = i / HOST_BITS_PER_LONG;
1836 bit = i % HOST_BITS_PER_LONG;
1837 if (((p->in_use_p[word] >> bit) & 1) == 0)
1839 char *object = p->page + i * size;
1841 /* Keep poison-by-write when we expect to use Valgrind,
1842 so exactly the same memory semantics are kept, in case
1843 there are memory errors. We override this request
1844 below. */
1845 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
1846 memset (object, 0xa5, size);
1848 /* Drop the handle to avoid handle leak. */
1849 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
1855 #else
1856 #define poison_pages()
1857 #endif
1859 #ifdef ENABLE_GC_ALWAYS_COLLECT
1860 /* Validate that the reportedly free objects actually are. */
1862 static void
1863 validate_free_objects (void)
1865 struct free_object *f, *next, *still_free = NULL;
1867 for (f = G.free_object_list; f ; f = next)
1869 page_entry *pe = lookup_page_table_entry (f->object);
1870 size_t bit, word;
1872 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
1873 word = bit / HOST_BITS_PER_LONG;
1874 bit = bit % HOST_BITS_PER_LONG;
1875 next = f->next;
1877 /* Make certain it isn't visible from any root. Notice that we
1878 do this check before sweep_pages merges save_in_use_p. */
1879 if (pe->in_use_p[word] & (1UL << bit))
1880 abort ();
1882 /* If the object comes from an outer context, then retain the
1883 free_object entry, so that we can verify that the address
1884 isn't live on the stack in some outer context. */
1885 if (pe->context_depth != G.context_depth)
1887 f->next = still_free;
1888 still_free = f;
1890 else
1891 free (f);
1894 G.free_object_list = still_free;
1896 #else
1897 #define validate_free_objects()
1898 #endif
1900 /* Top level mark-and-sweep routine. */
1902 void
1903 ggc_collect (void)
1905 /* Avoid frequent unnecessary work by skipping collection if the
1906 total allocations haven't expanded much since the last
1907 collection. */
1908 float allocated_last_gc =
1909 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1911 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1913 if (G.allocated < allocated_last_gc + min_expand)
1914 return;
1916 timevar_push (TV_GC);
1917 if (!quiet_flag)
1918 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1919 if (GGC_DEBUG_LEVEL >= 2)
1920 fprintf (G.debug_file, "BEGIN COLLECTING\n");
1922 /* Zero the total allocated bytes. This will be recalculated in the
1923 sweep phase. */
1924 G.allocated = 0;
1926 /* Release the pages we freed the last time we collected, but didn't
1927 reuse in the interim. */
1928 release_pages ();
1930 /* Indicate that we've seen collections at this context depth. */
1931 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1933 clear_marks ();
1934 ggc_mark_roots ();
1935 poison_pages ();
1936 validate_free_objects ();
1937 sweep_pages ();
1939 G.allocated_last_gc = G.allocated;
1941 timevar_pop (TV_GC);
1943 if (!quiet_flag)
1944 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1945 if (GGC_DEBUG_LEVEL >= 2)
1946 fprintf (G.debug_file, "END COLLECTING\n");
1949 /* Print allocation statistics. */
1950 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1951 ? (x) \
1952 : ((x) < 1024*1024*10 \
1953 ? (x) / 1024 \
1954 : (x) / (1024*1024))))
1955 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s %10s %10s\n",
           "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
        }
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
               (unsigned long) OBJECT_SIZE (i),
               SCALE (allocated), LABEL (allocated),
               SCALE (in_use), LABEL (in_use),
               SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
           SCALE (G.allocated), LABEL(G.allocated),
           SCALE (total_overhead), LABEL (total_overhead));

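  /* Note on the Overhead column above: for every page, the loop charges
     sizeof (page_entry) minus the one long the structure already embeds for
     its in-use bitmap, plus BITMAP_SIZE of the object count with one spare
     bit.  For example, assuming 64-bit longs, a page holding 512 objects
     needs CEIL (513, 64) = 9 longs of bitmap, i.e. 72 bytes, on top of the
     fixed part of the structure.  */
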
#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
             G.stats.total_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
             G.stats.total_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
             G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
             G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
             G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
             G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
             G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
             G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
        {
          fprintf (stderr, "Total Overhead page size %7d: %10lld\n",
                   OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]);
          fprintf (stderr, "Total Allocated page size %7d: %10lld\n",
                   OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]);
        }
  }
#endif
}

struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned totals[NUM_ORDERS];
  } d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

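/* d.totals records, per allocation order, how many objects will go into
   the PCH image; base[] holds the running allocation cursor for each order
   once ggc_pch_this_base has laid out the image; written[] counts the
   objects already emitted so that ggc_pch_write_object can tell when an
   order's last object has been written and the stream must be padded out
   to a page boundary.  */
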
struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  d->d.totals[order]++;
}

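/* The size-to-order computation above is repeated verbatim in
   ggc_pch_alloc_object and ggc_pch_write_object below.  A minimal sketch of
   a shared helper (hypothetical; not defined in this file) built on the
   file's own size_lookup table and OBJECT_SIZE macro:

     static unsigned
     pch_size_to_order (size_t size)
     {
       unsigned order;

       if (size <= 256)
         order = size_lookup[size];
       else
         {
           order = 9;
           while (size > OBJECT_SIZE (order))
             order++;
         }
       return order;
     }
*/
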
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}

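/* For illustration (numbers assumed): with a 4096-byte pagesize, 100
   objects of a 32-byte order occupy 3200 bytes and are rounded up to a
   single 4096-byte region, so the next non-empty order's base begins
   exactly one page later.  Because ggc_pch_total_size applies the same
   ROUND_UP, the bases computed here always stay within the reserved
   image.  */
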
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256];

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance that the OS may try to flush any outstanding
         writes.  */
      if (padding <= sizeof (emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error ("can't write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error ("can't write PCH file");
        }
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
                                   G.pagesize),
                SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}

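/* For illustration (sizes assumed): writing a 37-byte string into a
   64-byte order emits the 37 bytes followed by 27 padding bytes taken from
   emptyBytes.  Once the last object of an order has been written, the
   trailing fseek skips the page-rounding slack for that order, so the next
   order begins at the page-aligned offset that ggc_pch_this_base assumed
   when laying out the image.  */
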
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
  new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));

  memcpy (&new_by_depth[0],
          &G.by_depth[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
          &G.by_depth[0],
          count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
          &G.save_in_use[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
          &G.save_in_use[0],
          count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}

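/* For illustration (counts assumed): with 3 pre-existing page tables and 2
   just read from a PCH, by_depth changes from [old0, old1, old2, new0,
   new1] to [new0, new1, old0, old1, old2].  The PCH entries end up at the
   front, where context-depth-0 entries belong, and push_depth (2) records
   that the old (now depth 1) entries begin at index 2.  */
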
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  if (G.context_depth != 0)
    abort ();
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
        p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
        continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = xcalloc (1, (sizeof (struct page_entry)
                           - sizeof (long)
                           + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
           j + HOST_BITS_PER_LONG <= num_objs + 1;
           j += HOST_BITS_PER_LONG)
        entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
        entry->in_use_p[j / HOST_BITS_PER_LONG]
          |= 1L << (j % HOST_BITS_PER_LONG);

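      /* For illustration (assuming 32-bit longs): for num_objs == 70 these
         two loops set 71 bits, i.e. two full words of in_use_p plus bits
         0-6 of the third word.  Every PCH object is marked in use; the
         bitmap is sized and filled over num_objs + 1 bits, matching how
         BITMAP_SIZE is applied elsewhere in this file.  */
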
      for (pte = entry->page;
           pte < entry->page + entry->bytes;
           pte += G.pagesize)
        set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
        G.page_tails[i]->next = entry;
      else
        G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
         end of the varrays; later, we will move the new information
         to the front of the varrays, as the PCH page tables are at
         context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}