1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin (dberlin@dberlin.org)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif
/* If we track inter-zone pointers, we can mark single zones at a
   time.
   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.
   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */

/* This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than or equal to the page size.

   Pages for small objects are broken up into chunks, each of which
   is described by a struct alloc_chunk.  One can walk over all
   chunks on the page by adding the chunk size to the chunk's data
   address.  The free space for a page exists in the free chunk bins.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

#ifdef COOKIE_CHECKING
#define CHUNK_MAGIC 0x95321123
#define DEADCHUNK_MAGIC 0x12817317
#endif
/* This structure manages small chunks.  When the chunk is free, it's
   linked with other chunks via u.next_free.  When the chunk is allocated,
   the data starts at u.  Large chunks are allocated one at a time to
   their own page, and so don't come in here.

   The "type" field is a placeholder for a future change to do
   generational collection.  At present it is 0 when free and
   1 when allocated.  */

struct alloc_chunk {
#ifdef COOKIE_CHECKING
  unsigned int magic;
#endif
  unsigned int type:1;
  unsigned int typecode:15;
  unsigned int size:15;
  unsigned int mark:1;
  union {
    struct alloc_chunk *next_free;
    char data[1];

    /* Make sure the data is sufficiently aligned.  */
    HOST_WIDEST_INT align_i;
#ifdef HAVE_LONG_DOUBLE
    long double align_d;
#else
    double align_d;
#endif
  } u;
} __attribute__ ((packed));

#define CHUNK_OVERHEAD	(offsetof (struct alloc_chunk, u))
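
/* Illustration only (not part of the allocator): given the layout above,
   every chunk on a small-object page can be visited by starting at the
   page base and repeatedly stepping past the current chunk's data, e.g.

     struct alloc_chunk *chunk = (struct alloc_chunk *) page;
     struct alloc_chunk *end = (struct alloc_chunk *) (page + G.pagesize);
     do
       {
         ... examine *chunk ...
         chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
       }
     while (chunk < end);

   where `page' stands for a page_entry's data address.  This is the same
   traversal used by sweep_pages and check_cookies below.  */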
/* We maintain several bins of free lists of chunks for very small
   objects.  We never exhaustively search other bins -- if we don't
   find one of the proper size, we allocate from the "larger" bin.  */

/* Decreasing the number of free bins increases the time it takes to allocate.
   The same holds for increasing MAX_FREE_BIN_SIZE without increasing
   NUM_FREE_BINS.

   After much histogramming of allocation sizes and time spent on GC,
   on a PowerPC G4 7450 at 667 MHz and a Pentium 4 at 2.8 GHz,
   these were determined to be the optimal values.  */
#define NUM_FREE_BINS		64
#define MAX_FREE_BIN_SIZE	256
#define FREE_BIN_DELTA		(MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
#define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
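
/* For example, with the values above FREE_BIN_DELTA is 4, so a 9-byte
   request has SIZE_BIN_UP (9) == 3 and is served from the bin of 12-byte
   (and larger) chunks, while a freed 9-byte chunk has SIZE_BIN_DOWN (9) == 2
   and goes on the 8-byte list.  Rounding up on allocation and down on
   freeing guarantees we never hand out a chunk smaller than the request.  */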
/* Marker used as chunk->size for a large object.  It is the maximum
   value representable in the 15-bit size bitfield above.  */
#define LARGE_OBJECT_SIZE	0x7fff
/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
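
/* For illustration: on a typical host where the most strictly aligned
   member of the union in struct max_alignment is 8 bytes wide, the union
   is placed at offset 8 and MAX_ALIGNMENT is therefore 8; requested
   object sizes are rounded up to a multiple of this value in
   ggc_alloc_zone_1 below.  */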
/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
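
/* For example, ROUND_UP_VALUE (10, 8) == 6 and ROUND_UP (10, 8) == 16,
   while ROUND_UP_VALUE (16, 8) == 0 and ROUND_UP (16, 8) == 16.  */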
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree.

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
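
/* A worked example, assuming a 4K system page (G.lg_pagesize == 12, so
   PAGE_L2_BITS == 12): for the 32-bit address 0xdeadb000, LOOKUP_L1
   yields the top 8 bits, 0xde, and LOOKUP_L2 yields the next 12 bits,
   0xadb; the low 12 bits select the byte within the page.  */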
/* A page_entry records the status of an allocation page.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* How many collections we've survived.  */
  size_t survived;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* Number of bytes on the page unallocated.  Only used during
     collection, and even then large pages merely set this nonzero.  */
  size_t bytes_free;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;
} page_entry;
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as described above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif
/* The global variables.  */
static struct globals
{
  /* The page lookup table.  A single page can only belong to one
     zone.  This means free pages are zone-specific ATM.  */
  page_table lookup;
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;

/* The zone allocation structure.  */
struct alloc_zone
{
  /* Name of the zone.  */
  const char *name;

  /* Linked list of pages in a zone.  */
  page_entry *pages;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS hold chunks
     binned by multiples of FREE_BIN_DELTA; all larger chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;
} main_zone;

struct alloc_zone *rtl_zone;
struct alloc_zone *garbage_zone;
struct alloc_zone *tree_zone;
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16
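
/* For example, with 4K system pages a quire is a single 64K request to
   the underlying allocator, and the 16 pages it yields fit easily in the
   unsigned int in_use bitmask of a page_group.  */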
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry *alloc_small_page (struct alloc_zone *);
static struct page_entry *alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
static void free_page (struct page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static void *ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void check_cookies (void);
/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}
/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}
/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof (*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
  VALGRIND_MALLOCLIKE_BLOCK (page, size, 0, 0);

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;
  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  The entry is not added to the zone's
   page list.  */

static inline struct page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  page = NULL;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
      page = entry->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = entry->group;
#endif
    }
#ifdef USING_MMAP
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = zone->free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
        {
          e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      zone->free_pages = f;
    }
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      /* Small-object pages are always exactly one system page.  */
      size_t entry_size = G.pagesize;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);
      VALGRIND_MALLOCLIKE_BLOCK (allocation, alloc_size, 0, 0);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *) page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              page += G.pagesize;
              tail_slop += G.pagesize;
            }
          if (tail_slop < sizeof (page_group))
            abort ();
          group = (page_group *) enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = zone->page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      zone->page_groups = group;
      zone->bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = zone->free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          zone->free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));

  entry->page = page;
  entry->bytes = G.pagesize;
  entry->bytes_free = G.pagesize;
  entry->survived = 0;
  entry->context_depth = zone->context_depth;
  entry->large_p = false;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long) 1 << zone->context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
             (PTR) entry, page, page + G.pagesize - 1);

  return entry;
}
/* Allocate a large page of size SIZE in ZONE.  */

static inline struct page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct page_entry *entry;
  char *page;

  page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
  entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);

  entry->page = page;
  entry->bytes = size;
  entry->bytes_free = LARGE_OBJECT_SIZE + CHUNK_OVERHEAD;
  entry->survived = 0;
  entry->context_depth = zone->context_depth;
  entry->large_p = true;
  entry->zone = zone;
  zone->context_depth_allocations |= (unsigned long) 1 << zone->context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = NULL;
#endif
  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
             (PTR) entry, page, page + size - 1);

  return entry;
}
/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n", entry->zone->name,
             (PTR) entry, entry->page, entry->page + entry->bytes - 1);

  set_page_table_entry (entry->page, NULL);

  if (entry->large_p)
    {
      free (entry->page);
      VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
    }
  else
    {
      /* Mark the page as inaccessible.  Discard the handle to
         avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

#ifdef USING_MALLOC_PAGE_GROUPS
      clear_page_group_in_use (entry->group, entry->page);
#endif

      entry->next = entry->zone->free_pages;
      entry->zone->free_pages = entry;
    }
}
/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &(zone->free_pages);
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &(zone->page_groups);
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        zone->bytes_mapped -= g->alloc_size;
        free (g->allocation);
        VALGRIND_FREELIKE_BLOCK (g->allocation, 0);
      }
    else
      gp = &g->next;
#endif
}
/* Place CHUNK of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
{
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  if (bin == 0)
    abort ();
  if (bin > NUM_FREE_BINS)
    bin = 0;
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
    abort ();
  chunk->magic = DEADCHUNK_MAGIC;
#endif
  chunk->u.next_free = zone->free_chunks[bin];
  zone->free_chunks[bin] = chunk;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *) chunk);
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}
/* Allocate a chunk of memory of SIZE bytes.  */

static void *
ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
{
  size_t bin = 0;
  size_t lsize = 0;
  struct page_entry *entry;
  struct alloc_chunk *chunk, *lchunk, **pp;
  void *result;

  /* Align size, so that we're assured of aligned allocations.  */
  if (size < FREE_BIN_DELTA)
    size = FREE_BIN_DELTA;
  size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Large objects are handled specially.  */
  if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
    {
      entry = alloc_large_page (size, zone);
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = LARGE_OBJECT_SIZE;

      goto found;
    }

  /* First look for a tiny object already segregated into its own
     size bucket.  */
  bin = SIZE_BIN_UP (size);
  if (bin <= NUM_FREE_BINS)
    {
      chunk = zone->free_chunks[bin];
      if (chunk)
        {
          zone->free_chunks[bin] = chunk->u.next_free;
          VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
          goto found;
        }
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->u.next_free;
      chunk = *pp;
    }

  /* Failing that, allocate new storage.  */
  if (!chunk)
    {
      entry = alloc_small_page (zone);
      entry->next = entry->zone->pages;
      entry->zone->pages = entry;

      chunk = (struct alloc_chunk *) entry->page;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = G.pagesize - CHUNK_OVERHEAD;
    }
  else
    {
      *pp = chunk->u.next_free;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
    }
  /* Release extra memory from a chunk that's too big.  */
  lsize = chunk->size - size;
  if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;

      lsize -= CHUNK_OVERHEAD;
      lchunk = (struct alloc_chunk *)(chunk->u.data + size);
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
#ifdef COOKIE_CHECKING
      lchunk->magic = CHUNK_MAGIC;
#endif
      lchunk->type = 0;
      lchunk->mark = 0;
      lchunk->size = lsize;
      free_chunk (lchunk, lsize, zone);
    }
  /* Calculate the object's address.  */
 found:
#ifdef COOKIE_CHECKING
  chunk->magic = CHUNK_MAGIC;
#endif
  chunk->type = 1;
  chunk->mark = 0;
  chunk->typecode = type;
  result = chunk->u.data;

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* `Poison' the entire allocated object.  */
  memset (result, 0xaf, size);
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size + CHUNK_OVERHEAD;

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
             (void *) chunk, (unsigned long) size, result);

  return result;
}
/* Allocate SIZE bytes of memory whose GGC type is GTE, placing it into
   the zone appropriate for that type.  */

void *
ggc_alloc_typed (enum gt_types_enum gte, size_t size)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_1 (size, tree_zone, gte);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_1 (size, rtl_zone, gte);

    default:
      return ggc_alloc_zone_1 (size, &main_zone, gte);
    }
}
/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc (size_t size)
{
  return ggc_alloc_zone_1 (size, &main_zone, -1);
}

/* Zone allocation allocates into the specified zone.  */

void *
ggc_alloc_zone (size_t size, struct alloc_zone *zone)
{
  return ggc_alloc_zone_1 (size, zone, -1);
}
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  struct alloc_chunk *chunk;

#ifdef ENABLE_CHECKING
  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  if (entry == NULL)
    abort ();
#endif
  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->mark)
    return 1;
  chunk->mark = 1;

#ifndef ENABLE_CHECKING
  entry = lookup_page_table_entry (p);
#endif

  /* Large pages are either completely full or completely empty.  So if
     they are marked, they are completely full.  */
  if (entry->large_p)
    entry->bytes_free = 0;
  else
    entry->bytes_free -= chunk->size + CHUNK_OVERHEAD;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct alloc_chunk *chunk;

#ifdef ENABLE_CHECKING
  {
    page_entry *entry = lookup_page_table_entry (p);
    if (entry == NULL)
      abort ();
  }
#endif

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  return chunk->mark;
}
/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct alloc_chunk *chunk;
  struct page_entry *entry;

#ifdef ENABLE_CHECKING
  entry = lookup_page_table_entry (p);
  if (entry == NULL)
    abort ();
#endif

  chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
  if (chunk->magic != CHUNK_MAGIC)
    abort ();
#endif
  if (chunk->size == LARGE_OBJECT_SIZE)
    {
#ifndef ENABLE_CHECKING
      entry = lookup_page_table_entry (p);
#endif
      return entry->bytes;
    }

  return chunk->size;
}
/* Initialize the ggc-zone-mmap allocator.  */

void
init_ggc (void)
{
  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  rtl_zone = new_ggc_zone ("RTL zone");
  tree_zone = new_ggc_zone ("Tree zone");
  garbage_zone = new_ggc_zone ("Garbage zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    abort ();
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct page_entry *e;
    if ((size_t) p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, &main_zone);
        if ((size_t) p & (G.pagesize - 1))
          abort ();
      }

    /* We have a good page, might as well hold onto it...  */
    e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
    e->bytes = G.pagesize;
    e->page = p;
    e->next = main_zone.free_pages;
    main_zone.free_pages = e;
  }
#endif
}
/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char *name)
{
  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
  return new_zone;
}
/* Destroy a GGC zone.  */

void
destroy_ggc_zone (struct alloc_zone *dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */;

  /* We should have found the zone in the list.  Anything else is
     fatal.
     If we did find the zone, we expect this zone to be empty.
     A ggc_collect should have emptied it before we can destroy it.  */
  if (!z || dead_zone->allocated != 0)
    abort ();

  /* Unchain the dead zone, release all its pages and free it.  */
  z->next_zone = z->next_zone->next_zone;
  release_pages (dead_zone);
  free (dead_zone);
}
/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ++(zone->context_depth);
  /* Die on wrap.  */
  if (main_zone.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}
/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

static void
ggc_pop_context_1 (struct alloc_zone *zone)
{
  unsigned long omask;
  unsigned depth;
  page_entry *p;

  depth = --(zone->context_depth);
  omask = (unsigned long) 1 << (depth + 1);

  if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
    return;

  zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
  zone->context_depth_allocations &= omask - 1;
  zone->context_depth_collections &= omask - 1;

  /* Any remaining pages in the popped context are lowered to the new
     current context; i.e. objects allocated in the popped context and
     left over are imported into the previous context.  */
  for (p = zone->pages; p != NULL; p = p->next)
    if (p->context_depth > depth)
      p->context_depth = depth;
}
/* Pop all the zone contexts.  */

void
ggc_pop_context (void)
{
  struct alloc_zone *zone;
  for (zone = G.zones; zone; zone = zone->next_zone)
    ggc_pop_context_1 (zone);
}
/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
  memset ((CHUNK)->u.data, 0xa5, (SIZE))
#else
#define poison_chunk(CHUNK, SIZE)
#endif
/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  page_entry **pp, *p, *next;
  struct alloc_chunk *chunk, *last_free, *end;
  size_t last_free_size, allocated = 0;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  pp = &zone->pages;
  for (p = zone->pages; p ; p = next)
    {
      next = p->next;

      /* For empty pages, just free the page.  */
      if (p->bytes_free == G.pagesize && p->context_depth == zone->context_depth)
        {
          *pp = next;
#ifdef ENABLE_GC_CHECKING
          /* Poison the page.  */
          memset (p->page, 0xb5, p->bytes);
#endif
          free_page (p);
          continue;
        }

      /* Large pages are all or none affairs.  Either they are
         completely empty, or they are completely full.
         Thus, if the above didn't catch it, we need not do anything
         except remove the mark and reset the bytes_free.

         XXX: Should we bother to increment allocated?  */
      else if (p->large_p)
        {
          p->bytes_free = p->bytes;
          ((struct alloc_chunk *) p->page)->mark = 0;
          pp = &p->next;
          continue;
        }
      pp = &p->next;

      /* This page has now survived another collection.  */
      p->survived++;

      /* Which leaves full and partial pages.  Step through all chunks,
         consolidate those that are free and insert them into the free
         lists.  Note that consolidation slows down collection
         slightly.  */

      chunk = (struct alloc_chunk *) p->page;
      end = (struct alloc_chunk *) (p->page + G.pagesize);
      last_free = NULL;
      last_free_size = 0;

      do
        {
          prefetch ((struct alloc_chunk *) (chunk->u.data + chunk->size));
          if (chunk->mark || p->context_depth < zone->context_depth)
            {
              if (last_free)
                {
                  last_free->type = 0;
                  last_free->size = last_free_size;
                  last_free->mark = 0;
                  poison_chunk (last_free, last_free_size);
                  free_chunk (last_free, last_free_size, zone);
                  last_free = NULL;
                }
              if (chunk->mark)
                {
                  allocated += chunk->size + CHUNK_OVERHEAD;
                  p->bytes_free += chunk->size + CHUNK_OVERHEAD;
                }
              chunk->mark = 0;
#ifdef ENABLE_CHECKING
              if (p->bytes_free > p->bytes)
                abort ();
#endif
            }
          else
            {
              if (last_free)
                last_free_size += CHUNK_OVERHEAD + chunk->size;
              else
                {
                  last_free = chunk;
                  last_free_size = chunk->size;
                }
            }

          chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
        }
      while (chunk < end);

      if (last_free)
        {
          last_free->type = 0;
          last_free->size = last_free_size;
          last_free->mark = 0;
          poison_chunk (last_free, last_free_size);
          free_chunk (last_free, last_free_size, zone);
        }
    }

  zone->allocated = allocated;
}
/* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected, false otherwise.  */

static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (zone->allocated_last_gc, (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (zone->allocated < allocated_last_gc + min_expand)
    return false;

  if (!quiet_flag)
    fprintf (stderr, " {%s GC %luk -> ", zone->name,
             (unsigned long) zone->allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  zone->allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages (zone);

  /* Indicate that we've seen collections at this context depth.  */
  zone->context_depth_collections
    = ((unsigned long) 1 << (zone->context_depth + 1)) - 1;
  if (need_marking)
    ggc_mark_roots ();
  sweep_pages (zone);
  zone->was_collected = true;
  zone->allocated_last_gc = zone->allocated;

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
  return true;
}
/* Calculate the average page survival rate in terms of number of
   collections.  */

static float
calculate_average_page_survival (struct alloc_zone *zone)
{
  float count = 0.0;
  float survival = 0.0;
  page_entry *p;
  for (p = zone->pages; p; p = p->next)
    {
      count += 1.0;
      survival += p->survived;
    }
  return survival / count;
}
/* Check the magic cookies all of the chunks contain, to make sure we
   aren't doing anything stupid, like stomping on alloc_chunk
   structures.  */

static inline void
check_cookies (void)
{
#ifdef COOKIE_CHECKING
  page_entry *p;
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      for (p = zone->pages; p; p = p->next)
        {
          if (!p->large_p)
            {
              struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
              struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
              do
                {
                  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
                    abort ();
                  chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
                }
              while (chunk < end);
            }
        }
    }
#endif
}
/* Top level collection routine.  */

void
ggc_collect (void)
{
  struct alloc_zone *zone;
  bool marked = false;
  float f;

  timevar_push (TV_GC);
  check_cookies ();
  /* Start by possibly collecting the main zone.  */
  main_zone.was_collected = false;
  marked |= ggc_collect_1 (&main_zone, true);

  /* In order to keep the number of collections down, we don't
     collect other zones unless we are collecting the main zone.  This
     gives us roughly the same number of collections as we used to
     have with the old GC.  The number of collections is important
     because our main slowdown (according to profiling) is now in
     marking.  So if we mark twice as often as we used to, we'll be
     twice as slow.  Hopefully we'll avoid this cost when we mark
     zone-at-a-time.  */

  if (main_zone.was_collected)
    {
      struct alloc_zone *zone;

      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
        {
          check_cookies ();
          zone->was_collected = false;
          marked |= ggc_collect_1 (zone, !marked);
        }
    }

  /* Print page survival stats, if someone wants them.  */
  if (GGC_DEBUG_LEVEL >= 2)
    {
      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          if (zone->was_collected)
            {
              f = calculate_average_page_survival (zone);
              printf ("Average page survival in zone `%s' is %f\n",
                      zone->name, f);
            }
        }
    }

  /* Since we don't mark zone at a time right now, marking in any
     zone means marking in every zone.  So we have to clear all the
     marks in all the zones that weren't collected already.  */
  if (marked)
    {
      page_entry *p;
      for (zone = G.zones; zone; zone = zone->next_zone)
        {
          if (zone->was_collected)
            continue;
          for (p = zone->pages; p; p = p->next)
            {
              if (!p->large_p)
                {
                  struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
                  struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
                  do
                    {
                      prefetch ((struct alloc_chunk *) (chunk->u.data + chunk->size));
                      if (chunk->mark || p->context_depth < zone->context_depth)
                        {
                          if (chunk->mark)
                            p->bytes_free += chunk->size + CHUNK_OVERHEAD;
#ifdef ENABLE_CHECKING
                          if (p->bytes_free > p->bytes)
                            abort ();
#endif
                          chunk->mark = 0;
                        }
                      chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
                    }
                  while (chunk < end);
                }
              else
                {
                  p->bytes_free = p->bytes;
                  ((struct alloc_chunk *) p->page)->mark = 0;
                }
            }
        }
    }
  timevar_pop (TV_GC);
}
/* Print allocation statistics.  */

void
ggc_print_statistics (void)
{
}
struct ggc_pch_ondisk
{
  size_t total;
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base;
  size_t written;
};

/* Initialize the PCH data structure.  */

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}
/* Add the size of object X to the size of the PCH data.  */

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string)
{
  if (!is_string)
    d->d.total += size + CHUNK_OVERHEAD;
  else
    d->d.total += size;
}
/* Return the total size of the PCH data.  */

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  return d->d.total;
}

/* Set the base address for the objects in the PCH file.  */

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  d->base = (size_t) base;
}
/* Allocate a place for object X of size SIZE in the PCH file.  */

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
                      size_t size, bool is_string)
{
  char *result;
  result = (char *) d->base;
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *) x - CHUNK_OVERHEAD);
      if (chunk->size == LARGE_OBJECT_SIZE)
        d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
      else
        d->base += chunk->size + CHUNK_OVERHEAD;
      return result + CHUNK_OVERHEAD;
    }
  else
    {
      d->base += size;
      return result;
    }
}
/* Prepare to write out the PCH data to file F.  */

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}
/* Write out object X of SIZE to file F.  */

void
ggc_pch_write_object (struct ggc_pch_data *d,
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
                      size_t size, bool is_string)
{
  if (!is_string)
    {
      struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *) x - CHUNK_OVERHEAD);
      size = ggc_get_size (x);
      if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
        fatal_error ("can't write PCH file: %m");
      d->written += size + CHUNK_OVERHEAD;
    }
  else
    {
      if (fwrite (x, size, 1, f) != 1)
        fatal_error ("can't write PCH file: %m");
      d->written += size;
    }
  if (d->written == d->d.total
      && fseek (f, ROUND_UP_VALUE (d->d.total, G.pagesize), SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  struct page_entry *entry;
  char *pte;
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");
  entry = xcalloc (1, sizeof (struct page_entry));
  entry->bytes = d.total;
  entry->page = addr;
  entry->context_depth = 0;
  entry->zone = &main_zone;
  for (pte = entry->page;
       pte < entry->page + entry->bytes;
       pte += G.pagesize)
    set_page_table_entry (pte, entry);
}