src/runtime/gencgc-internal.h
/*
 * Generational Conservative Garbage Collector for SBCL x86
 *
 * inline functions that gc-common.c needs sight of
 */
/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
#ifndef _GENCGC_INTERNAL_H_
#define _GENCGC_INTERNAL_H_

#include "gencgc-alloc-region.h"
#include "genesis/code.h"
/* Size of a page, in bytes. FIXME: needs to be conditionalized per
 * architecture, preferably by someone with a clue as to what page
 * sizes are on archs other than x86 and PPC - Patrik */
#define PAGE_BYTES 4096
void gc_free_heap(void);
inline int find_page_index(void *);
inline void *page_address(int);
int gencgc_handle_wp_violation(void *);
lispobj *search_dynamic_space(lispobj *);
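
/* Usage note (illustrative; inferred from these declarations rather
 * than stated in this header): find_page_index() maps an address to
 * its slot in page_table, and page_address() maps a page index back to
 * the start of that page, so for a pointer p into the dynamic space
 * one would expect
 *
 *     page_address(find_page_index(p)) <= p
 *         && p < page_address(find_page_index(p)) + PAGE_BYTES
 *
 * with find_page_index() presumably returning -1 for addresses outside
 * the dynamic space. */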
struct page {

    unsigned
        /* This is set when the page is write-protected. This should
         * always reflect the actual write_protect status of a page.
         * (If the page is written into, we catch the exception, make
         * the page writable, and clear this flag.) */
        write_protected :1,
        /* This flag is set when the above write_protected flag is
         * cleared by the SIGBUS handler (or SIGSEGV handler, for some
         * OSes). This is useful for re-scavenging pages that are
         * written during a GC. */
        write_protected_cleared :1,
        /* the region the page is allocated to: 0 for a free page; 1
         * for boxed objects; 2 for unboxed objects. If the page is
         * free then the following slots are invalid (well, the
         * bytes_used must be 0). */
        allocated :3,
        /* If this page should not be moved during a GC then this flag
         * is set. It's only valid during a GC for allocated pages. */
        dont_move :1,
        /* If the page is part of a large object then this flag is
         * set. No other objects should be allocated to these pages.
         * This is only valid when the page is allocated. */
        large_object :1;

    /* the generation that this page belongs to. This should be valid
     * for all pages that may have objects allocated, even current
     * allocation region pages - this allows the space of an object to
     * be easily determined. */
    int gen;

    /* the number of bytes of this page that are used. This may be less
     * than the actual bytes used for pages within the current
     * allocation regions. It should be 0 for all unallocated pages (not
     * hard to achieve). */
    int bytes_used;

    /* It is important to know the offset to the first object in the
     * page. Currently it's only important to know whether an object
     * starts at the beginning of the page, in which case the offset
     * would be 0. */
    int first_object_offset;
};
/* values for the page.allocated field */
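/* The definitions themselves do not survive in this copy of the file;
 * the three below are an assumption reconstructed from the comment on
 * the `allocated' bitfield above (0 = free, 1 = boxed, 2 = unboxed). */
#define FREE_PAGE    0
#define BOXED_PAGE   1
#define UNBOXED_PAGE 2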
/* the number of pages needed for the dynamic space - rounding up */
#define NUM_PAGES ((DYNAMIC_SPACE_SIZE+PAGE_BYTES-1)/PAGE_BYTES)
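/* Worked example (illustrative numbers only): if DYNAMIC_SPACE_SIZE
 * were 8000000 bytes with 4096-byte pages, (8000000+4095)/4096 = 1954,
 * so a final partially-used page still gets a page_table entry,
 * whereas plain division would give only 1953. */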
extern struct page page_table[NUM_PAGES];
void sniff_code_object(struct code *code, unsigned displacement);
void gencgc_apply_code_fixups(struct code *old_code, struct code *new_code);

int update_x86_dynamic_space_free_pointer(void);
void gc_alloc_update_page_tables(int unboxed,
                                 struct alloc_region *alloc_region);
/*
 * predicates
 */
static inline int
space_matches_p(lispobj obj, int space)
{
    int page_index = (void*)obj - (void *)DYNAMIC_SPACE_START;
    return ((page_index >= 0)
            && ((page_index =
                 ((unsigned int)page_index)/PAGE_BYTES) < NUM_PAGES)
            && (page_table[page_index].gen == space));
}
static inline boolean
from_space_p(lispobj obj)
{
    return space_matches_p(obj,from_space);
}
static inline boolean
new_space_p(lispobj obj)
{
    return space_matches_p(obj,new_space);
}
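
/* Usage sketch (illustrative only, not part of the original header):
 * during scavenging, a word is typically tested along the lines of
 *
 *     if (is_lisp_pointer(word) && from_space_p(word))
 *         ... transport the object to new space ...
 *
 * where is_lisp_pointer() is assumed to come from runtime.h. */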
#endif