[sbcl.git] / src / runtime / gc-private.h
/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
/* Include this header only in files that are _really_ part of GC
   or intimately tied to GC like 'traceroot'. */
#ifndef _GC_PRIVATE_H_
#define _GC_PRIVATE_H_

#include "genesis/weak-pointer.h"

// Gencgc distinguishes between "quick" and "ordinary" requests.
// Even on cheneygc we need this flag, but it's actually just ignored.
#define ALLOC_QUICK 1
#ifdef LISP_FEATURE_GENCGC
#include "gencgc-alloc-region.h"
void *
gc_alloc_with_region(sword_t nbytes, int page_type_flag, struct alloc_region *my_region,
                     int quick_p);
static inline void *
gc_general_alloc(sword_t nbytes, int page_type_flag, int quick_p)
{
    struct alloc_region *my_region;
#ifdef SEGREGATED_CODE
    if (1 <= page_type_flag && page_type_flag <= 3) {
        my_region = &gc_alloc_region[page_type_flag-1];
#else
    if (UNBOXED_PAGE_FLAG == page_type_flag) {
        my_region = &unboxed_region;
    } else if (BOXED_PAGE_FLAG & page_type_flag) {
        my_region = &boxed_region;
#endif
    } else {
        lose("bad page type flag: %d", page_type_flag);
    }
    return gc_alloc_with_region(nbytes, page_type_flag, my_region, quick_p);
}
#else
extern void *gc_general_alloc(sword_t nbytes, int page_type_flag, int quick_p);
#endif
#define CHECK_COPY_PRECONDITIONS(object, nwords) \
    gc_dcheck(is_lisp_pointer(object)); \
    gc_dcheck(from_space_p(object)); \
    gc_dcheck((nwords & 0x01) == 0)

#define CHECK_COPY_POSTCONDITIONS(copy, lowtag) \
    gc_dcheck(lowtag_of(copy) == lowtag); \
    gc_dcheck(!from_space_p(copy));

#define note_transported_object(old, new) /* do nothing */
static inline lispobj
gc_general_copy_object(lispobj object, long nwords, int page_type_flag)
{
    lispobj *new;

    CHECK_COPY_PRECONDITIONS(object, nwords);

    /* Allocate space. */
    new = gc_general_alloc(nwords*N_WORD_BYTES, page_type_flag, ALLOC_QUICK);

    /* Copy the object. */
    memcpy(new, native_pointer(object), nwords*N_WORD_BYTES);

    note_transported_object(object, new);

    return make_lispobj(new, lowtag_of(object));
}
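
/* Usage sketch (illustrative only): a transporter for an ordinary boxed
 * object would typically do something along the lines of
 *
 *   lispobj copy = gc_general_copy_object(object, nwords, BOXED_PAGE_FLAG);
 *   set_forwarding_pointer(native_pointer(object), copy);
 *
 * where set_forwarding_pointer() lives elsewhere in the GC sources and is
 * mentioned here only to situate this helper. */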
extern sword_t (*scavtab[256])(lispobj *where, lispobj object);
extern struct weak_pointer *weak_pointers; /* in gc-common.c */
extern struct hash_table *weak_hash_tables; /* in gc-common.c */
extern struct hash_table *weak_AND_hash_tables; /* in gc-common.c */

// These next two are prototyped for both GCs
// but only gencgc will ever call them.
void gc_mark_range(lispobj *start, long count);
void gc_mark_obj(lispobj);

extern void heap_scavenge(lispobj *start, lispobj *limit);
extern sword_t scavenge(lispobj *start, sword_t n_words);
extern void scavenge_interrupt_contexts(struct thread *thread);
extern void scav_weak_hash_tables(int (*[5])(lispobj,lispobj),
                                  void (*)(lispobj*));
extern void scav_binding_stack(lispobj*, lispobj*, void(*)(lispobj));
extern void scan_binding_stack(void);
extern void scan_weak_hash_tables(int (*[5])(lispobj,lispobj));
extern void scan_weak_pointers(void);
extern void scav_hash_table_entries(struct hash_table *hash_table,
                                    int (*[5])(lispobj,lispobj),
                                    void (*)(lispobj*));
extern int (*weak_ht_alivep_funs[5])(lispobj,lispobj);
extern void gc_scav_pair(lispobj where[2]);

lispobj copy_unboxed_object(lispobj object, sword_t nwords);
lispobj copy_object(lispobj object, sword_t nwords);
lispobj copy_large_object(lispobj object, sword_t nwords, int page_type_flag);

lispobj *search_read_only_space(void *pointer);
lispobj *search_static_space(void *pointer);
lispobj *search_immobile_space(void *pointer);
lispobj *search_dynamic_space(void *pointer);
static inline int instruction_ptr_p(void *pointer, lispobj *start_addr)
{
    return widetag_of(*start_addr) == CODE_HEADER_WIDETAG &&
        pointer >= (void*)(start_addr + code_header_words(*start_addr));
}

extern int properly_tagged_p_internal(lispobj pointer, lispobj *start_addr);
static inline int properly_tagged_descriptor_p(void *pointer, lispobj *start_addr) {
    return is_lisp_pointer((lispobj)pointer) &&
        properly_tagged_p_internal((lispobj)pointer, start_addr);
}

extern void scavenge_control_stack(struct thread *th);
extern void scrub_control_stack(void);
extern void scrub_thread_control_stack(struct thread *);
#ifndef LISP_FEATURE_IMMOBILE_SPACE

static inline boolean filler_obj_p(lispobj* obj) { return 0; }

#else

extern void enliven_immobile_obj(lispobj*, int);
extern void fixup_immobile_refs(lispobj (*)(lispobj), lispobj, struct code*);

#define IMMOBILE_OBJ_VISITED_FLAG    0x10
#define IMMOBILE_OBJ_GENERATION_MASK 0x0f // mask off the VISITED flag

// Note: this does not work on a SIMPLE-FUN
// because a simple-fun header does not contain a generation.
#define __immobile_obj_generation(x) (__immobile_obj_gen_bits(x) & IMMOBILE_OBJ_GENERATION_MASK)

typedef int low_page_index_t;
#ifdef LISP_FEATURE_LITTLE_ENDIAN
static inline int immobile_obj_gen_bits(lispobj* pointer) // native pointer
{
    if (widetag_of(*pointer) == SIMPLE_FUN_WIDETAG)
        pointer = fun_code_header(pointer);
    return ((generation_index_t*)pointer)[3];
}
// Faster way when we know that the object can't be a simple-fun,
// such as when walking the immobile space.
static inline int __immobile_obj_gen_bits(lispobj* pointer) // native pointer
{
    return ((generation_index_t*)pointer)[3];
}
#else
#error "Need to define immobile_obj_gen_bits() for big-endian"
#endif /* little-endian */
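
/* Worked example (illustrative, assuming a 64-bit little-endian header word):
 * indexing byte 3 of the header reads header bits 24..31. A header word
 * written in hex as 0x........04xxxxNN (NN being the widetag; the x's do
 * not matter here) therefore yields gen bits of 0x04: generation 4 with the
 * VISITED flag clear. A value of 0x14 in that byte would mean generation 4
 * with the VISITED flag set. */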
static inline boolean filler_obj_p(lispobj* obj) {
    return *(int*)obj == (2<<N_WIDETAG_BITS | CODE_HEADER_WIDETAG);
}

#endif /* immobile space */

#define WEAK_POINTER_NWORDS \
    ALIGN_UP((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
static inline boolean weak_pointer_breakable_p(struct weak_pointer *wp)
{
    lispobj pointee = wp->value;
    // A broken weak-pointer's value slot has unbound-marker
    // which does not satisfy is_lisp_pointer().
    return is_lisp_pointer(pointee) && (from_space_p(pointee)
#ifdef LISP_FEATURE_IMMOBILE_SPACE
         || (immobile_space_p(pointee) &&
             immobile_obj_gen_bits(native_pointer(pointee)) == from_space)
#endif
            );
}
/// Same as Lisp LOGBITP, except no negative bignums allowed.
static inline boolean layout_bitmap_logbitp(int index, lispobj bitmap)
{
    if (fixnump(bitmap))
        return (index < (N_WORD_BITS - N_FIXNUM_TAG_BITS))
            ? (bitmap >> (index+N_FIXNUM_TAG_BITS)) & 1
            : (sword_t)bitmap < 0;
    return positive_bignum_logbitp(index, (struct bignum*)native_pointer(bitmap));
}
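
/* Example (illustrative only): with a bitmap whose untagged fixnum value
 * is 5 (binary 101), bits 0 and 2 are set, so
 *   layout_bitmap_logbitp(0, bitmap) => 1
 *   layout_bitmap_logbitp(1, bitmap) => 0
 *   layout_bitmap_logbitp(2, bitmap) => 1
 * Indices at or beyond the fixnum's bit width fall back to the sign test,
 * matching Lisp LOGBITP on a negative integer. */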

#if defined(LISP_FEATURE_GENCGC)
/* Define a macro to avoid a detour through the write fault handler.
 *
 * It's usually more efficient to do these extra tests than to receive
 * a signal. And it leaves the page protected, which is a bonus.
 * The downside is that multiple operations on the same page ought to
 * be batched, so that there is at most one unprotect/reprotect per page
 * rather than per write operation per page.
 *
 * This also should fix -fsanitize=thread, which makes handling of SIGSEGV
 * during GC difficult. Not impossible, but definitely broken.
 * It has to do with the way the sanitizer intercepts calls
 * to sigaction() - it mucks with your sa_mask :-(.
 *
 * This macro takes an arbitrary expression as the 'operation' rather than
 * an address and value to assign, for two reasons:
 * 1. there may be more than one store operation that has to be
 *    within the scope of the lifted write barrier,
 *    so a single lvalue and rvalue is maybe inadequate.
 * 2. it might need to use a sync_fetch_and_<frob>() gcc intrinsic,
 *    so it's not necessarily just going to be an '=' operator.
 *
 * KLUDGE: assume that faults do not occur in immobile space,
 * for the most part. (This is pretty obviously not true,
 * but seems only to be a problem in fullcgc.)
 */
#define NON_FAULTING_STORE(operation, addr) { \
    page_index_t page_index = find_page_index(addr); \
    if (page_index < 0 || !page_table[page_index].write_protected) { operation; } \
    else { unprotect_page_index(page_index); \
           operation; \
           protect_page(page_address(page_index), page_index); }}
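
/* Usage sketch (illustrative; 'where' and 'newval' are hypothetical names):
 *
 *   lispobj *where = ...;   // a word that may live on a write-protected page
 *   NON_FAULTING_STORE(*where = newval, where);
 *
 * If 'where' is outside the dynamic space (find_page_index() < 0) or its
 * page is not write-protected, the store happens directly; otherwise the
 * page is unprotected, the store is performed, and protection is restored,
 * all without taking a SIGSEGV. */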
/* This is used by the fault handler, and potentially during GC */
static inline void unprotect_page_index(page_index_t page_index)
{
    os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
    unsigned char *pflagbits = (unsigned char*)&page_table[page_index].gen - 1;
    __sync_fetch_and_or(pflagbits, WP_CLEARED_BIT);
    __sync_fetch_and_and(pflagbits, ~WRITE_PROTECTED_BIT);
}
static inline void protect_page(void* page_addr, page_index_t page_index)
{
    os_protect((void *)page_addr,
               GENCGC_CARD_BYTES,
               OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);

    /* Note: we never touch the write_protected_cleared bit when protecting
     * a page. Consider two random threads that reach their SIGSEGV handlers
     * concurrently, each checking why it got a write fault. One thread wins
     * the race to remove the memory protection, and marks our shadow bit.
     * wp_cleared is set so that the other thread can conclude that the fault
     * was reasonable.
     * If GC unprotects and reprotects a page, it's probably OK to reset the
     * cleared bit to 0 if it was 0 before. (Because the fault handler blocks
     * SIG_STOP_FOR_GC, which is usually SIGUSR2, handling the wp fault is
     * atomic with respect to invocation of GC.)
     * But nothing is really gained by resetting the cleared flag.
     * It is explicitly zeroed on pages marked as free, though. */
    page_table[page_index].write_protected = 1;
}
#else

#define NON_FAULTING_STORE(operation, addr) operation

#endif
// For x86[-64], a simple-fun or closure's "self" slot is a fixnum.
// On other backends, it is a lisp pointer.
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
#define FUN_SELF_FIXNUM_TAGGED 1
#else
#define FUN_SELF_FIXNUM_TAGGED 0
#endif
#ifdef LISP_FEATURE_IMMOBILE_SPACE
static inline void *
fixedobj_page_address(low_page_index_t page_num)
{
    return (void*)(FIXEDOBJ_SPACE_START + (page_num * IMMOBILE_CARD_BYTES));
}
static inline void *
varyobj_page_address(low_page_index_t page_num)
{
    return (void*)(VARYOBJ_SPACE_START + (page_num * IMMOBILE_CARD_BYTES));
}
#endif

#endif /* _GC_PRIVATE_H_ */