Take pointer, not word count, as upper limit in verify_space()
[sbcl.git] / src / runtime / safepoint.c
bloba679188422a3b389d156cf12156db70f3219f60e
1 /*
2 * This software is part of the SBCL system. See the README file for
3 * more information.
5 * This software is derived from the CMU CL system, which was
6 * written at Carnegie Mellon University and released into the
7 * public domain. The software is in the public domain and is
8 * provided with absolutely no warranty. See the COPYING and CREDITS
9 * files for more information.
11 #include "sbcl.h"
13 #ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
14 #include <stdlib.h>
15 #include <stdio.h>
16 #include <string.h>
17 #ifndef LISP_FEATURE_WIN32
18 #include <sched.h>
19 #endif
20 #include <signal.h>
21 #include <stddef.h>
22 #include <errno.h>
23 #include <sys/types.h>
24 #ifndef LISP_FEATURE_WIN32
25 #include <sys/wait.h>
26 #endif
27 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
28 #include <mach/mach.h>
29 #include <mach/mach_error.h>
30 #include <mach/mach_types.h>
31 #endif
32 #include "runtime.h"
33 #include "validate.h"
34 #include "thread.h"
35 #include "arch.h"
36 #include "target-arch-os.h"
37 #include "os.h"
38 #include "globals.h"
39 #include "dynbind.h"
40 #include "genesis/cons.h"
41 #include "genesis/fdefn.h"
42 #include "interr.h"
43 #include "alloc.h"
44 #include "gc-internal.h"
45 #include "pseudo-atomic.h"
46 #include "interrupt.h"
47 #include "lispregs.h"
49 #if !defined(LISP_FEATURE_WIN32)
50 /* win32-os.c covers these, but there is no unixlike-os.c, so the normal
51 * definition goes here. Fixme: (Why) don't these work for Windows?
53 void
54 alloc_gc_page()
56 os_validate(GC_SAFEPOINT_PAGE_ADDR, 4);
59 void
60 map_gc_page()
62 odxprint(misc, "map_gc_page");
63 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
65 OS_VM_PROT_READ | OS_VM_PROT_WRITE);
68 void
69 unmap_gc_page()
71 odxprint(misc, "unmap_gc_page");
72 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
74 #endif /* !LISP_FEATURE_WIN32 */
76 /* Planned state progressions:
78 * none -> flight:
 * unmap_gc_page().  No blockers (GC_NONE can be left at any moment).
82 * flight -> message:
84 * happens when a master thread enters its trap.
86 * The only blocker for flight mode is the master thread itself
87 * (GC_FLIGHT can't be left until the master thread traps).
89 * message -> invoked:
91 * happens after each (other) thread is notified, i.e. it will
92 * eventually stop (already stopped). map_gc_page().
 * Each thread with an empty CSP refuses to leave the GC_MESSAGE phase.
96 * invoked -> collect:
98 * happens when every gc-inhibitor comes to completion (that's
99 * normally pending interrupt trap).
101 * NB gc_stop_the_world, if it happens in non-master thread, "takes
102 * over" as a master, also deregistering itself as a blocker
103 * (i.e. it's ready to leave GC_INVOKED, but now it objects to
104 * leaving GC_COLLECT; this "usurpation" doesn't require any change
105 * to GC_COLLECT counter: for the counter, it's immaterial _which_
106 * thread is waiting).
108 * collect -> none:
110 * happens at gc_start_the_world (that should always happen in the
111 * master).
113 * Any thread waiting until GC end now continues.
115 struct gc_state {
116 /* Flag: conditions are initialized */
117 boolean initialized;
119 /* Per-process lock for gc_state */
120 pthread_mutex_t lock;
122 /* Conditions: one per phase */
123 pthread_cond_t phase_cond[GC_NPHASES];
125 /* For each [current or future] phase, a number of threads not yet ready to
126 * leave it */
127 int phase_wait[GC_NPHASES];
129 /* Master thread controlling the topmost stop/gc/start sequence */
130 struct thread* master;
131 struct thread* collector;
133 /* Current GC phase */
134 gc_phase_t phase;
137 static struct gc_state gc_state = {
138 .lock = PTHREAD_MUTEX_INITIALIZER,
139 .phase = GC_NONE,
142 void
143 gc_state_lock()
145 odxprint(safepoints,"GC state [%p] to be locked",gc_state.lock);
146 int result = pthread_mutex_lock(&gc_state.lock);
147 gc_assert(!result);
148 if (gc_state.master) {
149 fprintf(stderr,"GC state lock glitch [%p] in thread %p phase %d\n",
150 gc_state.master,arch_os_get_current_thread(),gc_state.phase);
151 odxprint(safepoints,"GC state lock glitch [%p]",gc_state.master);
153 gc_assert(!gc_state.master);
154 gc_state.master = arch_os_get_current_thread();
155 if (!gc_state.initialized) {
156 int i;
157 for (i=GC_NONE; i<GC_NPHASES; ++i)
158 pthread_cond_init(&gc_state.phase_cond[i],NULL);
159 gc_state.initialized = 1;
161 odxprint(safepoints,"GC state [%p] locked in phase %d",gc_state.lock, gc_state.phase);
164 void
165 gc_state_unlock()
167 odxprint(safepoints,"GC state to be unlocked in phase %d",gc_state.phase);
168 gc_assert(arch_os_get_current_thread()==gc_state.master);
169 gc_state.master = NULL;
170 int result = pthread_mutex_unlock(&gc_state.lock);
171 gc_assert(!result);
172 odxprint(safepoints,"%s","GC state unlocked");
175 void
176 gc_state_wait(gc_phase_t phase)
178 struct thread* self = arch_os_get_current_thread();
179 odxprint(safepoints,"Waiting for %d -> %d [%d holders]",
180 gc_state.phase,phase,gc_state.phase_wait[gc_state.phase]);
181 gc_assert(gc_state.master == self);
182 gc_state.master = NULL;
183 while(gc_state.phase != phase && !(phase == GC_QUIET && (gc_state.phase > GC_QUIET)))
184 pthread_cond_wait(&gc_state.phase_cond[phase],&gc_state.lock);
185 gc_assert(gc_state.master == NULL);
186 gc_state.master = self;
189 static void
190 set_csp_from_context(struct thread *self, os_context_t *ctx)
192 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
193 void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
194 /* On POSIX platforms, it is sufficient to investigate only the part
195 * of the stack that was live before the interrupt, because in
196 * addition, we consider interrupt contexts explicitly. On Windows,
197 * however, we do not keep an explicit stack of exception contexts,
198 * and instead arrange for the conservative stack scan to also cover
199 * the context implicitly. The obvious way to do that is to start
200 * at the context itself: */
201 #ifdef LISP_FEATURE_WIN32
202 gc_assert((void **) ctx < sp);
203 sp = (void**) ctx;
204 #endif
205 gc_assert((void **)self->control_stack_start
206 <= sp && sp
207 < (void **)self->control_stack_end);
208 #else
209 /* Note that the exact value doesn't matter much here, since
210 * platforms with precise GC use get_csp() only as a boolean -- the
211 * precise GC already keeps track of the stack pointer itself. */
212 void **sp = (void **) 0xEEEEEEEE;
213 #endif
214 *self->csp_around_foreign_call = (lispobj) sp;
218 static inline gc_phase_t gc_phase_next(gc_phase_t old) {
219 return (old+1) % GC_NPHASES;
222 static inline gc_phase_t thread_gc_phase(struct thread* p)
224 boolean inhibit = (SymbolTlValue(GC_INHIBIT,p)==T)||
225 (SymbolTlValue(IN_WITHOUT_GCING,p)==IN_WITHOUT_GCING);
227 boolean inprogress =
228 (SymbolTlValue(GC_PENDING,p)!=T&& SymbolTlValue(GC_PENDING,p)!=NIL);
230 return
231 inprogress ? (gc_state.collector && (gc_state.collector != p)
232 ? GC_NONE : GC_QUIET)
233 : (inhibit ? GC_INVOKED : GC_NONE);
236 static inline void thread_gc_promote(struct thread* p, gc_phase_t cur, gc_phase_t old) {
237 if (old != GC_NONE)
238 gc_state.phase_wait[old]--;
239 if (cur != GC_NONE) {
240 gc_state.phase_wait[cur]++;
242 if (cur != GC_NONE)
243 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
246 /* set_thread_csp_access -- alter page permissions for not-in-Lisp
247 flag (Lisp Stack Top) of the thread `p'. The flag may be modified
248 if `writable' is true.
250 Return true if there is a non-null value in the flag.
252 When a thread enters C code or leaves it, a per-thread location is
253 modified. That machine word serves as a not-in-Lisp flag; for
254 convenience, when in C, it's filled with a topmost stack location
255 that may contain Lisp data. When thread is in Lisp, the word
256 contains NULL.
258 GENCGC uses each thread's flag value for conservative garbage collection.
260 There is a full VM page reserved for this word; page permissions
261 are switched to read-only for race-free examine + wait + use
262 scenarios. */
263 static inline boolean
264 set_thread_csp_access(struct thread* p, boolean writable)
266 os_protect((os_vm_address_t) p->csp_around_foreign_call,
267 THREAD_CSP_PAGE_SIZE,
268 writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
269 : (OS_VM_PROT_READ));
270 return !!*p->csp_around_foreign_call;
273 static inline void gc_notify_early()
275 struct thread *self = arch_os_get_current_thread(), *p;
276 odxprint(safepoints,"%s","global notification");
277 pthread_mutex_lock(&all_threads_lock);
278 for_each_thread(p) {
279 if (p==self)
280 continue;
281 odxprint(safepoints,"notifying thread %p csp %p",p,*p->csp_around_foreign_call);
282 if (!set_thread_csp_access(p,0)) {
283 thread_gc_promote(p, gc_state.phase, GC_NONE);
284 } else {
285 thread_gc_promote(p, thread_gc_phase(p), GC_NONE);
288 pthread_mutex_unlock(&all_threads_lock);
291 static inline void gc_notify_final()
293 struct thread *p;
294 odxprint(safepoints,"%s","global notification");
295 gc_state.phase_wait[gc_state.phase]=0;
296 pthread_mutex_lock(&all_threads_lock);
297 for_each_thread(p) {
298 if (p == gc_state.collector)
299 continue;
300 odxprint(safepoints,"notifying thread %p csp %p",p,*p->csp_around_foreign_call);
301 if (!set_thread_csp_access(p,0)) {
302 thread_gc_promote(p, gc_state.phase, GC_NONE);
305 pthread_mutex_unlock(&all_threads_lock);
308 static inline void gc_done()
310 struct thread *self = arch_os_get_current_thread(), *p;
311 boolean inhibit = (SymbolTlValue(GC_INHIBIT,self)==T);
313 odxprint(safepoints,"%s","global denotification");
314 pthread_mutex_lock(&all_threads_lock);
315 for_each_thread(p) {
316 if (inhibit && (SymbolTlValue(GC_PENDING,p)==T))
317 SetTlSymbolValue(GC_PENDING,NIL,p);
318 set_thread_csp_access(p,1);
320 pthread_mutex_unlock(&all_threads_lock);
323 static inline void gc_handle_phase()
325 odxprint(safepoints,"Entering phase %d",gc_state.phase);
326 switch (gc_state.phase) {
327 case GC_FLIGHT:
328 unmap_gc_page();
329 break;
330 case GC_MESSAGE:
331 gc_notify_early();
332 break;
333 case GC_INVOKED:
334 map_gc_page();
335 break;
336 case GC_SETTLED:
337 gc_notify_final();
338 unmap_gc_page();
339 break;
340 case GC_COLLECT:
341 map_gc_page();
342 break;
343 case GC_NONE:
344 gc_done();
345 break;
346 default:
347 break;
352 /* become ready to leave the <old> phase, but unready to leave the <new> phase;
353 * `old' can be GC_NONE, it means this thread weren't blocking any state. `cur'
354 * can be GC_NONE, it means this thread wouldn't block GC_NONE, but still wait
355 * for it. */
356 static inline void gc_advance(gc_phase_t cur, gc_phase_t old) {
357 odxprint(safepoints,"GC advance request %d -> %d in phase %d",old,cur,gc_state.phase);
358 if (cur == old)
359 return;
360 if (cur == gc_state.phase)
361 return;
362 if (old < gc_state.phase)
363 old = GC_NONE;
364 if (old != GC_NONE) {
365 gc_state.phase_wait[old]--;
366 odxprint(safepoints,"%d holders of phase %d without me",gc_state.phase_wait[old],old);
368 if (cur != GC_NONE) {
369 gc_state.phase_wait[cur]++;
370 odxprint(safepoints,"%d holders of phase %d with me",gc_state.phase_wait[cur],cur);
372 /* roll forth as long as there's no waiters */
373 while (gc_state.phase_wait[gc_state.phase]==0
374 && gc_state.phase != cur) {
375 gc_state.phase = gc_phase_next(gc_state.phase);
376 odxprint(safepoints,"no blockers, direct advance to %d",gc_state.phase);
377 gc_handle_phase();
378 pthread_cond_broadcast(&gc_state.phase_cond[gc_state.phase]);
380 odxprint(safepoints,"going to wait for %d threads",gc_state.phase_wait[gc_state.phase]);
381 gc_state_wait(cur);
384 void
385 thread_register_gc_trigger()
387 odxprint(misc, "/thread_register_gc_trigger");
388 struct thread *self = arch_os_get_current_thread();
389 gc_state_lock();
390 if (gc_state.phase == GC_NONE &&
391 SymbolTlValue(IN_SAFEPOINT,self)!=T &&
392 thread_gc_phase(self)==GC_NONE) {
393 gc_advance(GC_FLIGHT,GC_NONE);
395 gc_state_unlock();
398 static inline int
399 thread_may_gc()
401 /* Thread may gc if all of these are true:
402 * 1) GC_INHIBIT == NIL (outside of protected part of without-gcing)
403 * 2) GC_PENDING != :in-progress (outside of recursion protection)
404 * Note that we are in a safepoint here, which is always outside of PA. */
406 struct thread *self = arch_os_get_current_thread();
407 return (SymbolValue(GC_INHIBIT, self) == NIL
408 && (SymbolTlValue(GC_PENDING, self) == T ||
409 SymbolTlValue(GC_PENDING, self) == NIL));
412 #ifdef LISP_FEATURE_SB_THRUPTION
413 static inline int
414 thread_may_thrupt(os_context_t *ctx)
416 struct thread * self = arch_os_get_current_thread();
417 /* Thread may be interrupted if all of these are true:
418 * 1) Deferrables are unblocked in the context of the signal that
419 * went into the safepoint. -- Otherwise the surrounding code
420 * didn't want to be interrupted by a signal, so presumably it didn't
421 * want to be INTERRUPT-THREADed either.
422 * (See interrupt_handle_pending for an exception.)
423 * 2) On POSIX: There is no pending signal. This is important even
424 * after checking the sigmask, since we could be in the
425 * handle_pending trap following re-enabling of interrupts.
426 * Signals are unblocked in that case, but the signal is still
427 * pending; we want to run GC before handling the signal and
428 * therefore entered this safepoint. But the thruption would call
429 * ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
430 * trap, leading to recursion.
431 * 3) INTERRUPTS_ENABLED is non-nil.
432 * 4) No GC pending; it takes precedence.
433 * Note that we are in a safepoint here, which is always outside of PA. */
435 if (SymbolValue(INTERRUPTS_ENABLED, self) == NIL)
436 return 0;
438 if (SymbolValue(GC_PENDING, self) != NIL)
439 return 0;
441 if (SymbolValue(STOP_FOR_GC_PENDING, self) != NIL)
442 return 0;
444 #ifdef LISP_FEATURE_WIN32
445 if (deferrables_blocked_p(&self->os_thread->blocked_signal_set))
446 return 0;
447 #else
448 /* ctx is NULL if the caller wants to ignore the sigmask. */
449 if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
450 return 0;
451 if (SymbolValue(INTERRUPT_PENDING, self) != NIL)
452 return 0;
453 #endif
455 if (SymbolValue(RESTART_CLUSTERS, self) == NIL)
456 /* This special case prevents TERMINATE-THREAD from hitting
457 * during INITIAL-THREAD-FUNCTION before it's ready. Curiously,
458 * deferrables are already unblocked there. Further
459 * investigation may be in order. */
460 return 0;
462 return 1;
465 // returns 0 if skipped, 1 otherwise
467 check_pending_thruptions(os_context_t *ctx)
469 struct thread *p = arch_os_get_current_thread();
471 #ifdef LISP_FEATURE_WIN32
472 pthread_t pself = p->os_thread;
473 sigset_t oldset;
474 /* On Windows, wake_thread/kill_safely does not set THRUPTION_PENDING
475 * in the self-kill case; instead we do it here while also clearing the
476 * "signal". */
477 if (pself->pending_signal_set)
478 if (__sync_fetch_and_and(&pself->pending_signal_set,0))
479 SetSymbolValue(THRUPTION_PENDING, T, p);
480 #endif
482 if (!thread_may_thrupt(ctx))
483 return 0;
484 if (SymbolValue(THRUPTION_PENDING, p) == NIL)
485 return 0;
486 SetSymbolValue(THRUPTION_PENDING, NIL, p);
488 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
489 int was_in_lisp = !foreign_function_call_active_p(p);
490 if (was_in_lisp) {
491 if (!ctx)
492 lose("self-kill bug");
493 fake_foreign_function_call(ctx);
495 #endif
497 #ifdef LISP_FEATURE_WIN32
498 oldset = pself->blocked_signal_set;
499 pself->blocked_signal_set = deferrable_sigset;
500 if (ctx) fake_foreign_function_call(ctx);
501 #else
502 sigset_t oldset;
503 block_deferrable_signals(&oldset);
504 #endif
506 funcall0(StaticSymbolFunction(RUN_INTERRUPTION));
508 #ifdef LISP_FEATURE_WIN32
509 if (ctx) undo_fake_foreign_function_call(ctx);
510 pself->blocked_signal_set = oldset;
511 if (ctx) ctx->sigmask = oldset;
512 #else
513 thread_sigmask(SIG_SETMASK, &oldset, 0);
514 #endif
516 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
517 if (was_in_lisp)
518 undo_fake_foreign_function_call(ctx);
519 #endif
521 return 1;
523 #endif
526 on_stack_p(struct thread *th, void *esp)
528 return (void *)th->control_stack_start
529 <= esp && esp
530 < (void *)th->control_stack_end;
533 #ifndef LISP_FEATURE_WIN32
534 /* (Technically, we still allocate an altstack even on Windows. Since
535 * Windows has a contiguous stack with an automatic guard page of
536 * user-configurable size instead of an alternative stack though, the
537 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
539 on_altstack_p(struct thread *th, void *esp)
541 void *start = (void *)th+dynamic_values_bytes;
542 void *end = (char *)start + 32*SIGSTKSZ;
543 return start <= esp && esp < end;
545 #endif
/* Die unless `esp' is within `th's control stack; distinguishes the
 * altstack case in the error message on POSIX. */
void
assert_on_stack(struct thread *th, void *esp)
{
    if (on_stack_p(th, esp))
        return;
#ifndef LISP_FEATURE_WIN32
    if (on_altstack_p(th, esp))
        lose("thread %p: esp on altstack: %p", th, esp);
#endif
    lose("thread %p: bogus esp: %p", th, esp);
}
559 // returns 0 if skipped, 1 otherwise
561 check_pending_gc(os_context_t *ctx)
563 odxprint(misc, "check_pending_gc");
564 struct thread * self = arch_os_get_current_thread();
565 int done = 0;
566 sigset_t sigset;
568 if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
569 ((SymbolValue(GC_INHIBIT,self) == NIL) &&
570 (SymbolValue(GC_PENDING,self) == NIL))) {
571 SetSymbolValue(IN_SAFEPOINT,NIL,self);
573 if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
574 if ((SymbolTlValue(GC_PENDING, self) == T)) {
575 lispobj gc_happened = NIL;
577 bind_variable(IN_SAFEPOINT,T,self);
578 block_deferrable_signals(&sigset);
579 if(SymbolTlValue(GC_PENDING,self)==T)
580 gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
581 unbind(self);
582 thread_sigmask(SIG_SETMASK,&sigset,NULL);
583 if (gc_happened == T) {
584 /* POST_GC wants to enable interrupts */
585 if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
586 SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
587 odxprint(misc, "going to call POST_GC");
588 funcall0(StaticSymbolFunction(POST_GC));
590 done = 1;
594 return done;
598 void thread_in_lisp_raised(os_context_t *ctxptr)
600 struct thread *self = arch_os_get_current_thread();
601 gc_phase_t phase;
602 odxprint(safepoints,"%s","thread_in_lisp_raised");
603 gc_state_lock();
605 if (gc_state.phase == GC_FLIGHT &&
606 SymbolTlValue(GC_PENDING,self)==T &&
607 thread_gc_phase(self)==GC_NONE &&
608 thread_may_gc() && SymbolTlValue(IN_SAFEPOINT,self)!=T) {
609 set_csp_from_context(self, ctxptr);
610 gc_advance(GC_QUIET,GC_FLIGHT);
611 set_thread_csp_access(self,1);
612 if (gc_state.collector) {
613 gc_advance(GC_NONE,GC_QUIET);
614 } else {
615 *self->csp_around_foreign_call = 0;
616 SetTlSymbolValue(GC_PENDING,T,self);
618 gc_state_unlock();
619 check_pending_gc(ctxptr);
620 #ifdef LISP_FEATURE_SB_THRUPTION
621 while(check_pending_thruptions(ctxptr));
622 #endif
623 return;
625 if (gc_state.phase == GC_FLIGHT) {
626 gc_state_wait(GC_MESSAGE);
628 phase = thread_gc_phase(self);
629 if (phase == GC_NONE) {
630 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
631 set_thread_csp_access(self,1);
632 set_csp_from_context(self, ctxptr);
633 if (gc_state.phase <= GC_SETTLED)
634 gc_advance(phase,gc_state.phase);
635 else
636 gc_state_wait(phase);
637 *self->csp_around_foreign_call = 0;
638 gc_state_unlock();
639 check_pending_gc(ctxptr);
640 #ifdef LISP_FEATURE_SB_THRUPTION
641 while(check_pending_thruptions(ctxptr));
642 #endif
643 } else {
644 gc_advance(phase,gc_state.phase);
645 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
646 gc_state_unlock();
650 void thread_in_safety_transition(os_context_t *ctxptr)
652 struct thread *self = arch_os_get_current_thread();
654 odxprint(safepoints,"%s","GC safety transition");
655 gc_state_lock();
656 if (set_thread_csp_access(self,1)) {
657 gc_state_wait(thread_gc_phase(self));
658 gc_state_unlock();
659 #ifdef LISP_FEATURE_SB_THRUPTION
660 while(check_pending_thruptions(ctxptr));
661 #endif
662 } else {
663 gc_phase_t phase = thread_gc_phase(self);
664 if (phase == GC_NONE) {
665 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
666 set_csp_from_context(self, ctxptr);
667 if (gc_state.phase <= GC_SETTLED)
668 gc_advance(phase,gc_state.phase);
669 else
670 gc_state_wait(phase);
671 *self->csp_around_foreign_call = 0;
672 } else {
673 gc_advance(phase,gc_state.phase);
674 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
676 gc_state_unlock();
680 void thread_interrupted(os_context_t *ctxptr)
682 struct thread *self = arch_os_get_current_thread();
684 odxprint(safepoints,"%s","pending interrupt trap");
685 gc_state_lock();
686 if (gc_state.phase != GC_NONE) {
687 if (set_thread_csp_access(self,1)) {
688 gc_state_unlock();
689 thread_in_safety_transition(ctxptr);
690 } else {
691 gc_state_unlock();
692 thread_in_lisp_raised(ctxptr);
694 } else {
695 gc_state_unlock();
697 check_pending_gc(ctxptr);
698 #ifdef LISP_FEATURE_SB_THRUPTION
699 while(check_pending_thruptions(ctxptr));
700 #endif
703 void
704 gc_stop_the_world()
706 struct thread* self = arch_os_get_current_thread();
707 odxprint(safepoints, "stop the world");
708 gc_state_lock();
709 gc_state.collector = self;
710 gc_state.phase_wait[GC_QUIET]++;
712 switch(gc_state.phase) {
713 case GC_NONE:
714 gc_advance(GC_QUIET,gc_state.phase);
715 case GC_FLIGHT:
716 case GC_MESSAGE:
717 case GC_INVOKED:
718 gc_state_wait(GC_QUIET);
719 case GC_QUIET:
720 gc_state.phase_wait[GC_QUIET]=1;
721 gc_advance(GC_COLLECT,GC_QUIET);
722 break;
723 case GC_COLLECT:
724 break;
725 default:
726 lose("Stopping the world in unexpected state %d",gc_state.phase);
727 break;
729 set_thread_csp_access(self,1);
730 gc_state_unlock();
731 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
735 void gc_start_the_world()
737 odxprint(safepoints,"%s","start the world");
738 gc_state_lock();
739 gc_state.collector = NULL;
740 SetSymbolValue(IN_WITHOUT_GCING,IN_WITHOUT_GCING,
741 arch_os_get_current_thread());
742 gc_advance(GC_NONE,GC_COLLECT);
743 gc_state_unlock();
747 #ifdef LISP_FEATURE_SB_THRUPTION
748 /* wake_thread(thread) -- ensure a thruption delivery to
749 * `thread'. */
751 # ifdef LISP_FEATURE_WIN32
753 void
754 wake_thread_io(struct thread * thread)
756 SetEvent(thread->private_events.events[1]);
757 win32_maybe_interrupt_io(thread);
760 void
761 wake_thread_win32(struct thread *thread)
763 struct thread *self = arch_os_get_current_thread();
765 wake_thread_io(thread);
767 if (SymbolTlValue(THRUPTION_PENDING,thread)==T)
768 return;
770 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
772 if ((SymbolTlValue(GC_PENDING,thread)==T)||
773 (SymbolTlValue(STOP_FOR_GC_PENDING,thread)==T))
774 return;
776 wake_thread_io(thread);
777 pthread_mutex_unlock(&all_threads_lock);
779 gc_state_lock();
780 if (gc_state.phase == GC_NONE) {
781 gc_advance(GC_INVOKED,GC_NONE);
782 gc_advance(GC_NONE,GC_INVOKED);
784 gc_state_unlock();
786 pthread_mutex_lock(&all_threads_lock);
787 return;
789 # else
791 wake_thread_posix(os_thread_t os_thread)
793 int found = 0;
794 struct thread *thread;
795 struct thread *self = arch_os_get_current_thread();
797 /* Must not and need not attempt to signal ourselves while we're the
798 * STW initiator. */
799 if (self->os_thread == os_thread) {
800 SetTlSymbolValue(THRUPTION_PENDING,T,self);
801 WITH_GC_AT_SAFEPOINTS_ONLY()
802 while (check_pending_thruptions(0 /* ignore the sigmask */))
804 return 0;
807 /* We are not in a signal handler here, so need to block signals
808 * manually. */
809 sigset_t oldset;
810 block_deferrable_signals(&oldset);
812 gc_state_lock();
813 if (gc_state.phase == GC_NONE) {
814 odxprint(safepoints, "wake_thread_posix: invoking");
815 gc_advance(GC_INVOKED,GC_NONE);
817 /* only if in foreign code, notify using signal */
818 pthread_mutex_lock(&all_threads_lock);
819 for_each_thread (thread)
820 if (thread->os_thread == os_thread) {
821 /* it's still alive... */
822 found = 1;
824 odxprint(safepoints, "wake_thread_posix: found");
825 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
826 if (SymbolTlValue(GC_PENDING,thread) == T
827 || SymbolTlValue(STOP_FOR_GC_PENDING,thread) == T)
828 break;
830 if (os_get_csp(thread)) {
831 odxprint(safepoints, "wake_thread_posix: kill");
832 /* ... and in foreign code. Push it into a safety
833 * transition. */
834 int status = pthread_kill(os_thread, SIGPIPE);
835 if (status)
836 lose("wake_thread_posix: pthread_kill failed with %d\n",
837 status);
839 break;
841 pthread_mutex_unlock(&all_threads_lock);
843 gc_advance(GC_NONE,GC_INVOKED);
844 } else {
845 odxprint(safepoints, "wake_thread_posix: passive");
846 /* We are not able to wake the thread up actively, but maybe
847 * some other thread will take care of it. Kludge: Unless it is
848 * in foreign code. Let's at least try to get our return value
849 * right. */
850 pthread_mutex_lock(&all_threads_lock);
851 for_each_thread (thread)
852 if (thread->os_thread == os_thread) {
853 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
854 found = 1;
855 break;
857 pthread_mutex_unlock(&all_threads_lock);
859 gc_state_unlock();
861 odxprint(safepoints, "wake_thread_posix leaving, found=%d", found);
862 thread_sigmask(SIG_SETMASK, &oldset, 0);
863 return found ? 0 : -1;
865 #endif /* !LISP_FEATURE_WIN32 */
866 #endif /* LISP_FEATURE_SB_THRUPTION */
868 void**
869 os_get_csp(struct thread* th)
871 FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
873 th->csp_around_foreign_call,
874 *(void***)th->csp_around_foreign_call,
875 th->control_stack_start,
876 th->control_stack_end));
877 return *(void***)th->csp_around_foreign_call;
881 #ifndef LISP_FEATURE_WIN32
883 # ifdef LISP_FEATURE_SB_THRUPTION
884 void
885 thruption_handler(int signal, siginfo_t *info, os_context_t *ctx)
887 struct thread *self = arch_os_get_current_thread();
889 void *transition_sp = os_get_csp(self);
890 if (!transition_sp)
891 /* In Lisp code. Do not run thruptions asynchronously. The
892 * next safepoint will take care of it. */
893 return;
895 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
896 if (!foreign_function_call_active_p(self))
897 lose("csp && !ffca");
898 #endif
900 /* In C code. As a rule, we assume that running thruptions is OK. */
901 *self->csp_around_foreign_call = 0;
902 thread_in_lisp_raised(ctx);
903 *self->csp_around_foreign_call = (intptr_t) transition_sp;
905 # endif
907 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
909 #define XSTR(s) STR(s)
910 #define STR(s) #s
911 /* Designed to be of the same type as call_into_lisp. Ignores its
912 * arguments. */
913 lispobj
914 handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
917 asm("int3; .byte " XSTR(trap_GlobalSafepoint));
918 return 0;
921 lispobj
922 handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
924 asm("int3; .byte " XSTR(trap_CspSafepoint));
925 return 0;
928 #endif /* C_STACK_IS_CONTROL_STACK */
931 handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
933 FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
934 fault_address,
935 GC_SAFEPOINT_PAGE_ADDR,
936 arch_os_get_current_thread()->csp_around_foreign_call));
938 struct thread *self = arch_os_get_current_thread();
940 if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
941 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
942 /* We're on the altstack and don't want to run Lisp code. */
943 arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
944 #else
945 if (foreign_function_call_active_p(self)) lose("GSP trap in C?");
946 fake_foreign_function_call(ctx);
947 thread_in_lisp_raised(ctx);
948 undo_fake_foreign_function_call(ctx);
949 #endif
950 return 1;
953 if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
954 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
955 arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
956 #else
957 if (!foreign_function_call_active_p(self)) lose("CSP trap in Lisp?");
958 thread_in_safety_transition(ctx);
959 #endif
960 return 1;
963 /* not a safepoint */
964 return 0;
966 #endif /* LISP_FEATURE_WIN32 */
968 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
969 void
970 signal_handler_callback(lispobj run_handler, int signo, void *info, void *ctx)
972 init_thread_data scribble;
973 void *args[2];
974 DX_ALLOC_SAP(args_sap, args);
976 args[0] = info;
977 args[1] = ctx;
979 attach_os_thread(&scribble);
981 odxprint(misc, "callback from signal handler thread for: %d\n", signo);
982 funcall3(StaticSymbolFunction(SIGNAL_HANDLER_CALLBACK),
983 run_handler, make_fixnum(signo), args_sap);
985 detach_os_thread(&scribble);
986 return;
988 #endif
990 #endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */