Enable callbacks from foreign threads on non-sb-safepoint.
[sbcl.git] / src / runtime / safepoint.c
/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

#include "sbcl.h"
#ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <signal.h>
#include <stddef.h>
#include <errno.h>
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#endif
#include "runtime.h"
#include "validate.h"
#include "thread.h"
#include "arch.h"
#include "target-arch-os.h"
#include "os.h"
#include "globals.h"
#include "dynbind.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"
#include "alloc.h"
#include "gc-internal.h"
#include "pseudo-atomic.h"
#include "interrupt.h"
#include "lispregs.h"
#if !defined(LISP_FEATURE_WIN32)
/* win32-os.c covers these, but there is no unixlike-os.c, so the normal
 * definition goes here.  Fixme: (Why) don't these work for Windows?
 */
void
alloc_gc_page()
{
    os_validate(GC_SAFEPOINT_PAGE_ADDR, 4);
}

void
map_gc_page()
{
    odxprint(misc, "map_gc_page");
    os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
               4,
               OS_VM_PROT_READ | OS_VM_PROT_WRITE);
}

void
unmap_gc_page()
{
    odxprint(misc, "unmap_gc_page");
    os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
}
#endif /* !LISP_FEATURE_WIN32 */
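/* Protecting and unprotecting the GC safepoint page is what makes running
 * Lisp threads trap: the idea (implemented on the Lisp side, not in this
 * file) is that compiled code polls GC_SAFEPOINT_PAGE_ADDR at each
 * safepoint, so after unmap_gc_page() the next poll faults, and
 * handle_safepoint_violation() near the end of this file turns that fault
 * into a safepoint trap. */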
/* Planned state progressions:
 *
 * none -> flight:
 *
 * unmap_gc_page().  No blockers (GC_NONE can be left at any
 * moment).
 *
 * flight -> message:
 *
 * happens when a master thread enters its trap.
 *
 * The only blocker for flight mode is the master thread itself
 * (GC_FLIGHT can't be left until the master thread traps).
 *
 * message -> invoked:
 *
 * happens after each (other) thread is notified, i.e. it will
 * eventually stop (or has already stopped).  map_gc_page().
 *
 * Each thread with an empty CSP refuses to leave the GC_MESSAGE phase.
 *
 * invoked -> collect:
 *
 * happens when every gc-inhibitor comes to completion (that's
 * normally a pending interrupt trap).
 *
 * NB gc_stop_the_world, if it happens in a non-master thread, "takes
 * over" as a master, also deregistering itself as a blocker
 * (i.e. it's ready to leave GC_INVOKED, but now it objects to
 * leaving GC_COLLECT; this "usurpation" doesn't require any change
 * to the GC_COLLECT counter: for the counter, it's immaterial _which_
 * thread is waiting).
 *
 * collect -> none:
 *
 * happens at gc_start_the_world (which should always happen in the
 * master).
 *
 * Any thread waiting for GC to end now continues.
 */
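/* The gc_phase_t enum itself is defined elsewhere in the runtime; judging
 * from gc_phase_next() and gc_handle_phase() below, the cycle is assumed
 * to run
 *
 *   GC_NONE -> GC_FLIGHT -> GC_MESSAGE -> GC_INVOKED -> GC_QUIET
 *           -> GC_SETTLED -> GC_COLLECT -> (wraps back to GC_NONE)
 *
 * with GC_NPHASES as the total phase count used for wraparound. */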
struct gc_state {
    /* Flag: conditions are initialized */
    boolean initialized;

    /* Per-process lock for gc_state */
    pthread_mutex_t lock;

    /* Conditions: one per phase */
    pthread_cond_t phase_cond[GC_NPHASES];

    /* For each [current or future] phase, the number of threads not yet
     * ready to leave it */
    int phase_wait[GC_NPHASES];

    /* Master thread controlling the topmost stop/gc/start sequence */
    struct thread* master;
    struct thread* collector;

    /* Current GC phase */
    gc_phase_t phase;
};

static struct gc_state gc_state = {
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .phase = GC_NONE,
};
void
gc_state_lock()
{
    odxprint(safepoints,"GC state [%p] to be locked",gc_state.lock);
    gc_assert(0==pthread_mutex_lock(&gc_state.lock));
    if (gc_state.master) {
        fprintf(stderr,"GC state lock glitch [%p] in thread %p phase %d\n",
                gc_state.master,arch_os_get_current_thread(),gc_state.phase);
        odxprint(safepoints,"GC state lock glitch [%p]",gc_state.master);
    }
    gc_assert(!gc_state.master);
    gc_state.master = arch_os_get_current_thread();
    if (!gc_state.initialized) {
        int i;
        for (i=GC_NONE; i<GC_NPHASES; ++i)
            pthread_cond_init(&gc_state.phase_cond[i],NULL);
        gc_state.initialized = 1;
    }
    odxprint(safepoints,"GC state [%p] locked in phase %d",gc_state.lock, gc_state.phase);
}
void
gc_state_unlock()
{
    odxprint(safepoints,"GC state to be unlocked in phase %d",gc_state.phase);
    gc_assert(arch_os_get_current_thread()==gc_state.master);
    gc_state.master = NULL;
    gc_assert(0==pthread_mutex_unlock(&gc_state.lock));
    odxprint(safepoints,"%s","GC state unlocked");
}
void
gc_state_wait(gc_phase_t phase)
{
    struct thread* self = arch_os_get_current_thread();
    odxprint(safepoints,"Waiting for %d -> %d [%d holders]",
             gc_state.phase,phase,gc_state.phase_wait[gc_state.phase]);
    gc_assert(gc_state.master == self);
    gc_state.master = NULL;
    while(gc_state.phase != phase && !(phase == GC_QUIET && (gc_state.phase > GC_QUIET)))
        pthread_cond_wait(&gc_state.phase_cond[phase],&gc_state.lock);
    gc_assert(gc_state.master == NULL);
    gc_state.master = self;
}
static void
set_csp_from_context(struct thread *self, os_context_t *ctx)
{
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
    /* On POSIX platforms, it is sufficient to investigate only the part
     * of the stack that was live before the interrupt, because in
     * addition, we consider interrupt contexts explicitly.  On Windows,
     * however, we do not keep an explicit stack of exception contexts,
     * and instead arrange for the conservative stack scan to also cover
     * the context implicitly.  The obvious way to do that is to start
     * at the context itself: */
#ifdef LISP_FEATURE_WIN32
    gc_assert((void **) ctx < sp);
    sp = (void**) ctx;
#endif
    gc_assert((void **)self->control_stack_start
              <= sp && sp
              < (void **)self->control_stack_end);
#else
    /* Note that the exact value doesn't matter much here, since
     * platforms with precise GC use get_csp() only as a boolean -- the
     * precise GC already keeps track of the stack pointer itself. */
    void **sp = (void **) 0xEEEEEEEE;
#endif
    *self->csp_around_foreign_call = (lispobj) sp;
}
static inline gc_phase_t gc_phase_next(gc_phase_t old) {
    return (old+1) % GC_NPHASES;
}
static inline gc_phase_t thread_gc_phase(struct thread* p)
{
    boolean inhibit = (SymbolTlValue(GC_INHIBIT,p)==T)||
        (SymbolTlValue(IN_WITHOUT_GCING,p)==IN_WITHOUT_GCING);

    boolean inprogress =
        (SymbolTlValue(GC_PENDING,p)!=T && SymbolTlValue(GC_PENDING,p)!=NIL);

    return
        inprogress ? (gc_state.collector && (gc_state.collector != p)
                      ? GC_NONE : GC_QUIET)
        : (inhibit ? GC_INVOKED : GC_NONE);
}
static inline void thread_gc_promote(struct thread* p, gc_phase_t cur, gc_phase_t old) {
    if (old != GC_NONE)
        gc_state.phase_wait[old]--;
    if (cur != GC_NONE) {
        gc_state.phase_wait[cur]++;
    }
    if (cur != GC_NONE)
        SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
}
/* set_thread_csp_access -- alter page permissions for the not-in-Lisp
   flag (Lisp Stack Top) of the thread `p'.  The flag may be modified
   if `writable' is true.

   Return true if there is a non-null value in the flag.

   When a thread enters C code or leaves it, a per-thread location is
   modified.  That machine word serves as a not-in-Lisp flag; for
   convenience, while the thread is in C, it's filled with a topmost
   stack location that may contain Lisp data.  While the thread is in
   Lisp, the word contains NULL.

   GENCGC uses each thread's flag value for conservative garbage collection.

   There is a full VM page reserved for this word; page permissions
   are switched to read-only for race-free examine + wait + use
   scenarios. */
static inline boolean
set_thread_csp_access(struct thread* p, boolean writable)
{
    os_protect((os_vm_address_t) p->csp_around_foreign_call,
               THREAD_CSP_PAGE_SIZE,
               writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
               : (OS_VM_PROT_READ));
    return !!*p->csp_around_foreign_call;
}
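/* An illustrative reading of the above (not an additional mechanism): a
 * thread entering foreign code stores a conservative stack top into
 * *csp_around_foreign_call and clears it again when it returns to Lisp.
 * While the word is non-zero, the collector may treat the thread as parked;
 * set_thread_csp_access(p, 0) makes the page read-only, so the thread
 * faults -- and thereby announces itself -- the moment it tries to change
 * the flag again. */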
static inline void gc_notify_early()
{
    struct thread *self = arch_os_get_current_thread(), *p;
    odxprint(safepoints,"%s","global notification");
    pthread_mutex_lock(&all_threads_lock);
    for_each_thread(p) {
        if (p==self)
            continue;
        odxprint(safepoints,"notifying thread %p csp %p",p,*p->csp_around_foreign_call);
        if (!set_thread_csp_access(p,0)) {
            thread_gc_promote(p, gc_state.phase, GC_NONE);
        } else {
            thread_gc_promote(p, thread_gc_phase(p), GC_NONE);
        }
    }
    pthread_mutex_unlock(&all_threads_lock);
}
static inline void gc_notify_final()
{
    struct thread *p;
    odxprint(safepoints,"%s","global notification");
    gc_state.phase_wait[gc_state.phase]=0;
    pthread_mutex_lock(&all_threads_lock);
    for_each_thread(p) {
        if (p == gc_state.collector)
            continue;
        odxprint(safepoints,"notifying thread %p csp %p",p,*p->csp_around_foreign_call);
        if (!set_thread_csp_access(p,0)) {
            thread_gc_promote(p, gc_state.phase, GC_NONE);
        }
    }
    pthread_mutex_unlock(&all_threads_lock);
}
static inline void gc_done()
{
    struct thread *self = arch_os_get_current_thread(), *p;
    boolean inhibit = (SymbolTlValue(GC_INHIBIT,self)==T);

    odxprint(safepoints,"%s","global denotification");
    pthread_mutex_lock(&all_threads_lock);
    for_each_thread(p) {
        if (inhibit && (SymbolTlValue(GC_PENDING,p)==T))
            SetTlSymbolValue(GC_PENDING,NIL,p);
        set_thread_csp_access(p,1);
    }
    pthread_mutex_unlock(&all_threads_lock);
}
static inline void gc_handle_phase()
{
    odxprint(safepoints,"Entering phase %d",gc_state.phase);
    switch (gc_state.phase) {
    case GC_FLIGHT:
        unmap_gc_page();
        break;
    case GC_MESSAGE:
        gc_notify_early();
        break;
    case GC_INVOKED:
        map_gc_page();
        break;
    case GC_SETTLED:
        gc_notify_final();
        unmap_gc_page();
        break;
    case GC_COLLECT:
        map_gc_page();
        break;
    case GC_NONE:
        gc_done();
        break;
    default:
        break;
    }
}
/* Become ready to leave the <old> phase, but unready to leave the <cur>
 * phase; `old' can be GC_NONE, meaning this thread wasn't blocking any
 * state.  `cur' can be GC_NONE, meaning this thread won't block GC_NONE,
 * but will still wait for it. */
static inline void gc_advance(gc_phase_t cur, gc_phase_t old) {
    odxprint(safepoints,"GC advance request %d -> %d in phase %d",old,cur,gc_state.phase);
    if (cur == old)
        return;
    if (cur == gc_state.phase)
        return;
    if (old < gc_state.phase)
        old = GC_NONE;
    if (old != GC_NONE) {
        gc_state.phase_wait[old]--;
        odxprint(safepoints,"%d holders of phase %d without me",gc_state.phase_wait[old],old);
    }
    if (cur != GC_NONE) {
        gc_state.phase_wait[cur]++;
        odxprint(safepoints,"%d holders of phase %d with me",gc_state.phase_wait[cur],cur);
    }
    /* roll forth as long as there are no waiters */
    while (gc_state.phase_wait[gc_state.phase]==0
           && gc_state.phase != cur) {
        gc_state.phase = gc_phase_next(gc_state.phase);
        odxprint(safepoints,"no blockers, direct advance to %d",gc_state.phase);
        gc_handle_phase();
        pthread_cond_broadcast(&gc_state.phase_cond[gc_state.phase]);
    }
    odxprint(safepoints,"going to wait for %d threads",gc_state.phase_wait[gc_state.phase]);
    gc_state_wait(cur);
}
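/* Example from this file: thread_in_lisp_raised() below calls
 * gc_advance(GC_QUIET, GC_FLIGHT), i.e. "I no longer block GC_FLIGHT, but
 * don't advance past GC_QUIET without me"; the call drives the phase
 * machine forward while there are no blockers and returns once
 * gc_state.phase has reached GC_QUIET. */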
void
thread_register_gc_trigger()
{
    odxprint(misc, "/thread_register_gc_trigger");
    struct thread *self = arch_os_get_current_thread();
    gc_state_lock();
    if (gc_state.phase == GC_NONE &&
        SymbolTlValue(IN_SAFEPOINT,self)!=T &&
        thread_gc_phase(self)==GC_NONE) {
        gc_advance(GC_FLIGHT,GC_NONE);
    }
    gc_state_unlock();
}
static inline int
thread_may_gc()
{
    /* Thread may gc if all of these are true:
     * 1) GC_INHIBIT == NIL  (outside of protected part of without-gcing)
     * 2) GC_PENDING != :in-progress  (outside of recursion protection)
     * Note that we are in a safepoint here, which is always outside of PA. */

    struct thread *self = arch_os_get_current_thread();
    return (SymbolValue(GC_INHIBIT, self) == NIL
            && (SymbolTlValue(GC_PENDING, self) == T ||
                SymbolTlValue(GC_PENDING, self) == NIL));
}
#ifdef LISP_FEATURE_SB_THRUPTION
static inline int
thread_may_thrupt(os_context_t *ctx)
{
    struct thread * self = arch_os_get_current_thread();
    /* Thread may be interrupted if all of these are true:
     * 1) Deferrables are unblocked in the context of the signal that
     *    went into the safepoint.  -- Otherwise the surrounding code
     *    didn't want to be interrupted by a signal, so presumably it didn't
     *    want to be INTERRUPT-THREADed either.
     *    (See interrupt_handle_pending for an exception.)
     * 2) On POSIX: There is no pending signal.  This is important even
     *    after checking the sigmask, since we could be in the
     *    handle_pending trap following re-enabling of interrupts.
     *    Signals are unblocked in that case, but the signal is still
     *    pending; we want to run GC before handling the signal and
     *    therefore entered this safepoint.  But the thruption would call
     *    ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
     *    trap, leading to recursion.
     * 3) INTERRUPTS_ENABLED is non-nil.
     * 4) No GC pending; it takes precedence.
     * Note that we are in a safepoint here, which is always outside of PA. */

    if (SymbolValue(INTERRUPTS_ENABLED, self) == NIL)
        return 0;

    if (SymbolValue(GC_PENDING, self) != NIL)
        return 0;

    if (SymbolValue(STOP_FOR_GC_PENDING, self) != NIL)
        return 0;

#ifdef LISP_FEATURE_WIN32
    if (deferrables_blocked_p(&self->os_thread->blocked_signal_set))
        return 0;
#else
    /* ctx is NULL if the caller wants to ignore the sigmask. */
    if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
        return 0;
    if (SymbolValue(INTERRUPT_PENDING, self) != NIL)
        return 0;
#endif

    if (SymbolValue(RESTART_CLUSTERS, self) == NIL)
        /* This special case prevents TERMINATE-THREAD from hitting
         * during INITIAL-THREAD-FUNCTION before it's ready.  Curiously,
         * deferrables are already unblocked there.  Further
         * investigation may be in order. */
        return 0;

    return 1;
}
// returns 0 if skipped, 1 otherwise
int
check_pending_thruptions(os_context_t *ctx)
{
    struct thread *p = arch_os_get_current_thread();

#ifdef LISP_FEATURE_WIN32
    pthread_t pself = p->os_thread;
    sigset_t oldset;
    /* On Windows, wake_thread/kill_safely does not set THRUPTION_PENDING
     * in the self-kill case; instead we do it here while also clearing the
     * "signal". */
    if (pself->pending_signal_set)
        if (__sync_fetch_and_and(&pself->pending_signal_set,0))
            SetSymbolValue(THRUPTION_PENDING, T, p);
#endif

    if (!thread_may_thrupt(ctx))
        return 0;
    if (SymbolValue(THRUPTION_PENDING, p) == NIL)
        return 0;
    SetSymbolValue(THRUPTION_PENDING, NIL, p);

#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    int was_in_lisp = !foreign_function_call_active_p(p);
    if (was_in_lisp) {
        if (!ctx)
            lose("self-kill bug");
        fake_foreign_function_call(ctx);
    }
#endif

#ifdef LISP_FEATURE_WIN32
    oldset = pself->blocked_signal_set;
    pself->blocked_signal_set = deferrable_sigset;
    if (ctx) fake_foreign_function_call(ctx);
#else
    sigset_t oldset;
    block_deferrable_signals(0, &oldset);
#endif

    funcall0(StaticSymbolFunction(RUN_INTERRUPTION));

#ifdef LISP_FEATURE_WIN32
    if (ctx) undo_fake_foreign_function_call(ctx);
    pself->blocked_signal_set = oldset;
    if (ctx) ctx->sigmask = oldset;
#else
    pthread_sigmask(SIG_SETMASK, &oldset, 0);
#endif

#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    if (was_in_lisp)
        undo_fake_foreign_function_call(ctx);
#endif

    return 1;
}
#endif
int
on_stack_p(struct thread *th, void *esp)
{
    return (void *)th->control_stack_start
        <= esp && esp
        < (void *)th->control_stack_end;
}
#ifndef LISP_FEATURE_WIN32
/* (Technically, we still allocate an altstack even on Windows.  Since
 * Windows has a contiguous stack with an automatic guard page of
 * user-configurable size instead of an alternative stack though, the
 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
int
on_altstack_p(struct thread *th, void *esp)
{
    void *start = (void *)th+dynamic_values_bytes;
    void *end = (char *)start + 32*SIGSTKSZ;
    return start <= esp && esp < end;
}
#endif
void
assert_on_stack(struct thread *th, void *esp)
{
    if (on_stack_p(th, esp))
        return;
#ifndef LISP_FEATURE_WIN32
    if (on_altstack_p(th, esp))
        lose("thread %p: esp on altstack: %p", th, esp);
#endif
    lose("thread %p: bogus esp: %p", th, esp);
}
// returns 0 if skipped, 1 otherwise
int
check_pending_gc(os_context_t *ctx)
{
    odxprint(misc, "check_pending_gc");
    struct thread * self = arch_os_get_current_thread();
    int done = 0;
    sigset_t sigset;

    if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
        ((SymbolValue(GC_INHIBIT,self) == NIL) &&
         (SymbolValue(GC_PENDING,self) == NIL))) {
        SetSymbolValue(IN_SAFEPOINT,NIL,self);
    }
    if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
        if ((SymbolTlValue(GC_PENDING, self) == T)) {
            lispobj gc_happened = NIL;

            bind_variable(IN_SAFEPOINT,T,self);
            block_deferrable_signals(NULL,&sigset);
            if(SymbolTlValue(GC_PENDING,self)==T)
                gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
            unbind(self);
            thread_sigmask(SIG_SETMASK,&sigset,NULL);
            if (gc_happened == T) {
                /* POST_GC wants to enable interrupts */
                if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
                    SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
                    odxprint(misc, "going to call POST_GC");
                    funcall0(StaticSymbolFunction(POST_GC));
                }
                done = 1;
            }
        }
    }
    return done;
}
void thread_in_lisp_raised(os_context_t *ctxptr)
{
    struct thread *self = arch_os_get_current_thread();
    gc_phase_t phase;
    odxprint(safepoints,"%s","thread_in_lisp_raised");
    gc_state_lock();

    if (gc_state.phase == GC_FLIGHT &&
        SymbolTlValue(GC_PENDING,self)==T &&
        thread_gc_phase(self)==GC_NONE &&
        thread_may_gc() && SymbolTlValue(IN_SAFEPOINT,self)!=T) {
        set_csp_from_context(self, ctxptr);
        gc_advance(GC_QUIET,GC_FLIGHT);
        set_thread_csp_access(self,1);
        if (gc_state.collector) {
            gc_advance(GC_NONE,GC_QUIET);
        } else {
            *self->csp_around_foreign_call = 0;
            SetTlSymbolValue(GC_PENDING,T,self);
        }
        gc_state_unlock();
        check_pending_gc(ctxptr);
#ifdef LISP_FEATURE_SB_THRUPTION
        while(check_pending_thruptions(ctxptr));
#endif
        return;
    }
    if (gc_state.phase == GC_FLIGHT) {
        gc_state_wait(GC_MESSAGE);
    }
    phase = thread_gc_phase(self);
    if (phase == GC_NONE) {
        SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
        set_thread_csp_access(self,1);
        set_csp_from_context(self, ctxptr);
        if (gc_state.phase <= GC_SETTLED)
            gc_advance(phase,gc_state.phase);
        else
            gc_state_wait(phase);
        *self->csp_around_foreign_call = 0;
        gc_state_unlock();
        check_pending_gc(ctxptr);
#ifdef LISP_FEATURE_SB_THRUPTION
        while(check_pending_thruptions(ctxptr));
#endif
    } else {
        gc_advance(phase,gc_state.phase);
        SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
        gc_state_unlock();
    }
}
void thread_in_safety_transition(os_context_t *ctxptr)
{
    struct thread *self = arch_os_get_current_thread();

    odxprint(safepoints,"%s","GC safety transition");
    gc_state_lock();
    if (set_thread_csp_access(self,1)) {
        gc_state_wait(thread_gc_phase(self));
        gc_state_unlock();
#ifdef LISP_FEATURE_SB_THRUPTION
        while(check_pending_thruptions(ctxptr));
#endif
    } else {
        gc_phase_t phase = thread_gc_phase(self);
        if (phase == GC_NONE) {
            SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
            set_csp_from_context(self, ctxptr);
            if (gc_state.phase <= GC_SETTLED)
                gc_advance(phase,gc_state.phase);
            else
                gc_state_wait(phase);
            *self->csp_around_foreign_call = 0;
        } else {
            gc_advance(phase,gc_state.phase);
            SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
        }
        gc_state_unlock();
    }
}
void thread_interrupted(os_context_t *ctxptr)
{
    struct thread *self = arch_os_get_current_thread();

    odxprint(safepoints,"%s","pending interrupt trap");
    gc_state_lock();
    if (gc_state.phase != GC_NONE) {
        if (set_thread_csp_access(self,1)) {
            gc_state_unlock();
            thread_in_safety_transition(ctxptr);
        } else {
            gc_state_unlock();
            thread_in_lisp_raised(ctxptr);
        }
    } else {
        gc_state_unlock();
    }
    check_pending_gc(ctxptr);
#ifdef LISP_FEATURE_SB_THRUPTION
    while(check_pending_thruptions(ctxptr));
#endif
}
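/* The three functions above are the safepoint trap entry points:
 * thread_in_lisp_raised() for traps taken while running Lisp code (the
 * global safepoint page), thread_in_safety_transition() for traps taken on
 * the per-thread CSP page while in foreign code, and thread_interrupted()
 * for the pending interrupt trap.  handle_safepoint_violation() near the
 * end of this file recognizes the corresponding page faults. */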
void
gc_stop_the_world()
{
    struct thread* self = arch_os_get_current_thread();
    odxprint(safepoints, "stop the world");
    gc_state_lock();
    gc_state.collector = self;
    gc_state.phase_wait[GC_QUIET]++;

    switch(gc_state.phase) {
    case GC_NONE:
        gc_advance(GC_QUIET,gc_state.phase);
        /* fall through */
    case GC_FLIGHT:
    case GC_MESSAGE:
    case GC_INVOKED:
        gc_state_wait(GC_QUIET);
        /* fall through */
    case GC_QUIET:
        gc_state.phase_wait[GC_QUIET]=1;
        gc_advance(GC_COLLECT,GC_QUIET);
        break;
    case GC_COLLECT:
        break;
    default:
        lose("Stopping the world in unexpected state %d",gc_state.phase);
        break;
    }
    set_thread_csp_access(self,1);
    gc_state_unlock();
    SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
}
void gc_start_the_world()
{
    odxprint(safepoints,"%s","start the world");
    gc_state_lock();
    gc_state.collector = NULL;
    SetSymbolValue(IN_WITHOUT_GCING,IN_WITHOUT_GCING,
                   arch_os_get_current_thread());
    gc_advance(GC_NONE,GC_COLLECT);
    gc_state_unlock();
}
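/* Collector-side usage, as implied by the state diagram at the top of this
 * file: the thread that actually performs the collection brackets it with
 *
 *     gc_stop_the_world();
 *     ... collect garbage ...
 *     gc_start_the_world();
 *
 * gc_stop_the_world() registers the caller as gc_state.collector and drives
 * the phase machine to GC_COLLECT; gc_start_the_world() hands it back to
 * GC_NONE, releasing every thread that was waiting for GC to end. */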
#ifdef LISP_FEATURE_SB_THRUPTION
/* wake_thread(thread) -- ensure a thruption delivery to
 * `thread'. */

# ifdef LISP_FEATURE_WIN32

void
wake_thread_io(struct thread * thread)
{
    SetEvent(thread->private_events.events[1]);
    win32_maybe_interrupt_io(thread);
}

void
wake_thread_win32(struct thread *thread)
{
    struct thread *self = arch_os_get_current_thread();

    wake_thread_io(thread);

    if (SymbolTlValue(THRUPTION_PENDING,thread)==T)
        return;

    SetTlSymbolValue(THRUPTION_PENDING,T,thread);

    if ((SymbolTlValue(GC_PENDING,thread)==T)||
        (SymbolTlValue(STOP_FOR_GC_PENDING,thread)==T))
        return;

    wake_thread_io(thread);
    pthread_mutex_unlock(&all_threads_lock);

    gc_state_lock();
    if (gc_state.phase == GC_NONE) {
        gc_advance(GC_INVOKED,GC_NONE);
        gc_advance(GC_NONE,GC_INVOKED);
    }
    gc_state_unlock();

    pthread_mutex_lock(&all_threads_lock);
    return;
}
# else

int
wake_thread_posix(os_thread_t os_thread)
{
    int found = 0;
    struct thread *thread;
    struct thread *self = arch_os_get_current_thread();

    /* Must not and need not attempt to signal ourselves while we're the
     * STW initiator. */
    if (self->os_thread == os_thread) {
        SetTlSymbolValue(THRUPTION_PENDING,T,self);
        WITH_GC_AT_SAFEPOINTS_ONLY()
            while (check_pending_thruptions(0 /* ignore the sigmask */))
                ;
        return 0;
    }

    /* We are not in a signal handler here, so need to block signals
     * manually. */
    sigset_t oldset;
    block_deferrable_signals(0, &oldset);

    gc_state_lock();
    if (gc_state.phase == GC_NONE) {
        odxprint(safepoints, "wake_thread_posix: invoking");
        gc_advance(GC_INVOKED,GC_NONE);

        /* only if in foreign code, notify using signal */
        pthread_mutex_lock(&all_threads_lock);
        for_each_thread (thread)
            if (thread->os_thread == os_thread) {
                /* it's still alive... */
                found = 1;

                odxprint(safepoints, "wake_thread_posix: found");
                SetTlSymbolValue(THRUPTION_PENDING,T,thread);
                if (SymbolTlValue(GC_PENDING,thread) == T
                    || SymbolTlValue(STOP_FOR_GC_PENDING,thread) == T)
                    break;

                if (os_get_csp(thread)) {
                    odxprint(safepoints, "wake_thread_posix: kill");
                    /* ... and in foreign code.  Push it into a safety
                     * transition. */
                    int status = pthread_kill(os_thread, SIGPIPE);
                    if (status)
                        lose("wake_thread_posix: pthread_kill failed with %d\n",
                             status);
                }
                break;
            }
        pthread_mutex_unlock(&all_threads_lock);

        gc_advance(GC_NONE,GC_INVOKED);
    } else {
        odxprint(safepoints, "wake_thread_posix: passive");
        /* We are not able to wake the thread up actively, but maybe
         * some other thread will take care of it.  Kludge: Unless it is
         * in foreign code.  Let's at least try to get our return value
         * right. */
        pthread_mutex_lock(&all_threads_lock);
        for_each_thread (thread)
            if (thread->os_thread == os_thread) {
                SetTlSymbolValue(THRUPTION_PENDING,T,thread);
                found = 1;
                break;
            }
        pthread_mutex_unlock(&all_threads_lock);
    }
    gc_state_unlock();

    odxprint(safepoints, "wake_thread_posix leaving, found=%d", found);
    pthread_sigmask(SIG_SETMASK, &oldset, 0);
    return found ? 0 : -1;
}
#endif /* !LISP_FEATURE_WIN32 */
#endif /* LISP_FEATURE_SB_THRUPTION */
void**
os_get_csp(struct thread* th)
{
    FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
                  th,
                  th->csp_around_foreign_call,
                  *(void***)th->csp_around_foreign_call,
                  th->control_stack_start,
                  th->control_stack_end));
    return *(void***)th->csp_around_foreign_call;
}
#ifndef LISP_FEATURE_WIN32

# ifdef LISP_FEATURE_SB_THRUPTION
void
thruption_handler(int signal, siginfo_t *info, os_context_t *ctx)
{
    struct thread *self = arch_os_get_current_thread();

    void *transition_sp = os_get_csp(self);
    if (!transition_sp)
        /* In Lisp code.  Do not run thruptions asynchronously.  The
         * next safepoint will take care of it. */
        return;

#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    if (!foreign_function_call_active_p(self))
        lose("csp && !ffca");
#endif

    /* In C code.  As a rule, we assume that running thruptions is OK. */
    *self->csp_around_foreign_call = 0;
    thread_in_lisp_raised(ctx);
    *self->csp_around_foreign_call = transition_sp;
}
# endif
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK

/* Designed to be of the same type as call_into_lisp.  Ignores its
 * arguments. */
lispobj
handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
#if trap_GlobalSafepoint != 0x1a
# error trap_GlobalSafepoint mismatch
#endif
    asm("int3; .byte 0x1a;");
    return 0;
}

lispobj
handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
#if trap_CspSafepoint != 0x1b
# error trap_CspSafepoint mismatch
#endif
    asm("int3; .byte 0x1b;");
    return 0;
}

#endif /* C_STACK_IS_CONTROL_STACK */
int
handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
{
    FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
                  fault_address,
                  GC_SAFEPOINT_PAGE_ADDR,
                  arch_os_get_current_thread()->csp_around_foreign_call));

    struct thread *self = arch_os_get_current_thread();

    if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
        /* We're on the altstack and don't want to run Lisp code. */
        arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
#else
        if (foreign_function_call_active_p(self)) lose("GSP trap in C?");
        fake_foreign_function_call(ctx);
        thread_in_lisp_raised(ctx);
        undo_fake_foreign_function_call(ctx);
#endif
        return 1;
    }

    if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
        arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
#else
        if (!foreign_function_call_active_p(self)) lose("CSP trap in Lisp?");
        thread_in_safety_transition(ctx);
#endif
        return 1;
    }

    /* not a safepoint */
    return 0;
}
#endif /* LISP_FEATURE_WIN32 */
#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
void
signal_handler_callback(lispobj run_handler, int signo, void *info, void *ctx)
{
    init_thread_data scribble;
    void *args[2];
    DX_ALLOC_SAP(args_sap, args);

    args[0] = info;
    args[1] = ctx;

    attach_os_thread(&scribble);

    odxprint(misc, "callback from signal handler thread for: %d\n", signo);
    funcall3(StaticSymbolFunction(SIGNAL_HANDLER_CALLBACK),
             run_handler, make_fixnum(signo), args_sap);

    detach_os_thread(&scribble);
    return;
}
#endif
#endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */