2 * This software is part of the SBCL system. See the README file for
5 * This software is derived from the CMU CL system, which was
6 * written at Carnegie Mellon University and released into the
7 * public domain. The software is in the public domain and is
8 * provided with absolutely no warranty. See the COPYING and CREDITS
9 * files for more information.
13 #ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
17 #ifndef LISP_FEATURE_WIN32
23 #include <sys/types.h>
24 #ifndef LISP_FEATURE_WIN32
27 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
28 #include <mach/mach.h>
29 #include <mach/mach_error.h>
30 #include <mach/mach_types.h>
36 #include "target-arch-os.h"
40 #include "genesis/cons.h"
41 #include "genesis/fdefn.h"
44 #include "gc-internal.h"
45 #include "pseudo-atomic.h"
46 #include "interrupt.h"
49 #if !defined(LISP_FEATURE_WIN32)
50 /* win32-os.c covers these, but there is no unixlike-os.c, so the normal
51 * definition goes here. Fixme: (Why) don't these work for Windows?
56 os_validate(GC_SAFEPOINT_PAGE_ADDR
, 4);
62 odxprint(misc
, "map_gc_page");
63 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR
,
65 OS_VM_PROT_READ
| OS_VM_PROT_WRITE
);
71 odxprint(misc
, "unmap_gc_page");
72 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR
, 4, OS_VM_PROT_NONE
);
74 #endif /* !LISP_FEATURE_WIN32 */
76 /* Planned state progressions:
 * unmap_gc_page(). No blockers (GC_NONE can be left at any moment).
84 * happens when a master thread enters its trap.
86 * The only blocker for flight mode is the master thread itself
87 * (GC_FLIGHT can't be left until the master thread traps).
91 * happens after each (other) thread is notified, i.e. it will
92 * eventually stop (already stopped). map_gc_page().
 * Each thread with an empty CSP refuses to leave the GC_MESSAGE phase.
98 * happens when every gc-inhibitor comes to completion (that's
99 * normally pending interrupt trap).
101 * NB gc_stop_the_world, if it happens in non-master thread, "takes
102 * over" as a master, also deregistering itself as a blocker
103 * (i.e. it's ready to leave GC_INVOKED, but now it objects to
104 * leaving GC_COLLECT; this "usurpation" doesn't require any change
105 * to GC_COLLECT counter: for the counter, it's immaterial _which_
106 * thread is waiting).
110 * happens at gc_start_the_world (that should always happen in the
113 * Any thread waiting until GC end now continues.
116 /* Flag: conditions are initialized */
119 /* Per-process lock for gc_state */
120 pthread_mutex_t lock
;
122 /* Conditions: one per phase */
123 pthread_cond_t phase_cond
[GC_NPHASES
];
125 /* For each [current or future] phase, a number of threads not yet ready to
127 int phase_wait
[GC_NPHASES
];
129 /* Master thread controlling the topmost stop/gc/start sequence */
130 struct thread
* master
;
131 struct thread
* collector
;
133 /* Current GC phase */
137 static struct gc_state gc_state
= {
138 .lock
= PTHREAD_MUTEX_INITIALIZER
,
145 odxprint(safepoints
,"GC state [%p] to be locked",gc_state
.lock
);
146 int result
= pthread_mutex_lock(&gc_state
.lock
);
148 if (gc_state
.master
) {
149 fprintf(stderr
,"GC state lock glitch [%p] in thread %p phase %d\n",
150 gc_state
.master
,arch_os_get_current_thread(),gc_state
.phase
);
151 odxprint(safepoints
,"GC state lock glitch [%p]",gc_state
.master
);
153 gc_assert(!gc_state
.master
);
154 gc_state
.master
= arch_os_get_current_thread();
155 if (!gc_state
.initialized
) {
157 for (i
=GC_NONE
; i
<GC_NPHASES
; ++i
)
158 pthread_cond_init(&gc_state
.phase_cond
[i
],NULL
);
159 gc_state
.initialized
= 1;
161 odxprint(safepoints
,"GC state [%p] locked in phase %d",gc_state
.lock
, gc_state
.phase
);
167 odxprint(safepoints
,"GC state to be unlocked in phase %d",gc_state
.phase
);
168 gc_assert(arch_os_get_current_thread()==gc_state
.master
);
169 gc_state
.master
= NULL
;
170 int result
= pthread_mutex_unlock(&gc_state
.lock
);
172 odxprint(safepoints
,"%s","GC state unlocked");
176 gc_state_wait(gc_phase_t phase
)
178 struct thread
* self
= arch_os_get_current_thread();
179 odxprint(safepoints
,"Waiting for %d -> %d [%d holders]",
180 gc_state
.phase
,phase
,gc_state
.phase_wait
[gc_state
.phase
]);
181 gc_assert(gc_state
.master
== self
);
182 gc_state
.master
= NULL
;
183 while(gc_state
.phase
!= phase
&& !(phase
== GC_QUIET
&& (gc_state
.phase
> GC_QUIET
)))
184 pthread_cond_wait(&gc_state
.phase_cond
[phase
],&gc_state
.lock
);
185 gc_assert(gc_state
.master
== NULL
);
186 gc_state
.master
= self
;
190 set_csp_from_context(struct thread
*self
, os_context_t
*ctx
)
192 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
193 void **sp
= (void **) *os_context_register_addr(ctx
, reg_SP
);
194 /* On POSIX platforms, it is sufficient to investigate only the part
195 * of the stack that was live before the interrupt, because in
196 * addition, we consider interrupt contexts explicitly. On Windows,
197 * however, we do not keep an explicit stack of exception contexts,
198 * and instead arrange for the conservative stack scan to also cover
199 * the context implicitly. The obvious way to do that is to start
200 * at the context itself: */
201 #ifdef LISP_FEATURE_WIN32
202 gc_assert((void **) ctx
< sp
);
205 gc_assert((void **)self
->control_stack_start
207 < (void **)self
->control_stack_end
);
209 /* Note that the exact value doesn't matter much here, since
210 * platforms with precise GC use get_csp() only as a boolean -- the
211 * precise GC already keeps track of the stack pointer itself. */
212 void **sp
= (void **) 0xEEEEEEEE;
214 *self
->csp_around_foreign_call
= (lispobj
) sp
;
218 static inline gc_phase_t
gc_phase_next(gc_phase_t old
) {
219 return (old
+1) % GC_NPHASES
;
222 static inline gc_phase_t
thread_gc_phase(struct thread
* p
)
224 boolean inhibit
= (SymbolTlValue(GC_INHIBIT
,p
)==T
)||
225 (SymbolTlValue(IN_WITHOUT_GCING
,p
)==IN_WITHOUT_GCING
);
228 (SymbolTlValue(GC_PENDING
,p
)!=T
&& SymbolTlValue(GC_PENDING
,p
)!=NIL
);
231 inprogress
? (gc_state
.collector
&& (gc_state
.collector
!= p
)
232 ? GC_NONE
: GC_QUIET
)
233 : (inhibit
? GC_INVOKED
: GC_NONE
);
236 static inline void thread_gc_promote(struct thread
* p
, gc_phase_t cur
, gc_phase_t old
) {
238 gc_state
.phase_wait
[old
]--;
239 if (cur
!= GC_NONE
) {
240 gc_state
.phase_wait
[cur
]++;
243 SetTlSymbolValue(STOP_FOR_GC_PENDING
,T
,p
);
246 /* set_thread_csp_access -- alter page permissions for not-in-Lisp
247 flag (Lisp Stack Top) of the thread `p'. The flag may be modified
248 if `writable' is true.
250 Return true if there is a non-null value in the flag.
252 When a thread enters C code or leaves it, a per-thread location is
253 modified. That machine word serves as a not-in-Lisp flag; for
254 convenience, when in C, it's filled with a topmost stack location
255 that may contain Lisp data. When thread is in Lisp, the word
258 GENCGC uses each thread's flag value for conservative garbage collection.
260 There is a full VM page reserved for this word; page permissions
261 are switched to read-only for race-free examine + wait + use
263 static inline boolean
264 set_thread_csp_access(struct thread
* p
, boolean writable
)
266 os_protect((os_vm_address_t
) p
->csp_around_foreign_call
,
267 THREAD_CSP_PAGE_SIZE
,
268 writable
? (OS_VM_PROT_READ
|OS_VM_PROT_WRITE
)
269 : (OS_VM_PROT_READ
));
270 return !!*p
->csp_around_foreign_call
;
273 static inline void gc_notify_early()
275 struct thread
*self
= arch_os_get_current_thread(), *p
;
276 odxprint(safepoints
,"%s","global notification");
277 pthread_mutex_lock(&all_threads_lock
);
281 odxprint(safepoints
,"notifying thread %p csp %p",p
,*p
->csp_around_foreign_call
);
282 if (!set_thread_csp_access(p
,0)) {
283 thread_gc_promote(p
, gc_state
.phase
, GC_NONE
);
285 thread_gc_promote(p
, thread_gc_phase(p
), GC_NONE
);
288 pthread_mutex_unlock(&all_threads_lock
);
291 static inline void gc_notify_final()
294 odxprint(safepoints
,"%s","global notification");
295 gc_state
.phase_wait
[gc_state
.phase
]=0;
296 pthread_mutex_lock(&all_threads_lock
);
298 if (p
== gc_state
.collector
)
300 odxprint(safepoints
,"notifying thread %p csp %p",p
,*p
->csp_around_foreign_call
);
301 if (!set_thread_csp_access(p
,0)) {
302 thread_gc_promote(p
, gc_state
.phase
, GC_NONE
);
305 pthread_mutex_unlock(&all_threads_lock
);
308 static inline void gc_done()
310 struct thread
*self
= arch_os_get_current_thread(), *p
;
311 boolean inhibit
= (SymbolTlValue(GC_INHIBIT
,self
)==T
);
313 odxprint(safepoints
,"%s","global denotification");
314 pthread_mutex_lock(&all_threads_lock
);
316 if (inhibit
&& (SymbolTlValue(GC_PENDING
,p
)==T
))
317 SetTlSymbolValue(GC_PENDING
,NIL
,p
);
318 set_thread_csp_access(p
,1);
320 pthread_mutex_unlock(&all_threads_lock
);
323 static inline void gc_handle_phase()
325 odxprint(safepoints
,"Entering phase %d",gc_state
.phase
);
326 switch (gc_state
.phase
) {
/* become ready to leave the <old> phase, but unready to leave the <new> phase;
 * `old' can be GC_NONE, meaning this thread wasn't blocking any state. `cur'
 * can be GC_NONE, meaning this thread won't block GC_NONE, but will still wait
356 static inline void gc_advance(gc_phase_t cur
, gc_phase_t old
) {
357 odxprint(safepoints
,"GC advance request %d -> %d in phase %d",old
,cur
,gc_state
.phase
);
360 if (cur
== gc_state
.phase
)
362 if (old
< gc_state
.phase
)
364 if (old
!= GC_NONE
) {
365 gc_state
.phase_wait
[old
]--;
366 odxprint(safepoints
,"%d holders of phase %d without me",gc_state
.phase_wait
[old
],old
);
368 if (cur
!= GC_NONE
) {
369 gc_state
.phase_wait
[cur
]++;
370 odxprint(safepoints
,"%d holders of phase %d with me",gc_state
.phase_wait
[cur
],cur
);
372 /* roll forth as long as there's no waiters */
373 while (gc_state
.phase_wait
[gc_state
.phase
]==0
374 && gc_state
.phase
!= cur
) {
375 gc_state
.phase
= gc_phase_next(gc_state
.phase
);
376 odxprint(safepoints
,"no blockers, direct advance to %d",gc_state
.phase
);
378 pthread_cond_broadcast(&gc_state
.phase_cond
[gc_state
.phase
]);
380 odxprint(safepoints
,"going to wait for %d threads",gc_state
.phase_wait
[gc_state
.phase
]);
385 thread_register_gc_trigger()
387 odxprint(misc
, "/thread_register_gc_trigger");
388 struct thread
*self
= arch_os_get_current_thread();
390 if (gc_state
.phase
== GC_NONE
&&
391 SymbolTlValue(IN_SAFEPOINT
,self
)!=T
&&
392 thread_gc_phase(self
)==GC_NONE
) {
393 gc_advance(GC_FLIGHT
,GC_NONE
);
401 /* Thread may gc if all of these are true:
402 * 1) GC_INHIBIT == NIL (outside of protected part of without-gcing)
403 * 2) GC_PENDING != :in-progress (outside of recursion protection)
404 * Note that we are in a safepoint here, which is always outside of PA. */
406 struct thread
*self
= arch_os_get_current_thread();
407 return (SymbolValue(GC_INHIBIT
, self
) == NIL
408 && (SymbolTlValue(GC_PENDING
, self
) == T
||
409 SymbolTlValue(GC_PENDING
, self
) == NIL
));
412 #ifdef LISP_FEATURE_SB_THRUPTION
414 thread_may_thrupt(os_context_t
*ctx
)
416 struct thread
* self
= arch_os_get_current_thread();
417 /* Thread may be interrupted if all of these are true:
418 * 1) Deferrables are unblocked in the context of the signal that
419 * went into the safepoint. -- Otherwise the surrounding code
420 * didn't want to be interrupted by a signal, so presumably it didn't
421 * want to be INTERRUPT-THREADed either.
422 * (See interrupt_handle_pending for an exception.)
423 * 2) On POSIX: There is no pending signal. This is important even
424 * after checking the sigmask, since we could be in the
425 * handle_pending trap following re-enabling of interrupts.
426 * Signals are unblocked in that case, but the signal is still
427 * pending; we want to run GC before handling the signal and
428 * therefore entered this safepoint. But the thruption would call
429 * ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
430 * trap, leading to recursion.
431 * 3) INTERRUPTS_ENABLED is non-nil.
432 * 4) No GC pending; it takes precedence.
433 * Note that we are in a safepoint here, which is always outside of PA. */
435 if (SymbolValue(INTERRUPTS_ENABLED
, self
) == NIL
)
438 if (SymbolValue(GC_PENDING
, self
) != NIL
)
441 if (SymbolValue(STOP_FOR_GC_PENDING
, self
) != NIL
)
444 #ifdef LISP_FEATURE_WIN32
445 if (deferrables_blocked_p(&self
->os_thread
->blocked_signal_set
))
448 /* ctx is NULL if the caller wants to ignore the sigmask. */
449 if (ctx
&& deferrables_blocked_p(os_context_sigmask_addr(ctx
)))
451 if (SymbolValue(INTERRUPT_PENDING
, self
) != NIL
)
455 if (SymbolValue(RESTART_CLUSTERS
, self
) == NIL
)
456 /* This special case prevents TERMINATE-THREAD from hitting
457 * during INITIAL-THREAD-FUNCTION before it's ready. Curiously,
458 * deferrables are already unblocked there. Further
459 * investigation may be in order. */
465 // returns 0 if skipped, 1 otherwise
467 check_pending_thruptions(os_context_t
*ctx
)
469 struct thread
*p
= arch_os_get_current_thread();
471 #ifdef LISP_FEATURE_WIN32
472 pthread_t pself
= p
->os_thread
;
474 /* On Windows, wake_thread/kill_safely does not set THRUPTION_PENDING
475 * in the self-kill case; instead we do it here while also clearing the
477 if (pself
->pending_signal_set
)
478 if (__sync_fetch_and_and(&pself
->pending_signal_set
,0))
479 SetSymbolValue(THRUPTION_PENDING
, T
, p
);
482 if (!thread_may_thrupt(ctx
))
484 if (SymbolValue(THRUPTION_PENDING
, p
) == NIL
)
486 SetSymbolValue(THRUPTION_PENDING
, NIL
, p
);
488 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
489 int was_in_lisp
= !foreign_function_call_active_p(p
);
492 lose("self-kill bug");
493 fake_foreign_function_call(ctx
);
497 #ifdef LISP_FEATURE_WIN32
498 oldset
= pself
->blocked_signal_set
;
499 pself
->blocked_signal_set
= deferrable_sigset
;
500 if (ctx
) fake_foreign_function_call(ctx
);
503 block_deferrable_signals(&oldset
);
506 funcall0(StaticSymbolFunction(RUN_INTERRUPTION
));
508 #ifdef LISP_FEATURE_WIN32
509 if (ctx
) undo_fake_foreign_function_call(ctx
);
510 pself
->blocked_signal_set
= oldset
;
511 if (ctx
) ctx
->sigmask
= oldset
;
513 thread_sigmask(SIG_SETMASK
, &oldset
, 0);
516 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
518 undo_fake_foreign_function_call(ctx
);
526 on_stack_p(struct thread
*th
, void *esp
)
528 return (void *)th
->control_stack_start
530 < (void *)th
->control_stack_end
;
533 #ifndef LISP_FEATURE_WIN32
534 /* (Technically, we still allocate an altstack even on Windows. Since
535 * Windows has a contiguous stack with an automatic guard page of
536 * user-configurable size instead of an alternative stack though, the
537 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
539 on_altstack_p(struct thread
*th
, void *esp
)
541 void *start
= (void *)th
+dynamic_values_bytes
;
542 void *end
= (char *)start
+ 32*SIGSTKSZ
;
543 return start
<= esp
&& esp
< end
;
/* Die unless `esp' lies on `th's control stack; distinguish the
 * altstack case in the error message on POSIX. */
void
assert_on_stack(struct thread *th, void *esp)
{
    if (on_stack_p(th, esp))
        return;
#ifndef LISP_FEATURE_WIN32
    if (on_altstack_p(th, esp))
        lose("thread %p: esp on altstack: %p", th, esp);
#endif
    lose("thread %p: bogus esp: %p", th, esp);
}
559 // returns 0 if skipped, 1 otherwise
561 check_pending_gc(os_context_t
*ctx
)
563 odxprint(misc
, "check_pending_gc");
564 struct thread
* self
= arch_os_get_current_thread();
568 if ((SymbolValue(IN_SAFEPOINT
,self
) == T
) &&
569 ((SymbolValue(GC_INHIBIT
,self
) == NIL
) &&
570 (SymbolValue(GC_PENDING
,self
) == NIL
))) {
571 SetSymbolValue(IN_SAFEPOINT
,NIL
,self
);
573 if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT
, self
) == NIL
)) {
574 if ((SymbolTlValue(GC_PENDING
, self
) == T
)) {
575 lispobj gc_happened
= NIL
;
577 bind_variable(IN_SAFEPOINT
,T
,self
);
578 block_deferrable_signals(&sigset
);
579 if(SymbolTlValue(GC_PENDING
,self
)==T
)
580 gc_happened
= funcall0(StaticSymbolFunction(SUB_GC
));
582 thread_sigmask(SIG_SETMASK
,&sigset
,NULL
);
583 if (gc_happened
== T
) {
584 /* POST_GC wants to enable interrupts */
585 if (SymbolValue(INTERRUPTS_ENABLED
,self
) == T
||
586 SymbolValue(ALLOW_WITH_INTERRUPTS
,self
) == T
) {
587 odxprint(misc
, "going to call POST_GC");
588 funcall0(StaticSymbolFunction(POST_GC
));
598 void thread_in_lisp_raised(os_context_t
*ctxptr
)
600 struct thread
*self
= arch_os_get_current_thread();
602 odxprint(safepoints
,"%s","thread_in_lisp_raised");
605 if (gc_state
.phase
== GC_FLIGHT
&&
606 SymbolTlValue(GC_PENDING
,self
)==T
&&
607 thread_gc_phase(self
)==GC_NONE
&&
608 thread_may_gc() && SymbolTlValue(IN_SAFEPOINT
,self
)!=T
) {
609 set_csp_from_context(self
, ctxptr
);
610 gc_advance(GC_QUIET
,GC_FLIGHT
);
611 set_thread_csp_access(self
,1);
612 if (gc_state
.collector
) {
613 gc_advance(GC_NONE
,GC_QUIET
);
615 *self
->csp_around_foreign_call
= 0;
616 SetTlSymbolValue(GC_PENDING
,T
,self
);
619 check_pending_gc(ctxptr
);
620 #ifdef LISP_FEATURE_SB_THRUPTION
621 while(check_pending_thruptions(ctxptr
));
625 if (gc_state
.phase
== GC_FLIGHT
) {
626 gc_state_wait(GC_MESSAGE
);
628 phase
= thread_gc_phase(self
);
629 if (phase
== GC_NONE
) {
630 SetTlSymbolValue(STOP_FOR_GC_PENDING
,NIL
,self
);
631 set_thread_csp_access(self
,1);
632 set_csp_from_context(self
, ctxptr
);
633 if (gc_state
.phase
<= GC_SETTLED
)
634 gc_advance(phase
,gc_state
.phase
);
636 gc_state_wait(phase
);
637 *self
->csp_around_foreign_call
= 0;
639 check_pending_gc(ctxptr
);
640 #ifdef LISP_FEATURE_SB_THRUPTION
641 while(check_pending_thruptions(ctxptr
));
644 gc_advance(phase
,gc_state
.phase
);
645 SetTlSymbolValue(STOP_FOR_GC_PENDING
,T
,self
);
650 void thread_in_safety_transition(os_context_t
*ctxptr
)
652 struct thread
*self
= arch_os_get_current_thread();
654 odxprint(safepoints
,"%s","GC safety transition");
656 if (set_thread_csp_access(self
,1)) {
657 gc_state_wait(thread_gc_phase(self
));
659 #ifdef LISP_FEATURE_SB_THRUPTION
660 while(check_pending_thruptions(ctxptr
));
663 gc_phase_t phase
= thread_gc_phase(self
);
664 if (phase
== GC_NONE
) {
665 SetTlSymbolValue(STOP_FOR_GC_PENDING
,NIL
,self
);
666 set_csp_from_context(self
, ctxptr
);
667 if (gc_state
.phase
<= GC_SETTLED
)
668 gc_advance(phase
,gc_state
.phase
);
670 gc_state_wait(phase
);
671 *self
->csp_around_foreign_call
= 0;
673 gc_advance(phase
,gc_state
.phase
);
674 SetTlSymbolValue(STOP_FOR_GC_PENDING
,T
,self
);
680 void thread_interrupted(os_context_t
*ctxptr
)
682 struct thread
*self
= arch_os_get_current_thread();
684 odxprint(safepoints
,"%s","pending interrupt trap");
686 if (gc_state
.phase
!= GC_NONE
) {
687 if (set_thread_csp_access(self
,1)) {
689 thread_in_safety_transition(ctxptr
);
692 thread_in_lisp_raised(ctxptr
);
697 check_pending_gc(ctxptr
);
698 #ifdef LISP_FEATURE_SB_THRUPTION
699 while(check_pending_thruptions(ctxptr
));
706 struct thread
* self
= arch_os_get_current_thread();
707 odxprint(safepoints
, "stop the world");
709 gc_state
.collector
= self
;
710 gc_state
.phase_wait
[GC_QUIET
]++;
712 switch(gc_state
.phase
) {
714 gc_advance(GC_QUIET
,gc_state
.phase
);
718 gc_state_wait(GC_QUIET
);
720 gc_state
.phase_wait
[GC_QUIET
]=1;
721 gc_advance(GC_COLLECT
,GC_QUIET
);
726 lose("Stopping the world in unexpected state %d",gc_state
.phase
);
729 set_thread_csp_access(self
,1);
731 SetTlSymbolValue(STOP_FOR_GC_PENDING
,NIL
,self
);
735 void gc_start_the_world()
737 odxprint(safepoints
,"%s","start the world");
739 gc_state
.collector
= NULL
;
740 SetSymbolValue(IN_WITHOUT_GCING
,IN_WITHOUT_GCING
,
741 arch_os_get_current_thread());
742 gc_advance(GC_NONE
,GC_COLLECT
);
747 #ifdef LISP_FEATURE_SB_THRUPTION
748 /* wake_thread(thread) -- ensure a thruption delivery to
751 # ifdef LISP_FEATURE_WIN32
754 wake_thread_io(struct thread
* thread
)
756 SetEvent(thread
->private_events
.events
[1]);
757 win32_maybe_interrupt_io(thread
);
761 wake_thread_win32(struct thread
*thread
)
763 struct thread
*self
= arch_os_get_current_thread();
765 wake_thread_io(thread
);
767 if (SymbolTlValue(THRUPTION_PENDING
,thread
)==T
)
770 SetTlSymbolValue(THRUPTION_PENDING
,T
,thread
);
772 if ((SymbolTlValue(GC_PENDING
,thread
)==T
)||
773 (SymbolTlValue(STOP_FOR_GC_PENDING
,thread
)==T
))
776 wake_thread_io(thread
);
777 pthread_mutex_unlock(&all_threads_lock
);
780 if (gc_state
.phase
== GC_NONE
) {
781 gc_advance(GC_INVOKED
,GC_NONE
);
782 gc_advance(GC_NONE
,GC_INVOKED
);
786 pthread_mutex_lock(&all_threads_lock
);
791 wake_thread_posix(os_thread_t os_thread
)
794 struct thread
*thread
;
795 struct thread
*self
= arch_os_get_current_thread();
797 /* Must not and need not attempt to signal ourselves while we're the
799 if (self
->os_thread
== os_thread
) {
800 SetTlSymbolValue(THRUPTION_PENDING
,T
,self
);
801 WITH_GC_AT_SAFEPOINTS_ONLY()
802 while (check_pending_thruptions(0 /* ignore the sigmask */))
807 /* We are not in a signal handler here, so need to block signals
810 block_deferrable_signals(&oldset
);
813 if (gc_state
.phase
== GC_NONE
) {
814 odxprint(safepoints
, "wake_thread_posix: invoking");
815 gc_advance(GC_INVOKED
,GC_NONE
);
817 /* only if in foreign code, notify using signal */
818 pthread_mutex_lock(&all_threads_lock
);
819 for_each_thread (thread
)
820 if (thread
->os_thread
== os_thread
) {
821 /* it's still alive... */
824 odxprint(safepoints
, "wake_thread_posix: found");
825 SetTlSymbolValue(THRUPTION_PENDING
,T
,thread
);
826 if (SymbolTlValue(GC_PENDING
,thread
) == T
827 || SymbolTlValue(STOP_FOR_GC_PENDING
,thread
) == T
)
830 if (os_get_csp(thread
)) {
831 odxprint(safepoints
, "wake_thread_posix: kill");
832 /* ... and in foreign code. Push it into a safety
834 int status
= pthread_kill(os_thread
, SIGPIPE
);
836 lose("wake_thread_posix: pthread_kill failed with %d\n",
841 pthread_mutex_unlock(&all_threads_lock
);
843 gc_advance(GC_NONE
,GC_INVOKED
);
845 odxprint(safepoints
, "wake_thread_posix: passive");
846 /* We are not able to wake the thread up actively, but maybe
847 * some other thread will take care of it. Kludge: Unless it is
848 * in foreign code. Let's at least try to get our return value
850 pthread_mutex_lock(&all_threads_lock
);
851 for_each_thread (thread
)
852 if (thread
->os_thread
== os_thread
) {
853 SetTlSymbolValue(THRUPTION_PENDING
,T
,thread
);
857 pthread_mutex_unlock(&all_threads_lock
);
861 odxprint(safepoints
, "wake_thread_posix leaving, found=%d", found
);
862 thread_sigmask(SIG_SETMASK
, &oldset
, 0);
863 return found
? 0 : -1;
865 #endif /* !LISP_FEATURE_WIN32 */
866 #endif /* LISP_FEATURE_SB_THRUPTION */
869 os_get_csp(struct thread
* th
)
871 FSHOW_SIGNAL((stderr
, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
873 th
->csp_around_foreign_call
,
874 *(void***)th
->csp_around_foreign_call
,
875 th
->control_stack_start
,
876 th
->control_stack_end
));
877 return *(void***)th
->csp_around_foreign_call
;
881 #ifndef LISP_FEATURE_WIN32
883 # ifdef LISP_FEATURE_SB_THRUPTION
885 thruption_handler(int signal
, siginfo_t
*info
, os_context_t
*ctx
)
887 struct thread
*self
= arch_os_get_current_thread();
889 void *transition_sp
= os_get_csp(self
);
891 /* In Lisp code. Do not run thruptions asynchronously. The
892 * next safepoint will take care of it. */
895 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
896 if (!foreign_function_call_active_p(self
))
897 lose("csp && !ffca");
900 /* In C code. As a rule, we assume that running thruptions is OK. */
901 *self
->csp_around_foreign_call
= 0;
902 thread_in_lisp_raised(ctx
);
903 *self
->csp_around_foreign_call
= (intptr_t) transition_sp
;
907 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
909 #define XSTR(s) STR(s)
911 /* Designed to be of the same type as call_into_lisp. Ignores its
914 handle_global_safepoint_violation(lispobj fun
, lispobj
*args
, int nargs
)
917 asm("int3; .byte " XSTR(trap_GlobalSafepoint
));
922 handle_csp_safepoint_violation(lispobj fun
, lispobj
*args
, int nargs
)
924 asm("int3; .byte " XSTR(trap_CspSafepoint
));
928 #endif /* C_STACK_IS_CONTROL_STACK */
931 handle_safepoint_violation(os_context_t
*ctx
, os_vm_address_t fault_address
)
933 FSHOW_SIGNAL((stderr
, "fault_address = %p, sp = %p, &csp = %p\n",
935 GC_SAFEPOINT_PAGE_ADDR
,
936 arch_os_get_current_thread()->csp_around_foreign_call
));
938 struct thread
*self
= arch_os_get_current_thread();
940 if (fault_address
== (os_vm_address_t
) GC_SAFEPOINT_PAGE_ADDR
) {
941 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
942 /* We're on the altstack and don't want to run Lisp code. */
943 arrange_return_to_c_function(ctx
, handle_global_safepoint_violation
, 0);
945 if (foreign_function_call_active_p(self
)) lose("GSP trap in C?");
946 fake_foreign_function_call(ctx
);
947 thread_in_lisp_raised(ctx
);
948 undo_fake_foreign_function_call(ctx
);
953 if (fault_address
== (os_vm_address_t
) self
->csp_around_foreign_call
) {
954 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
955 arrange_return_to_c_function(ctx
, handle_csp_safepoint_violation
, 0);
957 if (!foreign_function_call_active_p(self
)) lose("CSP trap in Lisp?");
958 thread_in_safety_transition(ctx
);
963 /* not a safepoint */
966 #endif /* LISP_FEATURE_WIN32 */
968 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
970 signal_handler_callback(lispobj run_handler
, int signo
, void *info
, void *ctx
)
972 init_thread_data scribble
;
974 DX_ALLOC_SAP(args_sap
, args
);
979 attach_os_thread(&scribble
);
981 odxprint(misc
, "callback from signal handler thread for: %d\n", signo
);
982 funcall3(StaticSymbolFunction(SIGNAL_HANDLER_CALLBACK
),
983 run_handler
, make_fixnum(signo
), args_sap
);
985 detach_os_thread(&scribble
);
990 #endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */