2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handler we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
72 * This is a workaround for some slightly silly Linux/GNU Libc
73 * behaviour: glibc defines sigset_t to support 1024 signals, which is
74 * more than the kernel. This is usually not a problem, but becomes
75 * one when we want to save a signal mask from a ucontext, and restore
76 * it later into another ucontext: the ucontext is allocated on the
77 * stack by the kernel, so copying a libc-sized sigset_t into it will
78 * overflow and cause other data on the stack to be corrupted */
79 /* FIXME: do not rely on NSIG being a multiple of 8 */
#ifdef LISP_FEATURE_WIN32
/* win32 supports at most 32 signals, so 4 bytes suffice. */
# define REAL_SIGSET_SIZE_BYTES (4)
#else
/* Only copy the kernel-sized prefix of a (possibly larger) libc
 * sigset_t; see the overflow note above. */
# define REAL_SIGSET_SIZE_BYTES ((NSIG/8))
#endif
88 sigcopyset(sigset_t
*new, sigset_t
*old
)
90 memcpy(new, old
, REAL_SIGSET_SIZE_BYTES
);
93 /* When we catch an internal error, should we pass it back to Lisp to
94  * be handled in a high-level way? (Early in cold init, the answer is
95  * 'no', because Lisp is still too brain-dead to handle anything.
96  * After sufficient initialization has been completed, the answer
 * becomes 'yes'.) */
98 boolean internal_errors_enabled
= 0;
100 #ifndef LISP_FEATURE_WIN32
102 void (*interrupt_low_level_handlers
[NSIG
]) (int, siginfo_t
*, os_context_t
*);
104 union interrupt_handler interrupt_handlers
[NSIG
];
106 /* Under Linux on some architectures, we appear to have to restore the
107 * FPU control word from the context, as after the signal is delivered
108 * we appear to have a null FPU control word. */
109 #if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
110 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
111 os_context_t *context = arch_os_get_context(&void_context); \
112 os_restore_fp_control(context);
114 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
115 os_context_t *context = arch_os_get_context(&void_context);
118 /* Foreign code may want to start some threads on its own.
119  * Non-targeted, truly asynchronous signals can be delivered to
120  * basically any thread, but invoking Lisp handlers in such foreign
121  * threads is really bad, so let's resignal it.
123  * This should at least bring attention to the problem, but it cannot
124  * work for SIGSEGV and similar. It is good enough for timers, and
125  * maybe all deferrables. */
#if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
/* Add to SIGSET every signal for which either a low-level or a
 * Lisp-level handler is installed (i.e. not SIG_DFL). */
static void
add_handled_signals(sigset_t *sigset)
{
    int i;
    for(i = 1; i < NSIG; i++) {
        if (!(ARE_SAME_HANDLER(interrupt_low_level_handlers[i], SIG_DFL)) ||
            !(ARE_SAME_HANDLER(interrupt_handlers[i].c, SIG_DFL))) {
            sigaddset(sigset, i);
        }
    }
}

void block_signals(sigset_t *what, sigset_t *where, sigset_t *old);
#endif
144 maybe_resignal_to_lisp_thread(int signal
, os_context_t
*context
)
146 #if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
147 if (!pthread_getspecific(lisp_thread
)) {
148 if (!(sigismember(&deferrable_sigset
,signal
))) {
149 corruption_warning_and_maybe_lose
150 ("Received signal %d in non-lisp thread %lu, resignalling to a lisp thread.",
156 sigemptyset(&sigset
);
157 add_handled_signals(&sigset
);
158 block_signals(&sigset
, 0, 0);
159 block_signals(&sigset
, os_context_sigmask_addr(context
), 0);
160 kill(getpid(), signal
);
168 /* These are to be used in signal handlers. Currently all handlers are
169 * called from one of:
171 * interrupt_handle_now_handler
172 * maybe_now_maybe_later
173 * unblock_me_trampoline
174 * low_level_handle_now_handler
175 * low_level_maybe_now_maybe_later
176 * low_level_unblock_me_trampoline
178 * This gives us a single point of control (or six) over errno, fp
179 * control word, and fixing up signal context on sparc.
181 * The SPARC/Linux platform doesn't quite do signals the way we want
182 * them done. The third argument in the handler isn't filled in by the
183 * kernel properly, so we fix it up ourselves in the
184 * arch_os_get_context(..) function. -- CSR, 2002-07-23
/* SAVE_ERRNO opens a scope (closed by RESTORE_ERRNO) that preserves
 * errno across the handler body, restores the FP control word, and
 * skips the body entirely if the signal was resignalled to a Lisp
 * thread. NOTE: the braces are deliberately unbalanced across the two
 * macros — they must always be used as a pair. */
#define SAVE_ERRNO(signal,context,void_context)                 \
    {                                                           \
        int _saved_errno = errno;                               \
        RESTORE_FP_CONTROL_WORD(context,void_context);          \
        if (!maybe_resignal_to_lisp_thread(signal, context))    \
        {

#define RESTORE_ERRNO                                           \
        }                                                       \
        errno = _saved_errno;                                   \
    }
198 static void run_deferred_handler(struct interrupt_data
*data
,
199 os_context_t
*context
);
200 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
201 static void store_signal_data_for_later (struct interrupt_data
*data
,
202 void *handler
, int signal
,
204 os_context_t
*context
);
207 /* Generic signal related utilities. */
210 get_current_sigmask(sigset_t
*sigset
)
212 /* Get the current sigmask, by blocking the empty set. */
213 thread_sigmask(SIG_BLOCK
, 0, sigset
);
217 block_signals(sigset_t
*what
, sigset_t
*where
, sigset_t
*old
)
222 sigcopyset(old
, where
);
223 for(i
= 1; i
< NSIG
; i
++) {
224 if (sigismember(what
, i
))
228 thread_sigmask(SIG_BLOCK
, what
, old
);
233 unblock_signals(sigset_t
*what
, sigset_t
*where
, sigset_t
*old
)
238 sigcopyset(old
, where
);
239 for(i
= 1; i
< NSIG
; i
++) {
240 if (sigismember(what
, i
))
244 thread_sigmask(SIG_UNBLOCK
, what
, old
);
249 print_sigset(sigset_t
*sigset
)
252 for(i
= 1; i
< NSIG
; i
++) {
253 if (sigismember(sigset
, i
))
254 fprintf(stderr
, "Signal %d masked\n", i
);
258 /* Return 1 if all signals in sigset2 are masked in sigset; return 0
259  * if all are unmasked; otherwise die. Passing NULL for sigset is a
260  * shorthand for the current sigmask. */
262 all_signals_blocked_p(sigset_t
*sigset
, sigset_t
*sigset2
,
265 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
267 boolean has_blocked
= 0, has_unblocked
= 0;
270 get_current_sigmask(¤t
);
273 for(i
= 1; i
< NSIG
; i
++) {
274 if (sigismember(sigset2
, i
)) {
275 if (sigismember(sigset
, i
))
281 if (has_blocked
&& has_unblocked
) {
282 print_sigset(sigset
);
283 lose("some %s signals blocked, some unblocked\n", name
);
293 /* Deferrables, blockables, gc signals. */
296 sigaddset_deferrable(sigset_t
*s
)
298 sigaddset(s
, SIGHUP
);
299 sigaddset(s
, SIGINT
);
300 sigaddset(s
, SIGTERM
);
301 sigaddset(s
, SIGQUIT
);
302 sigaddset(s
, SIGPIPE
);
303 sigaddset(s
, SIGALRM
);
304 sigaddset(s
, SIGURG
);
305 sigaddset(s
, SIGTSTP
);
306 sigaddset(s
, SIGCHLD
);
308 #ifndef LISP_FEATURE_HPUX
309 sigaddset(s
, SIGXCPU
);
310 sigaddset(s
, SIGXFSZ
);
312 sigaddset(s
, SIGVTALRM
);
313 sigaddset(s
, SIGPROF
);
314 sigaddset(s
, SIGWINCH
);
318 sigaddset_blockable(sigset_t
*sigset
)
320 sigaddset_deferrable(sigset
);
321 sigaddset_gc(sigset
);
325 sigaddset_gc(sigset_t
*sigset
)
327 #ifdef THREADS_USING_GCSIGNAL
328 sigaddset(sigset
,SIG_STOP_FOR_GC
);
332 /* initialized in interrupt_init */
333 sigset_t deferrable_sigset
;
334 sigset_t blockable_sigset
;
339 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
341 deferrables_blocked_p(sigset_t
*sigset
)
343 return all_signals_blocked_p(sigset
, &deferrable_sigset
, "deferrable");
348 check_deferrables_unblocked_or_lose(sigset_t
*sigset
)
350 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
351 if (deferrables_blocked_p(sigset
))
352 lose("deferrables blocked\n");
357 check_deferrables_blocked_or_lose(sigset_t
*sigset
)
359 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
360 if (!deferrables_blocked_p(sigset
))
361 lose("deferrables unblocked\n");
365 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
367 blockables_blocked_p(sigset_t
*sigset
)
369 return all_signals_blocked_p(sigset
, &blockable_sigset
, "blockable");
374 check_blockables_unblocked_or_lose(sigset_t
*sigset
)
376 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
377 if (blockables_blocked_p(sigset
))
378 lose("blockables blocked\n");
383 check_blockables_blocked_or_lose(sigset_t
*sigset
)
385 #if !defined(LISP_FEATURE_WIN32)
386 /* On Windows, there are no actual signals, but since the win32 port
387 * tracks the sigmask and checks it explicitly, some functions are
388 * still required to keep the mask set up properly. (After all, the
389 * goal of the sigmask emulation is to not have to change all the
390 * call sites in the first place.)
392 * However, this does not hold for all signals equally: While
393 * deferrables matter ("is interrupt-thread okay?"), it is not worth
394 * having to set up blockables properly (which include the
395 * non-existing GC signals).
397 * Yet, as the original comment explains it:
398 * Adjusting FREE-INTERRUPT-CONTEXT-INDEX* and other aspecs of
399 * fake_foreign_function_call machinery are sometimes useful here[...].
401 * So we merely skip this assertion.
402 * -- DFL, trying to expand on a comment by AK.
404 if (!blockables_blocked_p(sigset
))
405 lose("blockables unblocked\n");
409 #ifndef LISP_FEATURE_SB_SAFEPOINT
410 #if !defined(LISP_FEATURE_WIN32)
412 gc_signals_blocked_p(sigset_t
*sigset
)
414 return all_signals_blocked_p(sigset
, &gc_sigset
, "gc");
419 check_gc_signals_unblocked_or_lose(sigset_t
*sigset
)
421 #if !defined(LISP_FEATURE_WIN32)
422 if (gc_signals_blocked_p(sigset
))
423 lose("gc signals blocked\n");
428 check_gc_signals_blocked_or_lose(sigset_t
*sigset
)
430 #if !defined(LISP_FEATURE_WIN32)
431 if (!gc_signals_blocked_p(sigset
))
432 lose("gc signals unblocked\n");
438 block_deferrable_signals(sigset_t
*where
, sigset_t
*old
)
440 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
441 block_signals(&deferrable_sigset
, where
, old
);
446 block_blockable_signals(sigset_t
*where
, sigset_t
*old
)
448 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
449 block_signals(&blockable_sigset
, where
, old
);
453 #ifndef LISP_FEATURE_SB_SAFEPOINT
455 block_gc_signals(sigset_t
*where
, sigset_t
*old
)
457 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
458 block_signals(&gc_sigset
, where
, old
);
464 unblock_deferrable_signals(sigset_t
*where
, sigset_t
*old
)
466 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
467 if (interrupt_handler_pending_p())
468 lose("unblock_deferrable_signals: losing proposition\n");
469 #ifndef LISP_FEATURE_SB_SAFEPOINT
470 check_gc_signals_unblocked_or_lose(where
);
472 unblock_signals(&deferrable_sigset
, where
, old
);
477 unblock_blockable_signals(sigset_t
*where
, sigset_t
*old
)
479 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
480 unblock_signals(&blockable_sigset
, where
, old
);
484 #ifndef LISP_FEATURE_SB_SAFEPOINT
486 unblock_gc_signals(sigset_t
*where
, sigset_t
*old
)
488 #ifndef LISP_FEATURE_WIN32
489 unblock_signals(&gc_sigset
, where
, old
);
495 unblock_signals_in_context_and_maybe_warn(os_context_t
*context
)
497 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
498 sigset_t
*sigset
= os_context_sigmask_addr(context
);
499 #ifndef LISP_FEATURE_SB_SAFEPOINT
500 if (all_signals_blocked_p(sigset
, &gc_sigset
, "gc")) {
501 corruption_warning_and_maybe_lose(
502 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
503 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
504 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
505 unblock_gc_signals(sigset
, 0);
508 if (!interrupt_handler_pending_p()) {
509 unblock_deferrable_signals(sigset
, 0);
516 check_interrupts_enabled_or_lose(os_context_t
*context
)
518 struct thread
*thread
=arch_os_get_current_thread();
519 if (SymbolValue(INTERRUPTS_ENABLED
,thread
) == NIL
)
520 lose("interrupts not enabled\n");
521 if (arch_pseudo_atomic_atomic(context
))
522 lose ("in pseudo atomic section\n");
525 /* Save sigset (or the current sigmask if 0) if there is no pending
526  * handler, because that means that deferrables are already blocked.
527  * The purpose is to avoid losing the pending gc signal if a
528  * deferrable interrupt async unwinds between clearing the pseudo
529  * atomic and trapping to GC.*/
530 #ifndef LISP_FEATURE_SB_SAFEPOINT
532 maybe_save_gc_mask_and_block_deferrables(sigset_t
*sigset
)
534 #ifndef LISP_FEATURE_WIN32
535 struct thread
*thread
= arch_os_get_current_thread();
536 struct interrupt_data
*data
= thread
->interrupt_data
;
538 /* Obviously, this function is called when signals may not be
539 * blocked. Let's make sure we are not interrupted. */
540 block_blockable_signals(0, &oldset
);
541 #ifndef LISP_FEATURE_SB_THREAD
542 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
544 if (data
->gc_blocked_deferrables
)
545 lose("gc_blocked_deferrables already true\n");
547 if ((!data
->pending_handler
) &&
548 (!data
->gc_blocked_deferrables
)) {
549 FSHOW_SIGNAL((stderr
,"/setting gc_blocked_deferrables\n"));
550 data
->gc_blocked_deferrables
= 1;
552 /* This is the sigmask of some context. */
553 sigcopyset(&data
->pending_mask
, sigset
);
554 sigaddset_deferrable(sigset
);
555 thread_sigmask(SIG_SETMASK
,&oldset
,0);
558 /* Operating on the current sigmask. Save oldset and
559 * unblock gc signals. In the end, this is equivalent to
560 * blocking the deferrables. */
561 sigcopyset(&data
->pending_mask
, &oldset
);
562 thread_sigmask(SIG_UNBLOCK
, &gc_sigset
, 0);
566 thread_sigmask(SIG_SETMASK
,&oldset
,0);
571 /* Are we leaving WITH-GCING and already running with interrupts
572 * enabled, without the protection of *GC-INHIBIT* T and there is gc
573 * (or stop for gc) pending, but we haven't trapped yet? */
575 in_leaving_without_gcing_race_p(struct thread
*thread
)
577 return ((SymbolValue(IN_WITHOUT_GCING
,thread
) != NIL
) &&
578 (SymbolValue(INTERRUPTS_ENABLED
,thread
) != NIL
) &&
579 (SymbolValue(GC_INHIBIT
,thread
) == NIL
) &&
580 ((SymbolValue(GC_PENDING
,thread
) != NIL
)
581 #if defined(LISP_FEATURE_SB_THREAD)
582 || (SymbolValue(STOP_FOR_GC_PENDING
,thread
) != NIL
)
587 /* Check our baroque invariants. */
589 check_interrupt_context_or_lose(os_context_t
*context
)
591 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
592 struct thread
*thread
= arch_os_get_current_thread();
593 struct interrupt_data
*data
= thread
->interrupt_data
;
594 int interrupt_deferred_p
= (data
->pending_handler
!= 0);
595 int interrupt_pending
= (SymbolValue(INTERRUPT_PENDING
,thread
) != NIL
);
596 sigset_t
*sigset
= os_context_sigmask_addr(context
);
597 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
598 * handle_allocation_trap. */
599 #if defined(LISP_FEATURE_GENCGC) && !defined(GENCGC_IS_PRECISE)
600 int interrupts_enabled
= (SymbolValue(INTERRUPTS_ENABLED
,thread
) != NIL
);
601 int gc_inhibit
= (SymbolValue(GC_INHIBIT
,thread
) != NIL
);
602 int gc_pending
= (SymbolValue(GC_PENDING
,thread
) == T
);
603 int pseudo_atomic_interrupted
= get_pseudo_atomic_interrupted(thread
);
604 int in_race_p
= in_leaving_without_gcing_race_p(thread
);
605 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
606 * section and trapping, a SIG_STOP_FOR_GC would see the next
607 * check fail, for this reason sig_stop_for_gc handler does not
608 * call this function. */
609 if (interrupt_deferred_p
) {
610 if (!(!interrupts_enabled
|| pseudo_atomic_interrupted
|| in_race_p
))
611 lose("Stray deferred interrupt.\n");
614 if (!(pseudo_atomic_interrupted
|| gc_inhibit
|| in_race_p
))
615 lose("GC_PENDING, but why?\n");
616 #if defined(LISP_FEATURE_SB_THREAD)
618 int stop_for_gc_pending
=
619 (SymbolValue(STOP_FOR_GC_PENDING
,thread
) != NIL
);
620 if (stop_for_gc_pending
)
621 if (!(pseudo_atomic_interrupted
|| gc_inhibit
|| in_race_p
))
622 lose("STOP_FOR_GC_PENDING, but why?\n");
623 if (pseudo_atomic_interrupted
)
624 if (!(gc_pending
|| stop_for_gc_pending
|| interrupt_deferred_p
))
625 lose("pseudo_atomic_interrupted, but why?\n");
628 if (pseudo_atomic_interrupted
)
629 if (!(gc_pending
|| interrupt_deferred_p
))
630 lose("pseudo_atomic_interrupted, but why?\n");
633 if (interrupt_pending
&& !interrupt_deferred_p
)
634 lose("INTERRUPT_PENDING but not pending handler.\n");
635 if ((data
->gc_blocked_deferrables
) && interrupt_pending
)
636 lose("gc_blocked_deferrables and interrupt pending\n.");
637 if (data
->gc_blocked_deferrables
)
638 check_deferrables_blocked_or_lose(sigset
);
639 if (interrupt_pending
|| interrupt_deferred_p
||
640 data
->gc_blocked_deferrables
)
641 check_deferrables_blocked_or_lose(sigset
);
643 check_deferrables_unblocked_or_lose(sigset
);
644 #ifndef LISP_FEATURE_SB_SAFEPOINT
645 /* If deferrables are unblocked then we are open to signals
646 * that run lisp code. */
647 check_gc_signals_unblocked_or_lose(sigset
);
654 * utility routines used by various signal handlers
658 build_fake_control_stack_frames(struct thread
*th
,os_context_t
*context
)
660 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
664 /* Build a fake stack frame or frames */
666 #if !defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_ARM64)
667 access_control_frame_pointer(th
) =
669 (*os_context_register_addr(context
, reg_CSP
));
670 if ((lispobj
*)(uword_t
)
671 (*os_context_register_addr(context
, reg_CFP
))
672 == access_control_frame_pointer(th
)) {
673 /* There is a small window during call where the callee's
674 * frame isn't built yet. */
675 if (lowtag_of(*os_context_register_addr(context
, reg_CODE
))
676 == FUN_POINTER_LOWTAG
) {
677 /* We have called, but not built the new frame, so
678 * build it for them. */
679 access_control_frame_pointer(th
)[0] =
680 *os_context_register_addr(context
, reg_OCFP
);
681 access_control_frame_pointer(th
)[1] =
682 *os_context_register_addr(context
, reg_LRA
);
683 access_control_frame_pointer(th
) += 2;
684 /* Build our frame on top of it. */
685 oldcont
= (lispobj
)(*os_context_register_addr(context
, reg_CFP
));
688 /* We haven't yet called, build our frame as if the
689 * partial frame wasn't there. */
690 oldcont
= (lispobj
)(*os_context_register_addr(context
, reg_OCFP
));
693 #elif defined (LISP_FEATURE_ARM)
694 access_control_frame_pointer(th
) =
695 SymbolValue(CONTROL_STACK_POINTER
, th
);
696 #elif defined (LISP_FEATURE_ARM64)
697 access_control_frame_pointer(th
) =
698 (lispobj
*)(uword_t
) (*os_context_register_addr(context
, reg_CSP
));
700 /* We can't tell whether we are still in the caller if it had to
701 * allocate a stack frame due to stack arguments. */
702 /* This observation provoked some past CMUCL maintainer to ask
703 * "Can anything strange happen during return?" */
706 oldcont
= (lispobj
)(*os_context_register_addr(context
, reg_CFP
));
709 access_control_stack_pointer(th
) = access_control_frame_pointer(th
) + 3;
711 access_control_frame_pointer(th
)[0] = oldcont
;
712 access_control_frame_pointer(th
)[1] = NIL
;
713 access_control_frame_pointer(th
)[2] =
714 (lispobj
)(*os_context_register_addr(context
, reg_CODE
));
718 /* Stores the context for gc to scavenge and builds fake stack
721 fake_foreign_function_call(os_context_t
*context
)
724 struct thread
*thread
=arch_os_get_current_thread();
726 /* context_index incrementing must not be interrupted */
727 check_blockables_blocked_or_lose(0);
729 /* Get current Lisp state from context. */
730 #if defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_GENCGC)
731 dynamic_space_free_pointer
= SymbolValue(ALLOCATION_POINTER
, thread
);
734 #ifdef LISP_FEATURE_SB_THREAD
735 thread
->pseudo_atomic_bits
=
737 dynamic_space_free_pointer
=
740 (*os_context_register_addr(context
, reg_ALLOC
));
741 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
742 /* dynamic_space_free_pointer); */
743 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
744 if ((sword_t
)dynamic_space_free_pointer
& 1) {
745 lose("dead in fake_foreign_function_call, context = %x\n", context
);
748 /* why doesnt PPC and SPARC do something like this: */
749 #if defined(LISP_FEATURE_HPPA)
750 if ((sword_t
)dynamic_space_free_pointer
& 4) {
751 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context
, dynamic_space_free_pointer
);
756 set_binding_stack_pointer(thread
,
757 *os_context_register_addr(context
, reg_BSP
));
760 #if defined(LISP_FEATURE_ARM)
761 /* Stash our control stack pointer */
762 bind_variable(INTERRUPTED_CONTROL_STACK_POINTER
,
763 SymbolValue(CONTROL_STACK_POINTER
, thread
),
767 build_fake_control_stack_frames(thread
,context
);
769 /* Do dynamic binding of the active interrupt context index
770 * and save the context in the context array. */
772 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX
,thread
));
774 if (context_index
>= MAX_INTERRUPTS
) {
775 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS
);
778 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX
,
779 make_fixnum(context_index
+ 1),thread
);
781 thread
->interrupt_contexts
[context_index
] = context
;
783 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
784 /* x86oid targets don't maintain the foreign function call flag at
785 * all, so leave them to believe that they are never in foreign
787 foreign_function_call_active_p(thread
) = 1;
791 /* blocks all blockable signals. If you are calling from a signal handler,
792 * the usual signal mask will be restored from the context when the handler
793 * finishes. Otherwise, be careful */
795 undo_fake_foreign_function_call(os_context_t
*context
)
797 struct thread
*thread
=arch_os_get_current_thread();
798 /* Block all blockable signals. */
799 block_blockable_signals(0, 0);
801 foreign_function_call_active_p(thread
) = 0;
803 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
806 #if defined(LISP_FEATURE_ARM)
807 /* Restore our saved control stack pointer */
808 SetSymbolValue(CONTROL_STACK_POINTER
,
809 SymbolValue(INTERRUPTED_CONTROL_STACK_POINTER
,
815 #if defined(reg_ALLOC) && !defined(LISP_FEATURE_SB_THREAD)
816 /* Put the dynamic space free pointer back into the context. */
817 *os_context_register_addr(context
, reg_ALLOC
) =
818 (uword_t
) dynamic_space_free_pointer
819 | (*os_context_register_addr(context
, reg_ALLOC
)
822 ((uword_t)(*os_context_register_addr(context, reg_ALLOC))
824 | ((uword_t) dynamic_space_free_pointer & LOWTAG_MASK);
827 #if defined(reg_ALLOC) && defined(LISP_FEATURE_SB_THREAD)
828 /* Put the pseudo-atomic bits and dynamic space free pointer back
829 * into the context (p-a-bits for p-a, and dynamic space free
830 * pointer for ROOM). */
831 *os_context_register_addr(context
, reg_ALLOC
) =
832 (uword_t
) dynamic_space_free_pointer
833 | (thread
->pseudo_atomic_bits
& LOWTAG_MASK
);
834 /* And clear them so we don't get bit later by call-in/call-out
835 * not updating them. */
836 thread
->pseudo_atomic_bits
= 0;
838 #if defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_GENCGC)
839 SetSymbolValue(ALLOCATION_POINTER
, dynamic_space_free_pointer
, thread
);
843 /* a handler for the signal caused by execution of a trap opcode
844 * signalling an internal error */
846 interrupt_internal_error(os_context_t
*context
, boolean continuable
)
848 DX_ALLOC_SAP(context_sap
, context
);
850 fake_foreign_function_call(context
);
852 if (!internal_errors_enabled
) {
853 describe_internal_error(context
);
854 /* There's no good way to recover from an internal error
855 * before the Lisp error handling mechanism is set up. */
856 lose("internal error too early in init, can't recover\n");
859 #ifndef LISP_FEATURE_SB_SAFEPOINT
860 unblock_gc_signals(0, 0);
863 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
864 thread_sigmask(SIG_SETMASK
, os_context_sigmask_addr(context
), 0);
867 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
868 /* Workaround for blocked SIGTRAP. */
871 sigemptyset(&newset
);
872 sigaddset(&newset
, SIGTRAP
);
873 thread_sigmask(SIG_UNBLOCK
, &newset
, 0);
877 SHOW("in interrupt_internal_error");
879 /* Display some rudimentary debugging information about the
880 * error, so that even if the Lisp error handler gets badly
881 * confused, we have a chance to determine what's going on. */
882 describe_internal_error(context
);
884 funcall2(StaticSymbolFunction(INTERNAL_ERROR
), context_sap
,
885 continuable
? T
: NIL
);
887 undo_fake_foreign_function_call(context
); /* blocks signals again */
889 arch_skip_instruction(context
);
893 interrupt_handler_pending_p(void)
895 struct thread
*thread
= arch_os_get_current_thread();
896 struct interrupt_data
*data
= thread
->interrupt_data
;
897 return (data
->pending_handler
!= 0);
901 interrupt_handle_pending(os_context_t
*context
)
903 /* There are three ways we can get here. First, if an interrupt
904 * occurs within pseudo-atomic, it will be deferred, and we'll
905 * trap to here at the end of the pseudo-atomic block. Second, if
906 * the GC (in alloc()) decides that a GC is required, it will set
907 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
908 * and alloc() is always called from within pseudo-atomic, and
909 * thus we end up here again. Third, when calling GC-ON or at the
910 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
911 * here if there is a pending GC. Fourth, ahem, at the end of
912 * WITHOUT-INTERRUPTS (bar complications with nesting).
914 * A fourth way happens with safepoints: In addition to a stop for
915 * GC that is pending, there are thruptions. Both mechanisms are
916 * mostly signal-free, yet also of an asynchronous nature, so it makes
917 * sense to let interrupt_handle_pending take care of running them:
918 * It gets run precisely at those places where it is safe to process
919 * pending asynchronous tasks. */
921 struct thread
*thread
= arch_os_get_current_thread();
922 struct interrupt_data
*data
= thread
->interrupt_data
;
924 if (arch_pseudo_atomic_atomic(context
)) {
925 lose("Handling pending interrupt in pseudo atomic.");
928 FSHOW_SIGNAL((stderr
, "/entering interrupt_handle_pending\n"));
930 check_blockables_blocked_or_lose(0);
931 #ifndef LISP_FEATURE_SB_SAFEPOINT
933 * (On safepoint builds, there is no gc_blocked_deferrables nor
936 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
937 * handler, then the pending mask was saved and
938 * gc_blocked_deferrables set. Hence, there can be no pending
939 * handler and it's safe to restore the pending mask.
941 * Note, that if gc_blocked_deferrables is false we may still have
942 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
943 * pseudo atomic was interrupt be a deferrable first. */
944 if (data
->gc_blocked_deferrables
) {
945 if (data
->pending_handler
)
946 lose("GC blocked deferrables but still got a pending handler.");
947 if (SymbolValue(GC_INHIBIT
,thread
)!=NIL
)
948 lose("GC blocked deferrables while GC is inhibited.");
949 /* Restore the saved signal mask from the original signal (the
950 * one that interrupted us during the critical section) into
951 * the os_context for the signal we're currently in the
952 * handler for. This should ensure that when we return from
953 * the handler the blocked signals are unblocked. */
954 #ifndef LISP_FEATURE_WIN32
955 sigcopyset(os_context_sigmask_addr(context
), &data
->pending_mask
);
957 data
->gc_blocked_deferrables
= 0;
961 if (SymbolValue(GC_INHIBIT
,thread
)==NIL
) {
962 void *original_pending_handler
= data
->pending_handler
;
964 #ifdef LISP_FEATURE_SB_SAFEPOINT
965 /* handles the STOP_FOR_GC_PENDING case, plus THRUPTIONS */
966 if (SymbolValue(STOP_FOR_GC_PENDING
,thread
) != NIL
967 # ifdef LISP_FEATURE_SB_THRUPTION
968 || (SymbolValue(THRUPTION_PENDING
,thread
) != NIL
969 && SymbolValue(INTERRUPTS_ENABLED
, thread
) != NIL
)
972 /* We ought to take this chance to do a pitstop now. */
973 thread_in_lisp_raised(context
);
974 #elif defined(LISP_FEATURE_SB_THREAD)
975 if (SymbolValue(STOP_FOR_GC_PENDING
,thread
) != NIL
) {
976 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
977 * the signal handler if it actually stops us. */
978 arch_clear_pseudo_atomic_interrupted(context
);
979 sig_stop_for_gc_handler(SIG_STOP_FOR_GC
,NULL
,context
);
982 /* Test for T and not for != NIL since the value :IN-PROGRESS
983 * is used in SUB-GC as part of the mechanism to supress
985 if (SymbolValue(GC_PENDING
,thread
) == T
) {
987 /* Two reasons for doing this. First, if there is a
988 * pending handler we don't want to run. Second, we are
989 * going to clear pseudo atomic interrupted to avoid
990 * spurious trapping on every allocation in SUB_GC and
991 * having a pending handler with interrupts enabled and
992 * without pseudo atomic interrupted breaks an
994 if (data
->pending_handler
) {
995 bind_variable(ALLOW_WITH_INTERRUPTS
, NIL
, thread
);
996 bind_variable(INTERRUPTS_ENABLED
, NIL
, thread
);
999 arch_clear_pseudo_atomic_interrupted(context
);
1001 /* GC_PENDING is cleared in SUB-GC, or if another thread
1002 * is doing a gc already we will get a SIG_STOP_FOR_GC and
1003 * that will clear it.
1005 * If there is a pending handler or gc was triggerred in a
1006 * signal handler then maybe_gc won't run POST_GC and will
1007 * return normally. */
1008 if (!maybe_gc(context
))
1009 lose("GC not inhibited but maybe_gc did not GC.");
1011 if (data
->pending_handler
) {
1015 } else if (SymbolValue(GC_PENDING
,thread
) != NIL
) {
1016 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
1017 * GC-PENDING is not NIL then we cannot trap on pseudo
1018 * atomic due to GC (see if(GC_PENDING) logic in
1019 * cheneygc.c an gengcgc.c), plus there is a outer
1020 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
1022 lose("Trapping to run pending handler while GC in progress.");
1025 check_blockables_blocked_or_lose(0);
1027 /* No GC shall be lost. If SUB_GC triggers another GC then
1028 * that should be handled on the spot. */
1029 if (SymbolValue(GC_PENDING
,thread
) != NIL
)
1030 lose("GC_PENDING after doing gc.");
1031 #ifdef THREADS_USING_GCSIGNAL
1032 if (SymbolValue(STOP_FOR_GC_PENDING
,thread
) != NIL
)
1033 lose("STOP_FOR_GC_PENDING after doing gc.");
1035 /* Check two things. First, that gc does not clobber a handler
1036 * that's already pending. Second, that there is no interrupt
1037 * lossage: if original_pending_handler was NULL then even if
1038 * an interrupt arrived during GC (POST-GC, really) it was
1040 if (original_pending_handler
!= data
->pending_handler
)
1041 lose("pending handler changed in gc: %x -> %x.",
1042 original_pending_handler
, data
->pending_handler
);
1045 #ifndef LISP_FEATURE_WIN32
1046 /* There may be no pending handler, because it was only a gc that
1047 * had to be executed or because Lisp is a bit too eager to call
1048 * DO-PENDING-INTERRUPT. */
1049 if ((SymbolValue(INTERRUPTS_ENABLED
,thread
) != NIL
) &&
1050 (data
->pending_handler
)) {
1051 /* No matter how we ended up here, clear both
1052 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
1053 * because we checked above that there is no GC pending. */
1054 SetSymbolValue(INTERRUPT_PENDING
, NIL
, thread
);
1055 arch_clear_pseudo_atomic_interrupted(context
);
1056 /* Restore the sigmask in the context. */
1057 sigcopyset(os_context_sigmask_addr(context
), &data
->pending_mask
);
1058 run_deferred_handler(data
, context
);
1060 #ifdef LISP_FEATURE_SB_THRUPTION
1061 if (SymbolValue(THRUPTION_PENDING
,thread
)==T
)
1062 /* Special case for the following situation: There is a
1063 * thruption pending, but a signal had been deferred. The
1064 * pitstop at the top of this function could only take care
1065 * of GC, and skipped the thruption, so we need to try again
1066 * now that INTERRUPT_PENDING and the sigmask have been
1068 while (check_pending_thruptions(context
))
1072 #ifdef LISP_FEATURE_GENCGC
1073 if (get_pseudo_atomic_interrupted(thread
))
1074 lose("pseudo_atomic_interrupted after interrupt_handle_pending\n");
1076 /* It is possible that the end of this function was reached
1077 * without ever actually doing anything; the tests in Lisp for
1078 * when to call receive-pending-interrupt are not exact. */
1079 FSHOW_SIGNAL((stderr
, "/exiting interrupt_handle_pending\n"));
1084 interrupt_handle_now(int signal
, siginfo_t
*info
, os_context_t
*context
)
1086 boolean were_in_lisp
;
1087 union interrupt_handler handler
;
1089 check_blockables_blocked_or_lose(0);
1091 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
1092 if (sigismember(&deferrable_sigset
,signal
))
1093 check_interrupts_enabled_or_lose(context
);
1096 handler
= interrupt_handlers
[signal
];
1098 if (ARE_SAME_HANDLER(handler
.c
, SIG_IGN
)) {
1102 were_in_lisp
= !foreign_function_call_active_p(arch_os_get_current_thread());
1105 fake_foreign_function_call(context
);
1108 FSHOW_SIGNAL((stderr
,
1109 "/entering interrupt_handle_now(%d, info, context)\n",
1112 if (ARE_SAME_HANDLER(handler
.c
, SIG_DFL
)) {
1114 /* This can happen if someone tries to ignore or default one
1115 * of the signals we need for runtime support, and the runtime
1116 * support decides to pass on it. */
1117 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal
);
1119 } else if (lowtag_of(handler
.lisp
) == FUN_POINTER_LOWTAG
) {
1120 /* Once we've decided what to do about contexts in a
1121 * return-elsewhere world (the original context will no longer
1122 * be available; should we copy it or was nobody using it anyway?)
1123 * then we should convert this to return-elsewhere */
1125 /* CMUCL comment said "Allocate the SAPs while the interrupts
1126 * are still disabled.". I (dan, 2003.08.21) assume this is
1127 * because we're not in pseudoatomic and allocation shouldn't
1128 * be interrupted. In which case it's no longer an issue as
1129 * all our allocation from C now goes through a PA wrapper,
1130 * but still, doesn't hurt.
1132 * Yeah, but non-gencgc platforms don't really wrap allocation
1133 * in PA. MG - 2005-08-29 */
1136 #ifndef LISP_FEATURE_SB_SAFEPOINT
1137 /* Leave deferrable signals blocked, the handler itself will
1138 * allow signals again when it sees fit. */
1139 unblock_gc_signals(0, 0);
1141 WITH_GC_AT_SAFEPOINTS_ONLY()
1143 { // the block is needed for WITH_GC_AT_SAFEPOINTS_ONLY() to work
1144 DX_ALLOC_SAP(context_sap
, context
);
1145 DX_ALLOC_SAP(info_sap
, info
);
1147 FSHOW_SIGNAL((stderr
,"/calling Lisp-level handler\n"));
1149 funcall3(handler
.lisp
,
1150 make_fixnum(signal
),
1155 /* This cannot happen in sane circumstances. */
1157 FSHOW_SIGNAL((stderr
,"/calling C-level handler\n"));
1159 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
1160 /* Allow signals again. */
1161 thread_sigmask(SIG_SETMASK
, os_context_sigmask_addr(context
), 0);
1162 (*handler
.c
)(signal
, info
, context
);
1168 undo_fake_foreign_function_call(context
); /* block signals again */
1171 FSHOW_SIGNAL((stderr
,
1172 "/returning from interrupt_handle_now(%d, info, context)\n",
1176 /* This is called at the end of a critical section if the indications
1177 * are that some signal was deferred during the section. Note that as
1178 * far as C or the kernel is concerned we dealt with the signal
1179 * already; we're just doing the Lisp-level processing now that we
1182 run_deferred_handler(struct interrupt_data
*data
, os_context_t
*context
)
1184 /* The pending_handler may enable interrupts and then another
1185 * interrupt may hit, overwrite interrupt_data, so reset the
1186 * pending handler before calling it. Trust the handler to finish
1187 * with the siginfo before enabling interrupts. */
1188 void (*pending_handler
) (int, siginfo_t
*, os_context_t
*) =
1189 data
->pending_handler
;
1191 data
->pending_handler
=0;
1192 FSHOW_SIGNAL((stderr
, "/running deferred handler %p\n", pending_handler
));
1193 (*pending_handler
)(data
->pending_signal
,&(data
->pending_info
), context
);
1196 #ifndef LISP_FEATURE_WIN32
1198 maybe_defer_handler(void *handler
, struct interrupt_data
*data
,
1199 int signal
, siginfo_t
*info
, os_context_t
*context
)
1201 struct thread
*thread
=arch_os_get_current_thread();
1203 check_blockables_blocked_or_lose(0);
1205 if (SymbolValue(INTERRUPT_PENDING
,thread
) != NIL
)
1206 lose("interrupt already pending\n");
1207 if (thread
->interrupt_data
->pending_handler
)
1208 lose("there is a pending handler already (PA)\n");
1209 if (data
->gc_blocked_deferrables
)
1210 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
1211 check_interrupt_context_or_lose(context
);
1212 /* If interrupts are disabled then INTERRUPT_PENDING is set and
1213 * not PSEDUO_ATOMIC_INTERRUPTED. This is important for a pseudo
1214 * atomic section inside a WITHOUT-INTERRUPTS.
1216 * Also, if in_leaving_without_gcing_race_p then
1217 * interrupt_handle_pending is going to be called soon, so
1218 * stashing the signal away is safe.
1220 if ((SymbolValue(INTERRUPTS_ENABLED
,thread
) == NIL
) ||
1221 in_leaving_without_gcing_race_p(thread
)) {
1222 FSHOW_SIGNAL((stderr
,
1223 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
1224 (unsigned int)handler
,signal
,
1225 in_leaving_without_gcing_race_p(thread
)));
1226 store_signal_data_for_later(data
,handler
,signal
,info
,context
);
1227 SetSymbolValue(INTERRUPT_PENDING
, T
,thread
);
1228 check_interrupt_context_or_lose(context
);
1231 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
1232 * actually use its argument for anything on x86, so this branch
1233 * may succeed even when context is null (gencgc alloc()) */
1234 if (arch_pseudo_atomic_atomic(context
)) {
1235 FSHOW_SIGNAL((stderr
,
1236 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1237 (unsigned int)handler
,signal
));
1238 store_signal_data_for_later(data
,handler
,signal
,info
,context
);
1239 arch_set_pseudo_atomic_interrupted(context
);
1240 check_interrupt_context_or_lose(context
);
1243 FSHOW_SIGNAL((stderr
,
1244 "/maybe_defer_handler(%x,%d): not deferred\n",
1245 (unsigned int)handler
,signal
));
1250 store_signal_data_for_later (struct interrupt_data
*data
, void *handler
,
1252 siginfo_t
*info
, os_context_t
*context
)
1254 if (data
->pending_handler
)
1255 lose("tried to overwrite pending interrupt handler %x with %x\n",
1256 data
->pending_handler
, handler
);
1258 lose("tried to defer null interrupt handler\n");
1259 data
->pending_handler
= handler
;
1260 data
->pending_signal
= signal
;
1262 memcpy(&(data
->pending_info
), info
, sizeof(siginfo_t
));
1264 FSHOW_SIGNAL((stderr
, "/store_signal_data_for_later: signal: %d\n",
1268 lose("Null context");
1270 /* the signal mask in the context (from before we were
1271 * interrupted) is copied to be restored when run_deferred_handler
1272 * happens. Then the usually-blocked signals are added to the mask
1273 * in the context so that we are running with blocked signals when
1274 * the handler returns */
1275 sigcopyset(&(data
->pending_mask
),os_context_sigmask_addr(context
));
1276 sigaddset_deferrable(os_context_sigmask_addr(context
));
1280 maybe_now_maybe_later(int signal
, siginfo_t
*info
, void *void_context
)
1282 SAVE_ERRNO(signal
,context
,void_context
);
1283 struct thread
*thread
= arch_os_get_current_thread();
1284 struct interrupt_data
*data
= thread
->interrupt_data
;
1285 if(!maybe_defer_handler(interrupt_handle_now
,data
,signal
,info
,context
))
1286 interrupt_handle_now(signal
, info
, context
);
1291 low_level_interrupt_handle_now(int signal
, siginfo_t
*info
,
1292 os_context_t
*context
)
1294 /* No FP control fixage needed, caller has done that. */
1295 check_blockables_blocked_or_lose(0);
1296 check_interrupts_enabled_or_lose(context
);
1297 (*interrupt_low_level_handlers
[signal
])(signal
, info
, context
);
1298 /* No Darwin context fixage needed, caller does that. */
1302 low_level_maybe_now_maybe_later(int signal
, siginfo_t
*info
, void *void_context
)
1304 SAVE_ERRNO(signal
,context
,void_context
);
1305 struct thread
*thread
= arch_os_get_current_thread();
1306 struct interrupt_data
*data
= thread
->interrupt_data
;
1308 if(!maybe_defer_handler(low_level_interrupt_handle_now
,data
,
1309 signal
,info
,context
))
1310 low_level_interrupt_handle_now(signal
, info
, context
);
1315 #ifdef THREADS_USING_GCSIGNAL
1317 /* This function must not cons, because that may trigger a GC. */
1319 sig_stop_for_gc_handler(int signal
, siginfo_t
*info
, os_context_t
*context
)
1321 struct thread
*thread
=arch_os_get_current_thread();
1322 boolean was_in_lisp
;
1324 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1325 * pseudo atomic until gc is finally allowed. */
1326 if (SymbolValue(GC_INHIBIT
,thread
) != NIL
) {
1327 FSHOW_SIGNAL((stderr
, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1328 SetSymbolValue(STOP_FOR_GC_PENDING
,T
,thread
);
1330 } else if (arch_pseudo_atomic_atomic(context
)) {
1331 FSHOW_SIGNAL((stderr
,"sig_stop_for_gc deferred (PA)\n"));
1332 SetSymbolValue(STOP_FOR_GC_PENDING
,T
,thread
);
1333 arch_set_pseudo_atomic_interrupted(context
);
1334 maybe_save_gc_mask_and_block_deferrables
1335 (os_context_sigmask_addr(context
));
1339 FSHOW_SIGNAL((stderr
, "/sig_stop_for_gc_handler\n"));
1341 /* Not PA and GC not inhibited -- we can stop now. */
1343 was_in_lisp
= !foreign_function_call_active_p(arch_os_get_current_thread());
1346 /* need the context stored so it can have registers scavenged */
1347 fake_foreign_function_call(context
);
1350 /* Not pending anymore. */
1351 SetSymbolValue(GC_PENDING
,NIL
,thread
);
1352 SetSymbolValue(STOP_FOR_GC_PENDING
,NIL
,thread
);
1354 /* Consider this: in a PA section GC is requested: GC_PENDING,
1355 * pseudo_atomic_interrupted and gc_blocked_deferrables are set,
1356 * deferrables are blocked then pseudo_atomic_atomic is cleared,
1357 * but a SIG_STOP_FOR_GC arrives before trapping to
1358 * interrupt_handle_pending. Here, GC_PENDING is cleared but
1359 * pseudo_atomic_interrupted is not and we go on running with
1360 * pseudo_atomic_interrupted but without a pending interrupt or
1361 * GC. GC_BLOCKED_DEFERRABLES is also left at 1. So let's tidy it
1363 if (thread
->interrupt_data
->gc_blocked_deferrables
) {
1364 FSHOW_SIGNAL((stderr
,"cleaning up after gc_blocked_deferrables\n"));
1365 clear_pseudo_atomic_interrupted(thread
);
1366 sigcopyset(os_context_sigmask_addr(context
),
1367 &thread
->interrupt_data
->pending_mask
);
1368 thread
->interrupt_data
->gc_blocked_deferrables
= 0;
1371 if(thread_state(thread
)!=STATE_RUNNING
) {
1372 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1373 fixnum_value(thread
->state
));
1376 set_thread_state(thread
,STATE_STOPPED
);
1377 FSHOW_SIGNAL((stderr
,"suspended\n"));
1379 /* While waiting for gc to finish occupy ourselves with zeroing
1380 * the unused portion of the control stack to reduce conservatism.
1381 * On hypothetic platforms with threads and exact gc it is
1382 * actually a must. */
1383 scrub_control_stack();
1385 wait_for_thread_state_change(thread
, STATE_STOPPED
);
1386 FSHOW_SIGNAL((stderr
,"resumed\n"));
1388 if(thread_state(thread
)!=STATE_RUNNING
) {
1389 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1390 fixnum_value(thread_state(thread
)));
1394 undo_fake_foreign_function_call(context
);
1401 interrupt_handle_now_handler(int signal
, siginfo_t
*info
, void *void_context
)
1403 SAVE_ERRNO(signal
,context
,void_context
);
1404 #ifndef LISP_FEATURE_WIN32
1405 if ((signal
== SIGILL
) || (signal
== SIGBUS
)
1406 #if !(defined(LISP_FEATURE_LINUX) || defined(LISP_FEATURE_ANDROID))
1407 || (signal
== SIGEMT
)
1410 corruption_warning_and_maybe_lose("Signal %d received (PC: %p)", signal
,
1411 *os_context_pc_addr(context
));
1413 interrupt_handle_now(signal
, info
, context
);
1417 /* manipulate the signal context and stack such that when the handler
1418 * returns, it will call function instead of whatever it was doing
1422 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1423 extern int *context_eflags_addr(os_context_t
*context
);
1426 extern lispobj
call_into_lisp(lispobj fun
, lispobj
*args
, int nargs
);
1427 extern void post_signal_tramp(void);
1428 extern void call_into_lisp_tramp(void);
1431 arrange_return_to_c_function(os_context_t
*context
,
1432 call_into_lisp_lookalike funptr
,
1435 #if !(defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_SAFEPOINT))
1436 check_gc_signals_unblocked_or_lose
1437 (os_context_sigmask_addr(context
));
1439 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1440 void * fun
=native_pointer(function
);
1441 void *code
= &(((struct simple_fun
*) fun
)->code
);
1444 /* Build a stack frame showing `interrupted' so that the
1445 * user's backtrace makes (as much) sense (as usual) */
1447 /* fp state is saved and restored by call_into_lisp */
1448 /* FIXME: errno is not restored, but since current uses of this
1449 * function only call Lisp code that signals an error, it's not
1450 * much of a problem. In other words, running out of the control
1451 * stack between a syscall and (GET-ERRNO) may clobber errno if
1452 * something fails during signalling or in the handler. But I
1453 * can't see what can go wrong as long as there is no CONTINUE
1454 * like restart on them. */
1455 #ifdef LISP_FEATURE_X86
1456 /* Suppose the existence of some function that saved all
1457 * registers, called call_into_lisp, then restored GP registers and
1458 * returned. It would look something like this:
1466 pushl {address of function to call}
1467 call 0x8058db0 <call_into_lisp>
1474 * What we do here is set up the stack that call_into_lisp would
1475 * expect to see if it had been called by this code, and frob the
1476 * signal context so that signal return goes directly to call_into_lisp,
1477 * and when that function (and the lisp function it invoked) returns,
1478 * it returns to the second half of this imaginary function which
1479 * restores all registers and returns to C
1481 * For this to work, the latter part of the imaginary function
1482 * must obviously exist in reality. That would be post_signal_tramp
1485 #ifndef LISP_FEATURE_DARWIN
1486 u32
*sp
=(u32
*)*os_context_register_addr(context
,reg_ESP
);
1489 #if defined(LISP_FEATURE_DARWIN)
1490 u32
*register_save_area
= (u32
*)os_validate(0, 0x40);
1492 FSHOW_SIGNAL((stderr
, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function
, sp
));
1493 FSHOW_SIGNAL((stderr
, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context
, &context
));
1495 /* 1. os_validate (malloc/mmap) register_save_block
1496 * 2. copy register state into register_save_block
1497 * 3. put a pointer to register_save_block in a register in the context
1498 * 4. set the context's EIP to point to a trampoline which:
1499 * a. builds the fake stack frame from the block
1500 * b. frees the block
1501 * c. calls the function
1504 *register_save_area
= *os_context_pc_addr(context
);
1505 *(register_save_area
+ 1) = function
;
1506 *(register_save_area
+ 2) = *os_context_register_addr(context
,reg_EDI
);
1507 *(register_save_area
+ 3) = *os_context_register_addr(context
,reg_ESI
);
1508 *(register_save_area
+ 4) = *os_context_register_addr(context
,reg_EDX
);
1509 *(register_save_area
+ 5) = *os_context_register_addr(context
,reg_ECX
);
1510 *(register_save_area
+ 6) = *os_context_register_addr(context
,reg_EBX
);
1511 *(register_save_area
+ 7) = *os_context_register_addr(context
,reg_EAX
);
1512 *(register_save_area
+ 8) = *context_eflags_addr(context
);
1514 *os_context_pc_addr(context
) =
1515 (os_context_register_t
) funptr
;
1516 *os_context_register_addr(context
,reg_ECX
) =
1517 (os_context_register_t
) register_save_area
;
1520 /* return address for call_into_lisp: */
1521 *(sp
-15) = (u32
)post_signal_tramp
;
1522 *(sp
-14) = function
; /* args for call_into_lisp : function*/
1523 *(sp
-13) = 0; /* arg array */
1524 *(sp
-12) = 0; /* no. args */
1525 /* this order matches that used in POPAD */
1526 *(sp
-11)=*os_context_register_addr(context
,reg_EDI
);
1527 *(sp
-10)=*os_context_register_addr(context
,reg_ESI
);
1529 *(sp
-9)=*os_context_register_addr(context
,reg_ESP
)-8;
1530 /* POPAD ignores the value of ESP: */
1532 *(sp
-7)=*os_context_register_addr(context
,reg_EBX
);
1534 *(sp
-6)=*os_context_register_addr(context
,reg_EDX
);
1535 *(sp
-5)=*os_context_register_addr(context
,reg_ECX
);
1536 *(sp
-4)=*os_context_register_addr(context
,reg_EAX
);
1537 *(sp
-3)=*context_eflags_addr(context
);
1538 *(sp
-2)=*os_context_register_addr(context
,reg_EBP
);
1539 *(sp
-1)=*os_context_pc_addr(context
);
1543 #elif defined(LISP_FEATURE_X86_64)
1544 u64
*sp
=(u64
*)*os_context_register_addr(context
,reg_RSP
);
1546 /* return address for call_into_lisp: */
1547 *(sp
-18) = (u64
)post_signal_tramp
;
1549 *(sp
-17)=*os_context_register_addr(context
,reg_R15
);
1550 *(sp
-16)=*os_context_register_addr(context
,reg_R14
);
1551 *(sp
-15)=*os_context_register_addr(context
,reg_R13
);
1552 *(sp
-14)=*os_context_register_addr(context
,reg_R12
);
1553 *(sp
-13)=*os_context_register_addr(context
,reg_R11
);
1554 *(sp
-12)=*os_context_register_addr(context
,reg_R10
);
1555 *(sp
-11)=*os_context_register_addr(context
,reg_R9
);
1556 *(sp
-10)=*os_context_register_addr(context
,reg_R8
);
1557 *(sp
-9)=*os_context_register_addr(context
,reg_RDI
);
1558 *(sp
-8)=*os_context_register_addr(context
,reg_RSI
);
1559 /* skip RBP and RSP */
1560 *(sp
-7)=*os_context_register_addr(context
,reg_RBX
);
1561 *(sp
-6)=*os_context_register_addr(context
,reg_RDX
);
1562 *(sp
-5)=*os_context_register_addr(context
,reg_RCX
);
1563 *(sp
-4)=*os_context_register_addr(context
,reg_RAX
);
1564 *(sp
-3)=*context_eflags_addr(context
);
1565 *(sp
-2)=*os_context_register_addr(context
,reg_RBP
);
1566 *(sp
-1)=*os_context_pc_addr(context
);
1568 *os_context_register_addr(context
,reg_RDI
) =
1569 (os_context_register_t
)function
; /* function */
1570 *os_context_register_addr(context
,reg_RSI
) = 0; /* arg. array */
1571 *os_context_register_addr(context
,reg_RDX
) = 0; /* no. args */
1573 struct thread
*th
=arch_os_get_current_thread();
1574 build_fake_control_stack_frames(th
,context
);
1577 #ifdef LISP_FEATURE_X86
1579 #if !defined(LISP_FEATURE_DARWIN)
1580 *os_context_pc_addr(context
) = (os_context_register_t
)funptr
;
1581 *os_context_register_addr(context
,reg_ECX
) = 0;
1582 *os_context_register_addr(context
,reg_EBP
) = (os_context_register_t
)(sp
-2);
1584 *os_context_register_addr(context
,reg_UESP
) =
1585 (os_context_register_t
)(sp
-15);
1587 *os_context_register_addr(context
,reg_ESP
) = (os_context_register_t
)(sp
-15);
1588 #endif /* __NETBSD__ */
1589 #endif /* LISP_FEATURE_DARWIN */
1591 #elif defined(LISP_FEATURE_X86_64)
1592 *os_context_pc_addr(context
) = (os_context_register_t
)funptr
;
1593 *os_context_register_addr(context
,reg_RCX
) = 0;
1594 *os_context_register_addr(context
,reg_RBP
) = (os_context_register_t
)(sp
-2);
1595 *os_context_register_addr(context
,reg_RSP
) = (os_context_register_t
)(sp
-18);
1597 /* this much of the calling convention is common to all
1599 *os_context_pc_addr(context
) = (os_context_register_t
)(unsigned long)code
;
1600 *os_context_register_addr(context
,reg_NARGS
) = 0;
1602 *os_context_register_addr(context
,reg_LIP
) =
1603 (os_context_register_t
)(unsigned long)code
;
1605 *os_context_register_addr(context
,reg_CFP
) =
1606 (os_context_register_t
)(unsigned long)access_control_frame_pointer(th
);
1608 #ifdef ARCH_HAS_NPC_REGISTER
1609 *os_context_npc_addr(context
) =
1610 4 + *os_context_pc_addr(context
);
1612 #if defined(LISP_FEATURE_SPARC) || defined(LISP_FEATURE_ARM) || defined(LISP_FEATURE_ARM64)
1613 *os_context_register_addr(context
,reg_CODE
) =
1614 (os_context_register_t
)(fun
+ FUN_POINTER_LOWTAG
);
1616 FSHOW((stderr
, "/arranged return to Lisp function (0x%lx)\n",
1621 arrange_return_to_lisp_function(os_context_t
*context
, lispobj function
)
1623 #if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_X86)
1624 arrange_return_to_c_function(context
,
1625 (call_into_lisp_lookalike
)call_into_lisp_tramp
,
1628 arrange_return_to_c_function(context
, call_into_lisp
, function
);
1632 // These have undefined_alien_function tramp in x-assem.S
1633 #if !(defined(LISP_FEATURE_X86_64) || defined(LISP_FEATURE_ARM) || defined(LISP_FEATURE_ARM64))
1634 /* KLUDGE: Theoretically the approach we use for undefined alien
1635 * variables should work for functions as well, but on PPC/Darwin
1636 * we get bus error at bogus addresses instead, hence this workaround,
1637 * that has the added benefit of automatically discriminating between
1638 * functions and variables.
1641 undefined_alien_function(void)
1643 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUN_ERROR
));
1647 void lower_thread_control_stack_guard_page(struct thread
*th
)
1649 protect_control_stack_guard_page(0, th
);
1650 protect_control_stack_return_guard_page(1, th
);
1651 th
->control_stack_guard_page_protected
= NIL
;
1652 fprintf(stderr
, "INFO: Control stack guard page unprotected\n");
1655 void reset_thread_control_stack_guard_page(struct thread
*th
)
1657 memset(CONTROL_STACK_GUARD_PAGE(th
), 0, os_vm_page_size
);
1658 protect_control_stack_guard_page(1, th
);
1659 protect_control_stack_return_guard_page(0, th
);
1660 th
->control_stack_guard_page_protected
= T
;
1661 fprintf(stderr
, "INFO: Control stack guard page reprotected\n");
1664 /* Called from the REPL, too. */
1665 void reset_control_stack_guard_page(void)
1667 struct thread
*th
=arch_os_get_current_thread();
1668 if (th
->control_stack_guard_page_protected
== NIL
) {
1669 reset_thread_control_stack_guard_page(th
);
/* Drop the guard page protection for the current thread. */
void lower_control_stack_guard_page(void)
{
    lower_thread_control_stack_guard_page(arch_os_get_current_thread());
}
1679 handle_guard_page_triggered(os_context_t
*context
,os_vm_address_t addr
)
1681 struct thread
*th
=arch_os_get_current_thread();
1683 if(addr
>= CONTROL_STACK_HARD_GUARD_PAGE(th
) &&
1684 addr
< CONTROL_STACK_HARD_GUARD_PAGE(th
) + os_vm_page_size
) {
1685 lose("Control stack exhausted, fault: %p, PC: %p",
1686 addr
, *os_context_pc_addr(context
));
1688 else if(addr
>= CONTROL_STACK_GUARD_PAGE(th
) &&
1689 addr
< CONTROL_STACK_GUARD_PAGE(th
) + os_vm_page_size
) {
1690 /* We hit the end of the control stack: disable guard page
1691 * protection so the error handler has some headroom, protect the
1692 * previous page so that we can catch returns from the guard page
1693 * and restore it. */
1694 if (th
->control_stack_guard_page_protected
== NIL
)
1695 lose("control_stack_guard_page_protected NIL");
1696 lower_control_stack_guard_page();
1697 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1698 /* For the unfortunate case, when the control stack is
1699 * exhausted in a signal handler. */
1700 unblock_signals_in_context_and_maybe_warn(context
);
1702 arrange_return_to_lisp_function
1703 (context
, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR
));
1706 else if(addr
>= CONTROL_STACK_RETURN_GUARD_PAGE(th
) &&
1707 addr
< CONTROL_STACK_RETURN_GUARD_PAGE(th
) + os_vm_page_size
) {
1708 /* We're returning from the guard page: reprotect it, and
1709 * unprotect this one. This works even if we somehow missed
1710 * the return-guard-page, and hit it on our way to new
1711 * exhaustion instead. */
1712 if (th
->control_stack_guard_page_protected
!= NIL
)
1713 lose("control_stack_guard_page_protected not NIL");
1714 reset_control_stack_guard_page();
1717 else if(addr
>= BINDING_STACK_HARD_GUARD_PAGE(th
) &&
1718 addr
< BINDING_STACK_HARD_GUARD_PAGE(th
) + os_vm_page_size
) {
1719 lose("Binding stack exhausted");
1721 else if(addr
>= BINDING_STACK_GUARD_PAGE(th
) &&
1722 addr
< BINDING_STACK_GUARD_PAGE(th
) + os_vm_page_size
) {
1723 protect_binding_stack_guard_page(0, NULL
);
1724 protect_binding_stack_return_guard_page(1, NULL
);
1725 fprintf(stderr
, "INFO: Binding stack guard page unprotected\n");
1727 /* For the unfortunate case, when the binding stack is
1728 * exhausted in a signal handler. */
1729 unblock_signals_in_context_and_maybe_warn(context
);
1730 arrange_return_to_lisp_function
1731 (context
, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR
));
1734 else if(addr
>= BINDING_STACK_RETURN_GUARD_PAGE(th
) &&
1735 addr
< BINDING_STACK_RETURN_GUARD_PAGE(th
) + os_vm_page_size
) {
1736 protect_binding_stack_guard_page(1, NULL
);
1737 protect_binding_stack_return_guard_page(0, NULL
);
1738 fprintf(stderr
, "INFO: Binding stack guard page reprotected\n");
1741 else if(addr
>= ALIEN_STACK_HARD_GUARD_PAGE(th
) &&
1742 addr
< ALIEN_STACK_HARD_GUARD_PAGE(th
) + os_vm_page_size
) {
1743 lose("Alien stack exhausted");
1745 else if(addr
>= ALIEN_STACK_GUARD_PAGE(th
) &&
1746 addr
< ALIEN_STACK_GUARD_PAGE(th
) + os_vm_page_size
) {
1747 protect_alien_stack_guard_page(0, NULL
);
1748 protect_alien_stack_return_guard_page(1, NULL
);
1749 fprintf(stderr
, "INFO: Alien stack guard page unprotected\n");
1751 /* For the unfortunate case, when the alien stack is
1752 * exhausted in a signal handler. */
1753 unblock_signals_in_context_and_maybe_warn(context
);
1754 arrange_return_to_lisp_function
1755 (context
, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR
));
1758 else if(addr
>= ALIEN_STACK_RETURN_GUARD_PAGE(th
) &&
1759 addr
< ALIEN_STACK_RETURN_GUARD_PAGE(th
) + os_vm_page_size
) {
1760 protect_alien_stack_guard_page(1, NULL
);
1761 protect_alien_stack_return_guard_page(0, NULL
);
1762 fprintf(stderr
, "INFO: Alien stack guard page reprotected\n");
1765 else if (addr
>= undefined_alien_address
&&
1766 addr
< undefined_alien_address
+ os_vm_page_size
) {
1767 arrange_return_to_lisp_function
1768 (context
, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR
));
1775 * noise to install handlers
1778 #ifndef LISP_FEATURE_WIN32
1779 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1780 * they are blocked, in Linux 2.6 the default handler is invoked
1781 * instead that usually coredumps. One might hastily think that adding
1782 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
1783 * the whole sa_mask is ignored and instead of not adding the signal
1784 * in question to the mask. That means if it's not blockable the
1785 * signal must be unblocked at the beginning of signal handlers.
1787 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1788 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1789 * will be unblocked in the sigmask during the signal handler. -- RMK
1792 static volatile int sigaction_nodefer_works
= -1;
1794 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1795 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
1798 sigaction_nodefer_test_handler(int signal
, siginfo_t
*info
, void *void_context
)
1802 get_current_sigmask(¤t
);
1803 /* There should be exactly two blocked signals: the two we added
1804 * to sa_mask when setting up the handler. NetBSD doesn't block
1805 * the signal we're handling when SA_NODEFER is set; Linux before
1806 * 2.6.13 or so also doesn't block the other signal when
1807 * SA_NODEFER is set. */
1808 for(i
= 1; i
< NSIG
; i
++)
1809 if (sigismember(¤t
, i
) !=
1810 (((i
== SA_NODEFER_TEST_BLOCK_SIGNAL
) || (i
== signal
)) ? 1 : 0)) {
1811 FSHOW_SIGNAL((stderr
, "SA_NODEFER doesn't work, signal %d\n", i
));
1812 sigaction_nodefer_works
= 0;
1814 if (sigaction_nodefer_works
== -1)
1815 sigaction_nodefer_works
= 1;
1819 see_if_sigaction_nodefer_works(void)
1821 struct sigaction sa
, old_sa
;
1823 sa
.sa_flags
= SA_SIGINFO
| SA_NODEFER
;
1824 sa
.sa_sigaction
= sigaction_nodefer_test_handler
;
1825 sigemptyset(&sa
.sa_mask
);
1826 sigaddset(&sa
.sa_mask
, SA_NODEFER_TEST_BLOCK_SIGNAL
);
1827 sigaddset(&sa
.sa_mask
, SA_NODEFER_TEST_KILL_SIGNAL
);
1828 sigaction(SA_NODEFER_TEST_KILL_SIGNAL
, &sa
, &old_sa
);
1829 /* Make sure no signals are blocked. */
1832 sigemptyset(&empty
);
1833 thread_sigmask(SIG_SETMASK
, &empty
, 0);
1835 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL
);
1836 while (sigaction_nodefer_works
== -1);
1837 sigaction(SA_NODEFER_TEST_KILL_SIGNAL
, &old_sa
, NULL
);
1840 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1841 #undef SA_NODEFER_TEST_KILL_SIGNAL
1843 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
1846 signal_thread_trampoline(void *pthread_arg
)
1848 intptr_t signo
= (intptr_t) pthread_arg
;
1849 os_context_t fake_context
;
1850 siginfo_t fake_info
;
1851 #ifdef LISP_FEATURE_PPC
1855 memset(&fake_info
, 0, sizeof(fake_info
));
1856 memset(&fake_context
, 0, sizeof(fake_context
));
1857 #ifdef LISP_FEATURE_PPC
1858 memset(&uc_regs
, 0, sizeof(uc_regs
));
1859 fake_context
.uc_mcontext
.uc_regs
= &uc_regs
;
1862 *os_context_pc_addr(&fake_context
) = (intptr_t) &signal_thread_trampoline
;
1863 #ifdef ARCH_HAS_STACK_POINTER /* aka x86(-64) */
1864 *os_context_sp_addr(&fake_context
) = (intptr_t) __builtin_frame_address(0);
1867 signal_handler_callback(interrupt_handlers
[signo
].lisp
,
1868 signo
, &fake_info
, &fake_context
);
1873 sigprof_handler_trampoline(int signal
, siginfo_t
*info
, void *void_context
)
1875 SAVE_ERRNO(signal
,context
,void_context
);
1876 struct thread
*self
= arch_os_get_current_thread();
1878 /* alloc() is not re-entrant and still uses pseudo atomic (even though
1879 * inline allocation does not). In this case, give up. */
1880 if (get_pseudo_atomic_atomic(self
))
1883 struct alloc_region tmp
= self
->alloc_region
;
1884 self
->alloc_region
= self
->sprof_alloc_region
;
1885 self
->sprof_alloc_region
= tmp
;
1887 interrupt_handle_now_handler(signal
, info
, void_context
);
1889 /* And we're back. We know that the SIGPROF handler never unwinds
1890 * non-locally, and can simply swap things back: */
1892 tmp
= self
->alloc_region
;
1893 self
->alloc_region
= self
->sprof_alloc_region
;
1894 self
->sprof_alloc_region
= tmp
;
1897 ; /* Dear C compiler, it's OK to have a label here. */
1902 spawn_signal_thread_handler(int signal
, siginfo_t
*info
, void *void_context
)
1904 SAVE_ERRNO(signal
,context
,void_context
);
1906 pthread_attr_t attr
;
1909 if (pthread_attr_init(&attr
))
1911 if (pthread_attr_setstacksize(&attr
, thread_control_stack_size
))
1913 if (pthread_create(&th
, &attr
, &signal_thread_trampoline
, (void*)(intptr_t) signal
))
1915 if (pthread_attr_destroy(&attr
))
1922 lose("spawn_signal_thread_handler");
1927 unblock_me_trampoline(int signal
, siginfo_t
*info
, void *void_context
)
1929 SAVE_ERRNO(signal
,context
,void_context
);
1932 sigemptyset(&unblock
);
1933 sigaddset(&unblock
, signal
);
1934 thread_sigmask(SIG_UNBLOCK
, &unblock
, 0);
1935 interrupt_handle_now(signal
, info
, context
);
1940 low_level_unblock_me_trampoline(int signal
, siginfo_t
*info
, void *void_context
)
1942 SAVE_ERRNO(signal
,context
,void_context
);
1945 sigemptyset(&unblock
);
1946 sigaddset(&unblock
, signal
);
1947 thread_sigmask(SIG_UNBLOCK
, &unblock
, 0);
1948 (*interrupt_low_level_handlers
[signal
])(signal
, info
, context
);
1953 low_level_handle_now_handler(int signal
, siginfo_t
*info
, void *void_context
)
1955 SAVE_ERRNO(signal
,context
,void_context
);
1956 (*interrupt_low_level_handlers
[signal
])(signal
, info
, context
);
1961 undoably_install_low_level_interrupt_handler (int signal
,
1962 interrupt_handler_t handler
)
1964 struct sigaction sa
;
1966 if (0 > signal
|| signal
>= NSIG
) {
1967 lose("bad signal number %d\n", signal
);
1970 if (ARE_SAME_HANDLER(handler
, SIG_DFL
))
1971 sa
.sa_sigaction
= (void (*)(int, siginfo_t
*, void*))handler
;
1972 else if (sigismember(&deferrable_sigset
,signal
))
1973 sa
.sa_sigaction
= low_level_maybe_now_maybe_later
;
1974 else if (!sigaction_nodefer_works
&&
1975 !sigismember(&blockable_sigset
, signal
))
1976 sa
.sa_sigaction
= low_level_unblock_me_trampoline
;
1978 sa
.sa_sigaction
= low_level_handle_now_handler
;
1980 #ifdef LISP_FEATURE_SB_THRUPTION
1981 /* It's in `deferrable_sigset' so that we block&unblock it properly,
1982 * but we don't actually want to defer it. And if we put it only
1983 * into blockable_sigset, we'd have to special-case it around thread
1984 * creation at least. */
1985 if (signal
== SIGPIPE
)
1986 sa
.sa_sigaction
= low_level_handle_now_handler
;
1989 sigcopyset(&sa
.sa_mask
, &blockable_sigset
);
1990 sa
.sa_flags
= SA_SIGINFO
| SA_RESTART
1991 | (sigaction_nodefer_works
? SA_NODEFER
: 0);
1992 #if defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK)
1993 if(signal
==SIG_MEMORY_FAULT
) {
1994 sa
.sa_flags
|= SA_ONSTACK
;
1995 # ifdef LISP_FEATURE_SB_SAFEPOINT
1996 sigaddset(&sa
.sa_mask
, SIGRTMIN
);
1997 sigaddset(&sa
.sa_mask
, SIGRTMIN
+1);
2002 sigaction(signal
, &sa
, NULL
);
2003 interrupt_low_level_handlers
[signal
] =
2004 (ARE_SAME_HANDLER(handler
, SIG_DFL
) ? 0 : handler
);
2008 /* This is called from Lisp. */
/* install_handler: record HANDLER as the Lisp-visible handler for
 * SIGNAL and, unless a C-level low-level handler already owns the
 * signal, choose and install the appropriate outermost UNIX handler.
 * Returns the previous Lisp handler value (as a uword_t). */
2010 install_handler(int signal
, void handler(int, siginfo_t
*, os_context_t
*),
2013 #ifndef LISP_FEATURE_WIN32
2014 struct sigaction sa
;
2016 union interrupt_handler oldhandler
;
2018 FSHOW((stderr
, "/entering POSIX install_handler(%d, ..)\n", signal
));
/* Block everything blockable while the handler tables are mutated;
 * the saved mask is restored below. */
2020 block_blockable_signals(0, &old
);
2022 FSHOW((stderr
, "/interrupt_low_level_handlers[signal]=%p\n",
2023 interrupt_low_level_handlers
[signal
]));
/* Only install an outermost UNIX handler when no low-level (C)
 * handler has claimed this signal; otherwise just record the Lisp
 * handler in the table. */
2024 if (interrupt_low_level_handlers
[signal
]==0) {
2025 if (ARE_SAME_HANDLER(handler
, SIG_DFL
) ||
2026 ARE_SAME_HANDLER(handler
, SIG_IGN
))
2027 sa
.sa_sigaction
= (void (*)(int, siginfo_t
*, void*))handler
;
2028 #ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
/* SIGPROF gets its dedicated trampoline; other non-synchronous
 * signals are handed to a freshly spawned handler thread. */
2029 else if (signal
== SIGPROF
)
2030 sa
.sa_sigaction
= sigprof_handler_trampoline
;
2031 else if (!synchronous
)
2032 sa
.sa_sigaction
= spawn_signal_thread_handler
;
2034 else if (sigismember(&deferrable_sigset
, signal
))
2035 sa
.sa_sigaction
= maybe_now_maybe_later
;
2036 else if (!sigaction_nodefer_works
&&
2037 !sigismember(&blockable_sigset
, signal
))
2038 sa
.sa_sigaction
= unblock_me_trampoline
;
2040 sa
.sa_sigaction
= interrupt_handle_now_handler
;
/* Handlers run with all blockable signals masked. */
2042 sigcopyset(&sa
.sa_mask
, &blockable_sigset
);
2043 sa
.sa_flags
= SA_SIGINFO
| SA_RESTART
|
2044 (sigaction_nodefer_works
? SA_NODEFER
: 0);
2045 sigaction(signal
, &sa
, NULL
);
/* Swap the Lisp-level handler into the table, keeping the old one
 * so it can be returned to the caller. */
2048 oldhandler
= interrupt_handlers
[signal
];
2049 interrupt_handlers
[signal
].c
= handler
;
2051 thread_sigmask(SIG_SETMASK
, &old
, 0);
2053 FSHOW((stderr
, "/leaving POSIX install_handler(%d, ..)\n", signal
));
2055 return (uword_t
)oldhandler
.lisp
;
2057 /* Probably-wrong Win32 hack */
2062 /* This must not go through lisp as it's allowed anytime, even when on
2065 sigabrt_handler(int signal
, siginfo_t
*info
, os_context_t
*context
)
2067 /* Save the interrupt context. No need to undo it, since lose()
2068 * shouldn't return. */
2069 fake_foreign_function_call(context
);
2070 lose("SIGABRT received.\n");
2074 interrupt_init(void)
2076 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
2078 SHOW("entering interrupt_init()");
2079 #ifndef LISP_FEATURE_WIN32
2080 see_if_sigaction_nodefer_works();
2082 sigemptyset(&deferrable_sigset
);
2083 sigemptyset(&blockable_sigset
);
2084 sigemptyset(&gc_sigset
);
2085 sigaddset_deferrable(&deferrable_sigset
);
2086 sigaddset_blockable(&blockable_sigset
);
2087 sigaddset_gc(&gc_sigset
);
2090 #ifndef LISP_FEATURE_WIN32
2091 /* Set up high level handler information. */
2092 for (i
= 0; i
< NSIG
; i
++) {
2093 interrupt_handlers
[i
].c
=
2094 /* (The cast here blasts away the distinction between
2095 * SA_SIGACTION-style three-argument handlers and
2096 * signal(..)-style one-argument handlers, which is OK
2097 * because it works to call the 1-argument form where the
2098 * 3-argument form is expected.) */
2099 (void (*)(int, siginfo_t
*, os_context_t
*))SIG_DFL
;
2101 undoably_install_low_level_interrupt_handler(SIGABRT
, sigabrt_handler
);
2103 SHOW("returning from interrupt_init()");
2106 #ifndef LISP_FEATURE_WIN32
/* Expose si_code to the Lisp side, which cannot portably poke into a
 * siginfo_t itself. */
int
siginfo_code(siginfo_t *info)
{
    return info->si_code;
}
/* Most recent memory-fault address, stashed by lisp_memory_fault_error
 * for the Lisp side to read.  Racy: a second fault may overwrite it
 * before the first is read (see the FIXME in lisp_memory_fault_error). */
2112 os_vm_address_t current_memory_fault_address
;
2115 lisp_memory_fault_error(os_context_t
*context
, os_vm_address_t addr
)
2117 /* FIXME: This is lossy: if we get another memory fault (eg. from
2118 * another thread) before lisp has read this, we lose the information.
2119 * However, since this is mostly informative, we'll live with that for
2120 * now -- some address is better then no address in this case.
2122 current_memory_fault_address
= addr
;
2124 /* If we lose on corruption, provide LDB with debugging information. */
2125 fake_foreign_function_call(context
);
2127 /* To allow debugging memory faults in signal handlers and such. */
2128 corruption_warning_and_maybe_lose("Memory fault at %p (pc=%p, sp=%p)",
2130 *os_context_pc_addr(context
),
2131 #ifdef ARCH_HAS_STACK_POINTER
2132 *os_context_sp_addr(context
)
2137 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
2138 undo_fake_foreign_function_call(context
);
2139 unblock_signals_in_context_and_maybe_warn(context
);
2140 arrange_return_to_lisp_function(context
,
2141 StaticSymbolFunction(MEMORY_FAULT_ERROR
));
2143 unblock_gc_signals(0, 0);
2144 funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR
));
2145 undo_fake_foreign_function_call(context
);
2151 unhandled_trap_error(os_context_t
*context
)
2153 DX_ALLOC_SAP(context_sap
, context
);
2154 fake_foreign_function_call(context
);
2155 #ifndef LISP_FEATURE_SB_SAFEPOINT
2156 unblock_gc_signals(0, 0);
2159 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
2160 thread_sigmask(SIG_SETMASK
, os_context_sigmask_addr(context
), 0);
2162 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR
), context_sap
);
2163 lose("UNHANDLED-TRAP-ERROR fell through");
2166 /* Common logic for trapping instructions. How we actually handle each
2167 * case is highly architecture dependent, but the overall shape is
2170 handle_trap(os_context_t
*context
, int trap
)
2173 #if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
2174 case trap_PendingInterrupt
:
2175 FSHOW((stderr
, "/<trap pending interrupt>\n"));
2176 arch_skip_instruction(context
);
2177 interrupt_handle_pending(context
);
2182 #ifdef trap_InvalidArgCount
2183 case trap_InvalidArgCount
:
2185 FSHOW((stderr
, "/<trap error/cerror %d>\n", trap
));
2186 interrupt_internal_error(context
, trap
==trap_Cerror
);
2188 case trap_Breakpoint
:
2189 arch_handle_breakpoint(context
);
2191 case trap_FunEndBreakpoint
:
2192 arch_handle_fun_end_breakpoint(context
);
2194 #ifdef trap_AfterBreakpoint
2195 case trap_AfterBreakpoint
:
2196 arch_handle_after_breakpoint(context
);
2199 #ifdef trap_SingleStepAround
2200 case trap_SingleStepAround
:
2201 case trap_SingleStepBefore
:
2202 arch_handle_single_step_trap(context
, trap
);
2205 #ifdef trap_GlobalSafepoint
2206 case trap_GlobalSafepoint
:
2207 fake_foreign_function_call(context
);
2208 thread_in_lisp_raised(context
);
2209 undo_fake_foreign_function_call(context
);
2210 arch_skip_instruction(context
);
2212 case trap_CspSafepoint
:
2213 fake_foreign_function_call(context
);
2214 thread_in_safety_transition(context
);
2215 undo_fake_foreign_function_call(context
);
2216 arch_skip_instruction(context
);
2219 #if defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_GENCGC)
2220 case trap_Allocation
:
2221 arch_handle_allocation_trap(context
);
2222 arch_skip_instruction(context
);
2226 fake_foreign_function_call(context
);
2227 lose("%%PRIMITIVE HALT called; the party is over.\n");
2229 unhandled_trap_error(context
);