De-flake 'traceroot' test
[sbcl.git] / src / runtime / interrupt.c
blobd379e67f8f40b4472c2e089ced8219ec6f2519dd
1 /*
2 * interrupt-handling magic
3 */
5 /*
6 * This software is part of the SBCL system. See the README file for
7 * more information.
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handle we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
43 #include "sbcl.h"
45 #include <stdio.h>
46 #include <stdlib.h>
47 #include <string.h>
48 #include <signal.h>
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
51 #include <sys/wait.h>
52 #endif
53 #include <errno.h>
55 #include "runtime.h"
56 #include "arch.h"
57 #include "os.h"
58 #include "interrupt.h"
59 #include "globals.h"
60 #include "lispregs.h"
61 #include "validate.h"
62 #include "interr.h"
63 #include "gc.h"
64 #include "alloc.h"
65 #include "dynbind.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
72 * This is a workaround for some slightly silly Linux/GNU Libc
73 * behaviour: glibc defines sigset_t to support 1024 signals, which is
74 * more than the kernel. This is usually not a problem, but becomes
75 * one when we want to save a signal mask from a ucontext, and restore
76 * it later into another ucontext: the ucontext is allocated on the
77 * stack by the kernel, so copying a libc-sized sigset_t into it will
78 * overflow and cause other data on the stack to be corrupted */
79 /* FIXME: do not rely on NSIG being a multiple of 8 */
81 #ifdef LISP_FEATURE_WIN32
82 # define REAL_SIGSET_SIZE_BYTES (4)
83 #else
84 # define REAL_SIGSET_SIZE_BYTES ((NSIG/8))
85 #endif
87 static inline void
88 sigcopyset(sigset_t *new, sigset_t *old)
90 memcpy(new, old, REAL_SIGSET_SIZE_BYTES);
93 /* When we catch an internal error, should we pass it back to Lisp to
94 * be handled in a high-level way? (Early in cold init, the answer is
95 * 'no', because Lisp is still too brain-dead to handle anything.
96 * After sufficient initialization has been completed, the answer
97 * becomes 'yes'.) */
98 boolean internal_errors_enabled = 0;
100 #ifndef LISP_FEATURE_WIN32
101 static
102 void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
103 #endif
104 union interrupt_handler interrupt_handlers[NSIG];
106 /* Under Linux on some architectures, we appear to have to restore the
107 * FPU control word from the context, as after the signal is delivered
108 * we appear to have a null FPU control word. */
109 #if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
110 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
111 os_context_t *context = arch_os_get_context(&void_context); \
112 os_restore_fp_control(context);
113 #else
114 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
115 os_context_t *context = arch_os_get_context(&void_context);
116 #endif
118 /* Foreign code may want to start some threads on its own.
119 * Non-targetted, truly asynchronous signals can be delivered to
120  * basically any thread, but invoking Lisp handlers in such foreign
121 * threads is really bad, so let's resignal it.
123 * This should at least bring attention to the problem, but it cannot
124 * work for SIGSEGV and similar. It is good enough for timers, and
125 * maybe all deferrables. */
127 #ifndef LISP_FEATURE_WIN32
128 static void
129 add_handled_signals(sigset_t *sigset)
131 int i;
132 for(i = 1; i < NSIG; i++) {
133 if (!(ARE_SAME_HANDLER(interrupt_low_level_handlers[i], SIG_DFL)) ||
134 !(ARE_SAME_HANDLER(interrupt_handlers[i].c, SIG_DFL))) {
135 sigaddset(sigset, i);
140 void block_signals(sigset_t *what, sigset_t *where, sigset_t *old);
141 #endif
/* If the current thread is not a Lisp thread, arrange for the signal to
 * be handled by one that is: warn (for non-deferrable signals), block
 * every handled signal in both the live mask and the interrupted
 * context's mask, then re-send the signal to the whole process with
 * kill().  Returns 1 when resignalled, 0 when the caller should handle
 * the signal itself.  (NOTE(review): brace-only lines were lost in this
 * extraction; compare against upstream before editing.) */
143 static boolean
144 maybe_resignal_to_lisp_thread(int signal, os_context_t *context)
146 #ifndef LISP_FEATURE_WIN32
147     if (!lisp_thread_p(context)) {
148         if (!(sigismember(&deferrable_sigset,signal))) {
149             corruption_warning_and_maybe_lose
150 #ifdef LISP_FEATURE_SB_THREAD
151                 ("Received signal %d in non-lisp thread %lu, resignalling to a lisp thread.",
152                  signal, pthread_self());
153 #else
154                 ("Received signal %d in non-lisp thread, resignalling to a lisp thread.",
155                  signal);
156 #endif
159         sigset_t sigset;
160         sigemptyset(&sigset);
            /* Block all handled signals here and in the interrupted
             * context so this thread won't receive the resignalled
             * signal itself. */
161         add_handled_signals(&sigset);
162         block_signals(&sigset, 0, 0);
163         block_signals(&sigset, os_context_sigmask_addr(context), 0);
164         kill(getpid(), signal);
166         return 1;
167     } else
168 #endif
169         return 0;
172 /* These are to be used in signal handlers. Currently all handlers are
173 * called from one of:
175 * interrupt_handle_now_handler
176 * maybe_now_maybe_later
177 * unblock_me_trampoline
178 * low_level_handle_now_handler
179 * low_level_maybe_now_maybe_later
180 * low_level_unblock_me_trampoline
182 * This gives us a single point of control (or six) over errno, fp
183 * control word, and fixing up signal context on sparc.
185 * The SPARC/Linux platform doesn't quite do signals the way we want
186 * them done. The third argument in the handler isn't filled in by the
187 * kernel properly, so we fix it up ourselves in the
188 * arch_os_get_context(..) function. -- CSR, 2002-07-23
190 #define SAVE_ERRNO(signal,context,void_context) \
192 int _saved_errno = errno; \
193 RESTORE_FP_CONTROL_WORD(context,void_context); \
194 if (!maybe_resignal_to_lisp_thread(signal, context)) \
197 #define RESTORE_ERRNO \
199 errno = _saved_errno; \
202 static void run_deferred_handler(struct interrupt_data *data,
203 os_context_t *context);
204 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
205 static void store_signal_data_for_later (struct interrupt_data *data,
206 void *handler, int signal,
207 siginfo_t *info,
208 os_context_t *context);
211 /* Generic signal related utilities. */
213 void
214 get_current_sigmask(sigset_t *sigset)
216 /* Get the current sigmask, by blocking the empty set. */
217 thread_sigmask(SIG_BLOCK, 0, sigset);
220 void
221 block_signals(sigset_t *what, sigset_t *where, sigset_t *old)
223 if (where) {
224 int i;
225 if (old)
226 sigcopyset(old, where);
227 for(i = 1; i < NSIG; i++) {
228 if (sigismember(what, i))
229 sigaddset(where, i);
231 } else {
232 thread_sigmask(SIG_BLOCK, what, old);
236 void
237 unblock_signals(sigset_t *what, sigset_t *where, sigset_t *old)
239 if (where) {
240 int i;
241 if (old)
242 sigcopyset(old, where);
243 for(i = 1; i < NSIG; i++) {
244 if (sigismember(what, i))
245 sigdelset(where, i);
247 } else {
248 thread_sigmask(SIG_UNBLOCK, what, old);
252 static void
253 print_sigset(sigset_t *sigset)
255 int i;
256 for(i = 1; i < NSIG; i++) {
257 if (sigismember(sigset, i))
258 fprintf(stderr, "Signal %d masked\n", i);
262 /* Return 1 if all signals in sigset2 are masked in sigset, return 0
263  * if all are unmasked, else die. Passing NULL for sigset is a shorthand
264  * for the current sigmask. */
265 boolean
266 all_signals_blocked_p(sigset_t *sigset, sigset_t *sigset2,
267                       const char *name)
269 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
270     int i;
271     boolean has_blocked = 0, has_unblocked = 0;
272     sigset_t current;
        /* NULL sigset means "the current thread's mask". */
273     if (sigset == 0) {
274         get_current_sigmask(&current);
275         sigset = &current;
277     for(i = 1; i < NSIG; i++) {
278         if (sigismember(sigset2, i)) {
279             if (sigismember(sigset, i))
280                 has_blocked = 1;
281             else
282                 has_unblocked = 1;
        /* A mixed state (some of NAME blocked, some not) indicates a
         * broken invariant: dump the mask and die. */
285     if (has_blocked && has_unblocked) {
286         print_sigset(sigset);
287         lose("some %s signals blocked, some unblocked\n", name);
289     if (has_blocked)
290         return 1;
291     else
292         return 0;
        /* NOTE(review): on WIN32 without SB_THREAD this whole body is
         * compiled out and the function falls off the end without a
         * return value — presumably callers never reach it there;
         * verify against upstream. */
293 #endif
297 /* Deferrables, blockables, gc signals. */
/* Add to S every "deferrable" signal: the asynchronous signals whose
 * Lisp-level handling may be postponed while Lisp runs code that must
 * not be interrupted (WITHOUT-INTERRUPTS, pseudo-atomic sections). */
void
sigaddset_deferrable(sigset_t *s)
{
    static const int deferrable[] = {
        SIGHUP, SIGINT, SIGTERM, SIGQUIT, SIGPIPE, SIGALRM, SIGURG,
        SIGTSTP, SIGCHLD, SIGIO,
#ifndef LISP_FEATURE_HPUX
        /* HP-UX doesn't get these two (see upstream history). */
        SIGXCPU, SIGXFSZ,
#endif
        SIGVTALRM, SIGPROF, SIGWINCH
    };
    unsigned i;
    for (i = 0; i < sizeof deferrable / sizeof deferrable[0]; i++)
        sigaddset(s, deferrable[i]);
}
321 void
322 sigaddset_blockable(sigset_t *sigset)
324 sigaddset_deferrable(sigset);
325 sigaddset_gc(sigset);
/* Add the signal used to stop threads for GC — a no-op on platforms
 * that don't use a signal for that purpose. */
void
sigaddset_gc(sigset_t *sigset)
{
#ifdef THREADS_USING_GCSIGNAL
    sigaddset(sigset, SIG_STOP_FOR_GC);
#else
    (void)sigset;
#endif
}
336 /* initialized in interrupt_init */
337 sigset_t deferrable_sigset;
338 sigset_t blockable_sigset;
339 sigset_t gc_sigset;
341 #endif
343 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
/* True iff every deferrable signal is masked in SIGSET (or in the
 * current mask when SIGSET is 0); dies on a mixed state. */
344 boolean
345 deferrables_blocked_p(sigset_t *sigset)
347     return all_signals_blocked_p(sigset, &deferrable_sigset, "deferrable");
349 #endif
/* Assert that no deferrable signal is masked in SIGSET (0 = current
 * mask); dies otherwise. */
351 void
352 check_deferrables_unblocked_or_lose(sigset_t *sigset)
354 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
355     if (deferrables_blocked_p(sigset))
356         lose("deferrables blocked\n");
357 #endif
/* Assert that all deferrable signals are masked in SIGSET (0 = current
 * mask); dies otherwise. */
360 void
361 check_deferrables_blocked_or_lose(sigset_t *sigset)
363 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
364     if (!deferrables_blocked_p(sigset))
365         lose("deferrables unblocked\n");
366 #endif
369 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
/* True iff every blockable signal (deferrables + gc) is masked in
 * SIGSET (0 = current mask); dies on a mixed state. */
370 boolean
371 blockables_blocked_p(sigset_t *sigset)
373     return all_signals_blocked_p(sigset, &blockable_sigset, "blockable");
375 #endif
/* Assert that no blockable signal is masked in SIGSET (0 = current
 * mask); dies otherwise. */
377 void
378 check_blockables_unblocked_or_lose(sigset_t *sigset)
380 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
381     if (blockables_blocked_p(sigset))
382         lose("blockables blocked\n");
383 #endif
/* Assert that all blockable signals are masked in SIGSET (0 = current
 * mask); dies otherwise.  Deliberately skipped on Windows, see below. */
386 void
387 check_blockables_blocked_or_lose(sigset_t *sigset)
389 #if !defined(LISP_FEATURE_WIN32)
390     /* On Windows, there are no actual signals, but since the win32 port
391      * tracks the sigmask and checks it explicitly, some functions are
392      * still required to keep the mask set up properly. (After all, the
393      * goal of the sigmask emulation is to not have to change all the
394      * call sites in the first place.)
396      * However, this does not hold for all signals equally: While
397      * deferrables matter ("is interrupt-thread okay?"), it is not worth
398      * having to set up blockables properly (which include the
399      * non-existing GC signals).
401      * Yet, as the original comment explains it:
402      * Adjusting FREE-INTERRUPT-CONTEXT-INDEX* and other aspects of
403      * fake_foreign_function_call machinery are sometimes useful here[...].
405      * So we merely skip this assertion.
406      * -- DFL, trying to expand on a comment by AK.
408     if (!blockables_blocked_p(sigset))
409         lose("blockables unblocked\n");
410 #endif
413 #ifndef LISP_FEATURE_SB_SAFEPOINT
414 #if !defined(LISP_FEATURE_WIN32)
/* True iff the gc signals are masked in SIGSET (0 = current mask);
 * dies on a mixed state. */
415 boolean
416 gc_signals_blocked_p(sigset_t *sigset)
418     return all_signals_blocked_p(sigset, &gc_sigset, "gc");
420 #endif
/* Assert that no gc signal is masked in SIGSET (0 = current mask);
 * dies otherwise. */
422 void
423 check_gc_signals_unblocked_or_lose(sigset_t *sigset)
425 #if !defined(LISP_FEATURE_WIN32)
426     if (gc_signals_blocked_p(sigset))
427         lose("gc signals blocked\n");
428 #endif
/* Assert that the gc signals are masked in SIGSET (0 = current mask);
 * dies otherwise. */
431 void
432 check_gc_signals_blocked_or_lose(sigset_t *sigset)
434 #if !defined(LISP_FEATURE_WIN32)
435     if (!gc_signals_blocked_p(sigset))
436         lose("gc signals unblocked\n");
437 #endif
439 #endif
/* Block every deferrable signal in this thread's live mask, saving
 * the previous mask in OLD when non-NULL. */
441 void
442 block_deferrable_signals(sigset_t *old)
444 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
445     block_signals(&deferrable_sigset, 0, old);
446 #endif
/* Block every blockable signal (deferrables + gc) in this thread's
 * live mask, saving the previous mask in OLD when non-NULL. */
449 void
450 block_blockable_signals(sigset_t *old)
452 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
453     block_signals(&blockable_sigset, 0, old);
454 #endif
/* Unblock the deferrable signals in WHERE (or the live mask when
 * WHERE is 0).  It is a bug to do this while a deferred handler is
 * still pending, or (on non-safepoint builds) while the gc signals
 * are blocked — both are checked here. */
457 void
458 unblock_deferrable_signals(sigset_t *where, sigset_t *old)
460 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
461     if (interrupt_handler_pending_p())
462         lose("unblock_deferrable_signals: losing proposition\n");
463 #ifndef LISP_FEATURE_SB_SAFEPOINT
464     check_gc_signals_unblocked_or_lose(where);
465 #endif
466     unblock_signals(&deferrable_sigset, where, old);
467 #endif
470 #ifndef LISP_FEATURE_SB_SAFEPOINT
/* Unblock the gc signals in WHERE (or the live mask when WHERE is 0),
 * saving the previous mask in OLD when non-NULL. */
471 void
472 unblock_gc_signals(sigset_t *where, sigset_t *old)
474 #ifndef LISP_FEATURE_WIN32
475     unblock_signals(&gc_sigset, where, old);
476 #endif
478 #endif
/* Before returning to Lisp from a signal handler, make sure the
 * context's signal mask will not leave gc signals or deferrables
 * blocked.  Blocked gc signals here indicate a severe problem, so a
 * corruption warning is emitted before unblocking them. */
480 void
481 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
483 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
484     sigset_t *sigset = os_context_sigmask_addr(context);
485 #ifndef LISP_FEATURE_SB_SAFEPOINT
486     if (all_signals_blocked_p(sigset, &gc_sigset, "gc")) {
487         corruption_warning_and_maybe_lose(
488 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
489 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
490 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
491         unblock_gc_signals(sigset, 0);
493 #endif
        /* Deferrables stay blocked while a handler is pending; they
         * will be unblocked when the pending handler runs. */
494     if (!interrupt_handler_pending_p()) {
495         unblock_deferrable_signals(sigset, 0);
497 #endif
/* Assert that Lisp-level interrupts are enabled and that we are not
 * inside a pseudo-atomic section; dies otherwise. */
501 inline static void
502 check_interrupts_enabled_or_lose(os_context_t *context)
504     struct thread *thread=arch_os_get_current_thread();
505     if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
506         lose("interrupts not enabled\n");
507     if (arch_pseudo_atomic_atomic(context))
508         lose ("in pseudo atomic section\n");
511 /* Save sigset (or the current sigmask if 0) if there is no pending
512  * handler, because that means that deferrables are already blocked.
513 * The purpose is to avoid losing the pending gc signal if a
514 * deferrable interrupt async unwinds between clearing the pseudo
515 * atomic and trapping to GC.*/
516 #ifndef LISP_FEATURE_SB_SAFEPOINT
/* See the comment above: save SIGSET (or the current mask when 0) into
 * data->pending_mask and mark gc_blocked_deferrables, so a pending gc
 * signal is not lost if a deferrable interrupt unwinds between the end
 * of pseudo-atomic and the trap to GC. */
517 void
518 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
520 #ifndef LISP_FEATURE_WIN32
521     struct thread *thread = arch_os_get_current_thread();
522     struct interrupt_data *data = thread->interrupt_data;
523     sigset_t oldset;
524     /* Obviously, this function is called when signals may not be
525      * blocked. Let's make sure we are not interrupted. */
526     block_blockable_signals(&oldset);
527 #ifndef LISP_FEATURE_SB_THREAD
528     /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
529      * block. */
530     if (data->gc_blocked_deferrables)
531         lose("gc_blocked_deferrables already true\n");
532 #endif
533     if ((!data->pending_handler) &&
534         (!data->gc_blocked_deferrables)) {
535         FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
536         data->gc_blocked_deferrables = 1;
537         if (sigset) {
538             /* This is the sigmask of some context. */
539             sigcopyset(&data->pending_mask, sigset);
540             sigaddset_deferrable(sigset);
541             thread_sigmask(SIG_SETMASK,&oldset,0);
542             return;
543         } else {
544             /* Operating on the current sigmask. Save oldset and
545              * unblock gc signals. In the end, this is equivalent to
546              * blocking the deferrables. */
547             sigcopyset(&data->pending_mask, &oldset);
548             thread_sigmask(SIG_UNBLOCK, &gc_sigset, 0);
549             return;
        /* Fall-through: nothing saved, restore the mask we found. */
552     thread_sigmask(SIG_SETMASK,&oldset,0);
553 #endif
555 #endif
557 /* Are we leaving WITH-GCING and already running with interrupts
558  * enabled, without the protection of *GC-INHIBIT* T and there is gc
559  * (or stop for gc) pending, but we haven't trapped yet? */
/* NOTE(review): the return-type line (presumably "boolean", original
 * line 560) was lost in this extraction — verify against upstream. */
561 in_leaving_without_gcing_race_p(struct thread *thread)
563     return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
564             (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
565             (SymbolValue(GC_INHIBIT,thread) == NIL) &&
566             ((SymbolValue(GC_PENDING,thread) != NIL)
567 #if defined(LISP_FEATURE_SB_THREAD)
568              || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
569 #endif
573 /* Check our baroque invariants. */
/* Sanity-checks the relationships between pending interrupts, pending
 * GC, pseudo-atomic state and the context's signal mask; dies on any
 * violated invariant. */
574 void
575 check_interrupt_context_or_lose(os_context_t *context)
577 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
578     struct thread *thread = arch_os_get_current_thread();
579     struct interrupt_data *data = thread->interrupt_data;
580     int interrupt_deferred_p = (data->pending_handler != 0);
581     int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
582     sigset_t *sigset = os_context_sigmask_addr(context);
583     /* On PPC pseudo_atomic_interrupted is cleared when coming out of
584      * handle_allocation_trap. */
585 #if defined(LISP_FEATURE_GENCGC) && !defined(GENCGC_IS_PRECISE)
586     int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
587     int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
588     int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
589     int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
590     int in_race_p = in_leaving_without_gcing_race_p(thread);
591     /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
592      * section and trapping, a SIG_STOP_FOR_GC would see the next
593      * check fail, for this reason sig_stop_for_gc handler does not
594      * call this function. */
595     if (interrupt_deferred_p) {
596         if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
597             lose("Stray deferred interrupt.\n");
599     if (gc_pending)
600         if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
601             lose("GC_PENDING, but why?\n");
602 #if defined(LISP_FEATURE_SB_THREAD)
604         int stop_for_gc_pending =
605             (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
606         if (stop_for_gc_pending)
607             if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
608                 lose("STOP_FOR_GC_PENDING, but why?\n");
609         if (pseudo_atomic_interrupted)
610             if (!(gc_pending || stop_for_gc_pending || interrupt_deferred_p))
611                 lose("pseudo_atomic_interrupted, but why?\n");
613 #else
614     if (pseudo_atomic_interrupted)
615         if (!(gc_pending || interrupt_deferred_p))
616             lose("pseudo_atomic_interrupted, but why?\n");
617 #endif
618 #endif
619     if (interrupt_pending && !interrupt_deferred_p)
620         lose("INTERRUPT_PENDING but not pending handler.\n");
621     if ((data->gc_blocked_deferrables) && interrupt_pending)
622         lose("gc_blocked_deferrables and interrupt pending\n.");
        /* NOTE(review): this check is subsumed by the broader one just
         * below (which also tests gc_blocked_deferrables) — looks
         * redundant; verify against upstream before removing. */
623     if (data->gc_blocked_deferrables)
624         check_deferrables_blocked_or_lose(sigset);
625     if (interrupt_pending || interrupt_deferred_p ||
626         data->gc_blocked_deferrables)
627         check_deferrables_blocked_or_lose(sigset);
628     else {
629         check_deferrables_unblocked_or_lose(sigset);
630 #ifndef LISP_FEATURE_SB_SAFEPOINT
631         /* If deferrables are unblocked then we are open to signals
632          * that run lisp code. */
633         check_gc_signals_unblocked_or_lose(sigset);
634 #endif
636 #endif
640 * utility routines used by various signal handlers
/* On non-x86oid (Lisp-control-stack) platforms, push fake control
 * stack frames so that backtraces and GC can walk the stack across
 * the interrupted context.  No-op where the C stack is the control
 * stack. */
643 static void
644 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
646 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
648     lispobj oldcont;
650     /* Build a fake stack frame or frames */
652 #if !defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_ARM64)
653     access_control_frame_pointer(th) =
654         (lispobj *)(uword_t)
655             (*os_context_register_addr(context, reg_CSP));
656     if ((lispobj *)(uword_t)
657             (*os_context_register_addr(context, reg_CFP))
658         == access_control_frame_pointer(th)) {
659         /* There is a small window during call where the callee's
660          * frame isn't built yet. */
661         if (lowtag_of(*os_context_register_addr(context, reg_CODE))
662             == FUN_POINTER_LOWTAG) {
663             /* We have called, but not built the new frame, so
664              * build it for them. */
665             access_control_frame_pointer(th)[0] =
666                 *os_context_register_addr(context, reg_OCFP);
667             access_control_frame_pointer(th)[1] =
668                 *os_context_register_addr(context, reg_LRA);
669             access_control_frame_pointer(th) += 2;
670             /* Build our frame on top of it. */
671             oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
673         else {
674             /* We haven't yet called, build our frame as if the
675              * partial frame wasn't there. */
676             oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
678     } else
679 #elif defined (LISP_FEATURE_ARM)
680     access_control_frame_pointer(th) =
681         SymbolValue(CONTROL_STACK_POINTER, th);
682 #elif defined (LISP_FEATURE_ARM64)
683     access_control_frame_pointer(th) =
684         (lispobj *)(uword_t) (*os_context_register_addr(context, reg_CSP));
685 #endif
686     /* We can't tell whether we are still in the caller if it had to
687      * allocate a stack frame due to stack arguments. */
688     /* This observation provoked some past CMUCL maintainer to ask
689      * "Can anything strange happen during return?" */
691     /* normal case */
692     oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
        /* Push the three-word fake frame: old frame pointer, NIL, and
         * the interrupted code object. */
695     access_control_stack_pointer(th) = access_control_frame_pointer(th) + 3;
697     access_control_frame_pointer(th)[0] = oldcont;
698     access_control_frame_pointer(th)[1] = NIL;
699     access_control_frame_pointer(th)[2] =
700         (lispobj)(*os_context_register_addr(context, reg_CODE));
701 #endif
704 /* Stores the context for gc to scavenge and builds fake stack
705  * frames. */
/* Prepare Lisp state so that C code (GC, error handling) can run as if
 * we had made a normal foreign call: snapshot allocation/binding-stack
 * registers from CONTEXT into the thread, build fake control stack
 * frames, and record CONTEXT in thread->interrupt_contexts.  Must be
 * called with all blockable signals masked (asserted below). */
706 void
707 fake_foreign_function_call(os_context_t *context)
709     int context_index;
710     struct thread *thread=arch_os_get_current_thread();
712     /* context_index incrementing must not be interrupted */
713     check_blockables_blocked_or_lose(0);
715     /* Get current Lisp state from context. */
716 #if defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_GENCGC)
717     dynamic_space_free_pointer = SymbolValue(ALLOCATION_POINTER, thread);
718 #endif
719 #ifdef reg_ALLOC
720 #ifdef LISP_FEATURE_SB_THREAD
721     thread->pseudo_atomic_bits =
722 #else
723     dynamic_space_free_pointer =
724         (lispobj *)(uword_t)
725 #endif
726             (*os_context_register_addr(context, reg_ALLOC));
727 /*     fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
728 /*             dynamic_space_free_pointer); */
729 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
        /* A low bit set in reg_ALLOC encodes pseudo-atomic state on
         * these platforms; it must have been cleared by now. */
730     if ((sword_t)dynamic_space_free_pointer & 1) {
731         lose("dead in fake_foreign_function_call, context = %x\n", context);
733 #endif
734     /* why doesn't PPC and SPARC do something like this: */
735 #if defined(LISP_FEATURE_HPPA)
736     if ((sword_t)dynamic_space_free_pointer & 4) {
737         lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
739 #endif
740 #endif
741 #ifdef reg_BSP
742     set_binding_stack_pointer(thread,
743         *os_context_register_addr(context, reg_BSP));
744 #endif
746 #if defined(LISP_FEATURE_ARM)
747     /* Stash our control stack pointer */
748     bind_variable(INTERRUPTED_CONTROL_STACK_POINTER,
749                   SymbolValue(CONTROL_STACK_POINTER, thread),
750                   thread);
751 #endif
753     build_fake_control_stack_frames(thread,context);
755     /* Do dynamic binding of the active interrupt context index
756      * and save the context in the context array. */
757     context_index =
758         fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
760     if (context_index >= MAX_INTERRUPTS) {
761         lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
764     bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
765                   make_fixnum(context_index + 1),thread);
767     thread->interrupt_contexts[context_index] = context;
769 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
770     /* x86oid targets don't maintain the foreign function call flag at
771      * all, so leave them to believe that they are never in foreign
772      * code. */
773     foreign_function_call_active_p(thread) = 1;
774 #endif
777 /* blocks all blockable signals.  If you are calling from a signal handler,
778  * the usual signal mask will be restored from the context when the handler
779  * finishes.  Otherwise, be careful */
/* Reverse fake_foreign_function_call(): unbind the interrupt-context
 * index, restore saved stack/allocation registers back into CONTEXT,
 * and clear the foreign-call flag.  Leaves all blockable signals
 * masked. */
780 void
781 undo_fake_foreign_function_call(os_context_t *context)
783     struct thread *thread=arch_os_get_current_thread();
784     /* Block all blockable signals. */
785     block_blockable_signals(0);
787     foreign_function_call_active_p(thread) = 0;
789     /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
790     unbind(thread);
792 #if defined(LISP_FEATURE_ARM)
793     /* Restore our saved control stack pointer */
794     SetSymbolValue(CONTROL_STACK_POINTER,
795                    SymbolValue(INTERRUPTED_CONTROL_STACK_POINTER,
796                                thread),
797                    thread);
798     unbind(thread);
799 #endif
801 #if defined(reg_ALLOC) && !defined(LISP_FEATURE_SB_THREAD)
802     /* Put the dynamic space free pointer back into the context. */
803     *os_context_register_addr(context, reg_ALLOC) =
804         (uword_t) dynamic_space_free_pointer
805         | (*os_context_register_addr(context, reg_ALLOC)
806            & LOWTAG_MASK);
    /* NOTE(review): the expression below appears orphaned in this
     * extraction — original line 807 (likely an alternative under a
     * comment or #if) is missing; verify against upstream. */
808     ((uword_t)(*os_context_register_addr(context, reg_ALLOC))
809      & ~LOWTAG_MASK)
810         | ((uword_t) dynamic_space_free_pointer & LOWTAG_MASK);
812 #endif
813 #if defined(reg_ALLOC) && defined(LISP_FEATURE_SB_THREAD)
814     /* Put the pseudo-atomic bits and dynamic space free pointer back
815      * into the context (p-a-bits for p-a, and dynamic space free
816      * pointer for ROOM). */
817     *os_context_register_addr(context, reg_ALLOC) =
818         (uword_t) dynamic_space_free_pointer
819         | (thread->pseudo_atomic_bits & LOWTAG_MASK);
820     /* And clear them so we don't get bit later by call-in/call-out
821      * not updating them. */
822     thread->pseudo_atomic_bits = 0;
823 #endif
824 #if defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_GENCGC)
825     SetSymbolValue(ALLOCATION_POINTER, dynamic_space_free_pointer, thread);
826 #endif
829 /* a handler for the signal caused by execution of a trap opcode
830  * signalling an internal error */
/* Hand an internal error (trap instruction) to the Lisp-side
 * INTERNAL-ERROR handler, after faking a foreign call and restoring
 * the context's signal mask.  Loses outright if called before Lisp
 * error handling is initialized.  When CONTINUABLE, execution resumes
 * after the trap instruction. */
831 void
832 interrupt_internal_error(os_context_t *context, boolean continuable)
834     DX_ALLOC_SAP(context_sap, context);
836     fake_foreign_function_call(context);
838     if (!internal_errors_enabled) {
839         describe_internal_error(context);
840         /* There's no good way to recover from an internal error
841          * before the Lisp error handling mechanism is set up. */
842         lose("internal error too early in init, can't recover\n");
845 #ifndef LISP_FEATURE_SB_SAFEPOINT
846     unblock_gc_signals(0, 0);
847 #endif
849 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
850     thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
851 #endif
853 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
854     /* Workaround for blocked SIGTRAP. */
856         sigset_t newset;
857         sigemptyset(&newset);
858         sigaddset(&newset, SIGTRAP);
859         thread_sigmask(SIG_UNBLOCK, &newset, 0);
861 #endif
863     SHOW("in interrupt_internal_error");
864 #if QSHOW == 2
865     /* Display some rudimentary debugging information about the
866      * error, so that even if the Lisp error handler gets badly
867      * confused, we have a chance to determine what's going on. */
868     describe_internal_error(context);
869 #endif
870     funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
871              continuable ? T : NIL);
873     undo_fake_foreign_function_call(context); /* blocks signals again */
874     if (continuable)
875         arch_skip_instruction(context);
/* True iff the current thread has a deferred interrupt handler waiting
 * to run. */
878 boolean
879 interrupt_handler_pending_p(void)
881     struct thread *thread = arch_os_get_current_thread();
882     struct interrupt_data *data = thread->interrupt_data;
883     return (data->pending_handler != 0);
886 void
887 interrupt_handle_pending(os_context_t *context)
889 /* There are three ways we can get here. First, if an interrupt
890 * occurs within pseudo-atomic, it will be deferred, and we'll
891 * trap to here at the end of the pseudo-atomic block. Second, if
892 * the GC (in alloc()) decides that a GC is required, it will set
893 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
894 * and alloc() is always called from within pseudo-atomic, and
895 * thus we end up here again. Third, when calling GC-ON or at the
896 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
897 * here if there is a pending GC. Fourth, ahem, at the end of
898 * WITHOUT-INTERRUPTS (bar complications with nesting).
900 * A fourth way happens with safepoints: In addition to a stop for
901 * GC that is pending, there are thruptions. Both mechanisms are
902 * mostly signal-free, yet also of an asynchronous nature, so it makes
903 * sense to let interrupt_handle_pending take care of running them:
904 * It gets run precisely at those places where it is safe to process
905 * pending asynchronous tasks. */
907 struct thread *thread = arch_os_get_current_thread();
908 struct interrupt_data *data = thread->interrupt_data;
910 if (arch_pseudo_atomic_atomic(context)) {
911 lose("Handling pending interrupt in pseudo atomic.");
914 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
916 check_blockables_blocked_or_lose(0);
917 #ifndef LISP_FEATURE_SB_SAFEPOINT
919 * (On safepoint builds, there is no gc_blocked_deferrables nor
920 * SIG_STOP_FOR_GC.)
922 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
923 * handler, then the pending mask was saved and
924 * gc_blocked_deferrables set. Hence, there can be no pending
925 * handler and it's safe to restore the pending mask.
927 * Note, that if gc_blocked_deferrables is false we may still have
928 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
929 * pseudo atomic was interrupt be a deferrable first. */
930 if (data->gc_blocked_deferrables) {
931 if (data->pending_handler)
932 lose("GC blocked deferrables but still got a pending handler.");
933 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
934 lose("GC blocked deferrables while GC is inhibited.");
935 /* Restore the saved signal mask from the original signal (the
936 * one that interrupted us during the critical section) into
937 * the os_context for the signal we're currently in the
938 * handler for. This should ensure that when we return from
939 * the handler the blocked signals are unblocked. */
940 #ifndef LISP_FEATURE_WIN32
941 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
942 #endif
943 data->gc_blocked_deferrables = 0;
945 #endif
947 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
948 void *original_pending_handler = data->pending_handler;
950 #ifdef LISP_FEATURE_SB_SAFEPOINT
951 /* handles the STOP_FOR_GC_PENDING case, plus THRUPTIONS */
952 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL
953 # ifdef LISP_FEATURE_SB_THRUPTION
954 || (SymbolValue(THRUPTION_PENDING,thread) != NIL
955 && SymbolValue(INTERRUPTS_ENABLED, thread) != NIL)
956 # endif
958 /* We ought to take this chance to do a pitstop now. */
959 thread_in_lisp_raised(context);
960 #elif defined(LISP_FEATURE_SB_THREAD)
961 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
962 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
963 * the signal handler if it actually stops us. */
964 arch_clear_pseudo_atomic_interrupted(context);
965 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
966 } else
967 #endif
968 /* Test for T and not for != NIL since the value :IN-PROGRESS
969 * is used in SUB-GC as part of the mechanism to supress
970 * recursive gcs.*/
971 if (SymbolValue(GC_PENDING,thread) == T) {
973 /* Two reasons for doing this. First, if there is a
974 * pending handler we don't want to run. Second, we are
975 * going to clear pseudo atomic interrupted to avoid
976 * spurious trapping on every allocation in SUB_GC and
977 * having a pending handler with interrupts enabled and
978 * without pseudo atomic interrupted breaks an
979 * invariant. */
980 if (data->pending_handler) {
981 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
982 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
985 arch_clear_pseudo_atomic_interrupted(context);
987 /* GC_PENDING is cleared in SUB-GC, or if another thread
988 * is doing a gc already we will get a SIG_STOP_FOR_GC and
989 * that will clear it.
991 * If there is a pending handler or gc was triggered in a
992 * signal handler then maybe_gc won't run POST_GC and will
993 * return normally. */
994 if (!maybe_gc(context))
995 lose("GC not inhibited but maybe_gc did not GC.");
997 if (data->pending_handler) {
998 unbind(thread);
999 unbind(thread);
1001 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
1002 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
1003 * GC-PENDING is not NIL then we cannot trap on pseudo
1004 * atomic due to GC (see if(GC_PENDING) logic in
1005 * cheneygc.c and gencgc.c), plus there is an outer
1006 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
1007 * here? */
1008 lose("Trapping to run pending handler while GC in progress.");
1011 check_blockables_blocked_or_lose(0);
1013 /* No GC shall be lost. If SUB_GC triggers another GC then
1014 * that should be handled on the spot. */
1015 if (SymbolValue(GC_PENDING,thread) != NIL)
1016 lose("GC_PENDING after doing gc.");
1017 #ifdef THREADS_USING_GCSIGNAL
1018 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
1019 lose("STOP_FOR_GC_PENDING after doing gc.");
1020 #endif
1021 /* Check two things. First, that gc does not clobber a handler
1022 * that's already pending. Second, that there is no interrupt
1023 * lossage: if original_pending_handler was NULL then even if
1024 * an interrupt arrived during GC (POST-GC, really) it was
1025 * handled. */
1026 if (original_pending_handler != data->pending_handler)
1027 lose("pending handler changed in gc: %x -> %x.",
1028 original_pending_handler, data->pending_handler);
1031 #ifndef LISP_FEATURE_WIN32
1032 /* There may be no pending handler, because it was only a gc that
1033 * had to be executed or because Lisp is a bit too eager to call
1034 * DO-PENDING-INTERRUPT. */
1035 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
1036 (data->pending_handler)) {
1037 /* No matter how we ended up here, clear both
1038 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
1039 * because we checked above that there is no GC pending. */
1040 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
1041 arch_clear_pseudo_atomic_interrupted(context);
1042 /* Restore the sigmask in the context. */
1043 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
1044 run_deferred_handler(data, context);
1046 #ifdef LISP_FEATURE_SB_THRUPTION
1047 if (SymbolValue(THRUPTION_PENDING,thread)==T)
1048 /* Special case for the following situation: There is a
1049 * thruption pending, but a signal had been deferred. The
1050 * pitstop at the top of this function could only take care
1051 * of GC, and skipped the thruption, so we need to try again
1052 * now that INTERRUPT_PENDING and the sigmask have been
1053 * reset. */
1054 while (check_pending_thruptions(context))
1056 #endif
1057 #endif
1058 #ifdef LISP_FEATURE_GENCGC
1059 if (get_pseudo_atomic_interrupted(thread))
1060 lose("pseudo_atomic_interrupted after interrupt_handle_pending\n");
1061 #endif
1062 /* It is possible that the end of this function was reached
1063 * without ever actually doing anything, the tests in Lisp for
1064 * when to call receive-pending-interrupt are not exact. */
1065 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
/* Run the registered handler for SIGNAL right now, in the context of
 * the delivered signal.  Dispatches either to a Lisp-level handler
 * (called via funcall3 with the signal number, siginfo and context
 * boxed as stack-allocated SAPs) or to a plain C handler.  Entered
 * with all blockable signals blocked. */
1069 void
1070 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
1072 boolean were_in_lisp;
1073 union interrupt_handler handler;
1075 check_blockables_blocked_or_lose(0);
1077 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
/* Deferrable signals may only be handled while interrupts are enabled. */
1078 if (sigismember(&deferrable_sigset,signal))
1079 check_interrupts_enabled_or_lose(context);
1080 #endif
1082 handler = interrupt_handlers[signal];
1084 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
1085 return;
/* If the signal hit while running Lisp code, set up a fake "foreign
 * function call" frame so that GC can see the interrupted context. */
1088 were_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());
1089 if (were_in_lisp)
1091 fake_foreign_function_call(context);
1094 FSHOW_SIGNAL((stderr,
1095 "/entering interrupt_handle_now(%d, info, context)\n",
1096 signal));
1098 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
1100 /* This can happen if someone tries to ignore or default one
1101 * of the signals we need for runtime support, and the runtime
1102 * support decides to pass on it. */
1103 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
1105 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
1106 /* Once we've decided what to do about contexts in a
1107 * return-elsewhere world (the original context will no longer
1108 * be available; should we copy it or was nobody using it anyway?)
1109 * then we should convert this to return-elsewhere */
1111 /* CMUCL comment said "Allocate the SAPs while the interrupts
1112 * are still disabled.". I (dan, 2003.08.21) assume this is
1113 * because we're not in pseudoatomic and allocation shouldn't
1114 * be interrupted. In which case it's no longer an issue as
1115 * all our allocation from C now goes through a PA wrapper,
1116 * but still, doesn't hurt.
1118 * Yeah, but non-gencgc platforms don't really wrap allocation
1119 * in PA. MG - 2005-08-29 */
1122 #ifndef LISP_FEATURE_SB_SAFEPOINT
1123 /* Leave deferrable signals blocked, the handler itself will
1124 * allow signals again when it sees fit. */
1125 unblock_gc_signals(0, 0);
1126 #else
1127 WITH_GC_AT_SAFEPOINTS_ONLY()
1128 #endif
1129 { // the block is needed for WITH_GC_AT_SAFEPOINTS_ONLY() to work
1130 DX_ALLOC_SAP(context_sap, context);
1131 DX_ALLOC_SAP(info_sap, info);
1133 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
1135 funcall3(handler.lisp,
1136 make_fixnum(signal),
1137 info_sap,
1138 context_sap);
1140 } else {
1141 /* This cannot happen in sane circumstances. */
1143 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
1145 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
1146 /* Allow signals again. */
1147 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1148 (*handler.c)(signal, info, context);
1149 #endif
1152 if (were_in_lisp)
1154 undo_fake_foreign_function_call(context); /* block signals again */
1157 FSHOW_SIGNAL((stderr,
1158 "/returning from interrupt_handle_now(%d, info, context)\n",
1159 signal));
1162 /* This is called at the end of a critical section if the indications
1163 * are that some signal was deferred during the section. Note that as
1164 * far as C or the kernel is concerned we dealt with the signal
1165 * already; we're just doing the Lisp-level processing now that we
1166 * put off then */
1167 static void
1168 run_deferred_handler(struct interrupt_data *data, os_context_t *context)
1170 /* The pending_handler may enable interrupts and then another
1171 * interrupt may hit, overwrite interrupt_data, so reset the
1172 * pending handler before calling it. Trust the handler to finish
1173 * with the siginfo before enabling interrupts. */
1174 void (*pending_handler) (int, siginfo_t*, os_context_t*) =
1175 data->pending_handler;
1177 data->pending_handler=0;
1178 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
1179 (*pending_handler)(data->pending_signal,&(data->pending_info), context);
1182 #ifndef LISP_FEATURE_WIN32
/* Decide whether HANDLER for SIGNAL must be deferred (because
 * interrupts are disabled or we are in a pseudo-atomic section).
 * Returns 1 if the signal was stashed for later, 0 if the caller
 * should handle it immediately.  Entered with blockables blocked. */
1183 boolean
1184 maybe_defer_handler(void *handler, struct interrupt_data *data,
1185 int signal, siginfo_t *info, os_context_t *context)
1187 struct thread *thread=arch_os_get_current_thread();
1189 check_blockables_blocked_or_lose(0);
/* Sanity: a signal to defer while one is already pending would be lost. */
1191 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
1192 lose("interrupt already pending\n");
1193 if (thread->interrupt_data->pending_handler)
1194 lose("there is a pending handler already (PA)\n");
1195 if (data->gc_blocked_deferrables)
1196 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
1197 check_interrupt_context_or_lose(context);
1198 /* If interrupts are disabled then INTERRUPT_PENDING is set and
1199 * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
1200 * atomic section inside a WITHOUT-INTERRUPTS.
1202 * Also, if in_leaving_without_gcing_race_p then
1203 * interrupt_handle_pending is going to be called soon, so
1204 * stashing the signal away is safe.
1206 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
1207 in_leaving_without_gcing_race_p(thread)) {
1208 FSHOW_SIGNAL((stderr,
1209 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
1210 (unsigned int)handler,signal,
1211 in_leaving_without_gcing_race_p(thread)));
1212 store_signal_data_for_later(data,handler,signal,info,context);
1213 SetSymbolValue(INTERRUPT_PENDING, T,thread);
1214 check_interrupt_context_or_lose(context);
1215 return 1;
1217 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
1218 * actually use its argument for anything on x86, so this branch
1219 * may succeed even when context is null (gencgc alloc()) */
1220 if (arch_pseudo_atomic_atomic(context)) {
1221 FSHOW_SIGNAL((stderr,
1222 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1223 (unsigned int)handler,signal));
1224 store_signal_data_for_later(data,handler,signal,info,context);
/* Mark the PA section interrupted so it traps to
 * interrupt_handle_pending on exit. */
1225 arch_set_pseudo_atomic_interrupted(context);
1226 check_interrupt_context_or_lose(context);
1227 return 1;
1229 FSHOW_SIGNAL((stderr,
1230 "/maybe_defer_handler(%x,%d): not deferred\n",
1231 (unsigned int)handler,signal));
1232 return 0;
1235 static void
1236 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1237 int signal,
1238 siginfo_t *info, os_context_t *context)
1240 if (data->pending_handler)
1241 lose("tried to overwrite pending interrupt handler %x with %x\n",
1242 data->pending_handler, handler);
1243 if (!handler)
1244 lose("tried to defer null interrupt handler\n");
1245 data->pending_handler = handler;
1246 data->pending_signal = signal;
1247 if(info)
1248 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1250 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1251 signal));
1253 if(!context)
1254 lose("Null context");
1256 /* the signal mask in the context (from before we were
1257 * interrupted) is copied to be restored when run_deferred_handler
1258 * happens. Then the usually-blocked signals are added to the mask
1259 * in the context so that we are running with blocked signals when
1260 * the handler returns */
1261 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1262 sigaddset_deferrable(os_context_sigmask_addr(context));
1265 static void
1266 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1268 SAVE_ERRNO(signal,context,void_context);
1269 struct thread *thread = arch_os_get_current_thread();
1270 struct interrupt_data *data = thread->interrupt_data;
1271 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1272 interrupt_handle_now(signal, info, context);
1273 RESTORE_ERRNO;
1276 static void
1277 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1278 os_context_t *context)
1280 /* No FP control fixage needed, caller has done that. */
1281 check_blockables_blocked_or_lose(0);
1282 check_interrupts_enabled_or_lose(context);
1283 (*interrupt_low_level_handlers[signal])(signal, info, context);
1284 /* No Darwin context fixage needed, caller does that. */
1287 static void
1288 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1290 SAVE_ERRNO(signal,context,void_context);
1291 struct thread *thread = arch_os_get_current_thread();
1292 struct interrupt_data *data = thread->interrupt_data;
1294 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1295 signal,info,context))
1296 low_level_interrupt_handle_now(signal, info, context);
1297 RESTORE_ERRNO;
1299 #endif
1301 #ifdef THREADS_USING_GCSIGNAL
1303 /* This function must not cons, because that may trigger a GC. */
/* Park this thread for a GC requested by another thread: unless GC is
 * inhibited or we're pseudo-atomic (in which case the stop is
 * deferred), mark the thread STATE_STOPPED and wait until the GCing
 * thread signals a state change back to running. */
1304 void
1305 sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
1307 struct thread *thread=arch_os_get_current_thread();
1308 boolean was_in_lisp;
1310 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1311 * pseudo atomic until gc is finally allowed. */
1312 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1313 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1314 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1315 return;
1316 } else if (arch_pseudo_atomic_atomic(context)) {
1317 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1318 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1319 arch_set_pseudo_atomic_interrupted(context);
1320 maybe_save_gc_mask_and_block_deferrables
1321 (os_context_sigmask_addr(context));
1322 return;
1325 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1327 /* Not PA and GC not inhibited -- we can stop now. */
1329 was_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());
1331 if (was_in_lisp) {
1332 /* need the context stored so it can have registers scavenged */
1333 fake_foreign_function_call(context);
1336 /* Not pending anymore. */
1337 SetSymbolValue(GC_PENDING,NIL,thread);
1338 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1340 /* Consider this: in a PA section GC is requested: GC_PENDING,
1341 * pseudo_atomic_interrupted and gc_blocked_deferrables are set,
1342 * deferrables are blocked then pseudo_atomic_atomic is cleared,
1343 * but a SIG_STOP_FOR_GC arrives before trapping to
1344 * interrupt_handle_pending. Here, GC_PENDING is cleared but
1345 * pseudo_atomic_interrupted is not and we go on running with
1346 * pseudo_atomic_interrupted but without a pending interrupt or
1347 * GC. GC_BLOCKED_DEFERRABLES is also left at 1. So let's tidy it
1348 * up. */
1349 if (thread->interrupt_data->gc_blocked_deferrables) {
1350 FSHOW_SIGNAL((stderr,"cleaning up after gc_blocked_deferrables\n"));
1351 clear_pseudo_atomic_interrupted(thread);
1352 sigcopyset(os_context_sigmask_addr(context),
1353 &thread->interrupt_data->pending_mask);
1354 thread->interrupt_data->gc_blocked_deferrables = 0;
1357 if(thread_state(thread)!=STATE_RUNNING) {
1358 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1359 fixnum_value(thread->state));
1362 set_thread_state(thread,STATE_STOPPED);
1363 FSHOW_SIGNAL((stderr,"suspended\n"));
1365 /* While waiting for gc to finish occupy ourselves with zeroing
1366 * the unused portion of the control stack to reduce conservatism.
1367 * On hypothetical platforms with threads and exact gc it is
1368 * actually a must. */
1369 scrub_control_stack();
1371 wait_for_thread_state_change(thread, STATE_STOPPED);
1372 FSHOW_SIGNAL((stderr,"resumed\n"));
1374 if(thread_state(thread)!=STATE_RUNNING) {
1375 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1376 fixnum_value(thread_state(thread)));
1379 if (was_in_lisp) {
1380 undo_fake_foreign_function_call(context);
1384 #endif
1386 void
1387 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1389 SAVE_ERRNO(signal,context,void_context);
1390 #ifndef LISP_FEATURE_WIN32
1391 if ((signal == SIGILL) || (signal == SIGBUS)
1392 #if !(defined(LISP_FEATURE_LINUX) || defined(LISP_FEATURE_ANDROID))
1393 || (signal == SIGEMT)
1394 #endif
1396 corruption_warning_and_maybe_lose("Signal %d received (PC: %p)", signal,
1397 *os_context_pc_addr(context));
1398 #endif
1399 interrupt_handle_now(signal, info, context);
1400 RESTORE_ERRNO;
1403 /* manipulate the signal context and stack such that when the handler
1404 * returns, it will call function instead of whatever it was doing
1405 * previously
1408 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1409 extern int *context_eflags_addr(os_context_t *context);
1410 #endif
1412 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1413 extern void post_signal_tramp(void);
1414 extern void call_into_lisp_tramp(void);
/* Rewrite CONTEXT so that when the signal handler returns, execution
 * resumes in FUNPTR (a call_into_lisp-compatible entry point) applied
 * to FUNCTION (a lispobj), instead of where the signal struck.  The
 * mechanism is architecture-specific: on x86/x86-64 a fake stack
 * frame is built so that post_signal_tramp can restore registers
 * afterwards; on other ports fake control stack frames are built and
 * the register-based calling convention is set up directly. */
1416 void
1417 arrange_return_to_c_function(os_context_t *context,
1418 call_into_lisp_lookalike funptr,
1419 lispobj function)
1421 #if !(defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_SAFEPOINT))
1422 check_gc_signals_unblocked_or_lose
1423 (os_context_sigmask_addr(context));
1424 #endif
1425 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1426 void * fun=native_pointer(function);
1427 void *code = &(((struct simple_fun *) fun)->code);
1428 #endif
1430 /* Build a stack frame showing `interrupted' so that the
1431 * user's backtrace makes (as much) sense (as usual) */
1433 /* fp state is saved and restored by call_into_lisp */
1434 /* FIXME: errno is not restored, but since current uses of this
1435 * function only call Lisp code that signals an error, it's not
1436 * much of a problem. In other words, running out of the control
1437 * stack between a syscall and (GET-ERRNO) may clobber errno if
1438 * something fails during signalling or in the handler. But I
1439 * can't see what can go wrong as long as there is no CONTINUE
1440 * like restart on them. */
1441 #ifdef LISP_FEATURE_X86
1442 /* Suppose the existence of some function that saved all
1443 * registers, called call_into_lisp, then restored GP registers and
1444 * returned. It would look something like this:
1446 push ebp
1447 mov ebp esp
1448 pushfl
1449 pushal
1450 push $0
1451 push $0
1452 pushl {address of function to call}
1453 call 0x8058db0 <call_into_lisp>
1454 addl $12,%esp
1455 popal
1456 popfl
1457 leave
1460 * What we do here is set up the stack that call_into_lisp would
1461 * expect to see if it had been called by this code, and frob the
1462 * signal context so that signal return goes directly to call_into_lisp,
1463 * and when that function (and the lisp function it invoked) returns,
1464 * it returns to the second half of this imaginary function which
1465 * restores all registers and returns to C
1467 * For this to work, the latter part of the imaginary function
1468 * must obviously exist in reality. That would be post_signal_tramp
1471 #ifndef LISP_FEATURE_DARWIN
1472 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1473 #endif
1475 #if defined(LISP_FEATURE_DARWIN)
1476 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1478 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function,
1479 *os_context_register_addr(context,reg_ESP)));
1480 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1482 /* 1. os_validate (malloc/mmap) register_save_block
1483 * 2. copy register state into register_save_block
1484 * 3. put a pointer to register_save_block in a register in the context
1485 * 4. set the context's EIP to point to a trampoline which:
1486 * a. builds the fake stack frame from the block
1487 * b. frees the block
1488 * c. calls the function
1491 *register_save_area = *os_context_pc_addr(context);
1492 *(register_save_area + 1) = function;
1493 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1494 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1495 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1496 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1497 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1498 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1499 *(register_save_area + 8) = *context_eflags_addr(context);
1501 *os_context_pc_addr(context) =
1502 (os_context_register_t) funptr;
1503 *os_context_register_addr(context,reg_ECX) =
1504 (os_context_register_t) register_save_area;
1505 #else
1507 /* return address for call_into_lisp: */
1508 *(sp-15) = (u32)post_signal_tramp;
1509 *(sp-14) = function; /* args for call_into_lisp : function*/
1510 *(sp-13) = 0; /* arg array */
1511 *(sp-12) = 0; /* no. args */
1512 /* this order matches that used in POPAD */
1513 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1514 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1516 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1517 /* POPAD ignores the value of ESP: */
1518 *(sp-8)=0;
1519 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1521 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1522 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1523 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1524 *(sp-3)=*context_eflags_addr(context);
1525 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1526 *(sp-1)=*os_context_pc_addr(context);
1528 #endif
1530 #elif defined(LISP_FEATURE_X86_64)
1531 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1533 /* return address for call_into_lisp: */
1534 *(sp-18) = (u64)post_signal_tramp;
/* Saved GP registers in the order post_signal_tramp pops them. */
1536 *(sp-17)=*os_context_register_addr(context,reg_R15);
1537 *(sp-16)=*os_context_register_addr(context,reg_R14);
1538 *(sp-15)=*os_context_register_addr(context,reg_R13);
1539 *(sp-14)=*os_context_register_addr(context,reg_R12);
1540 *(sp-13)=*os_context_register_addr(context,reg_R11);
1541 *(sp-12)=*os_context_register_addr(context,reg_R10);
1542 *(sp-11)=*os_context_register_addr(context,reg_R9);
1543 *(sp-10)=*os_context_register_addr(context,reg_R8);
1544 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1545 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1546 /* skip RBP and RSP */
1547 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1548 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1549 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1550 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1551 *(sp-3)=*context_eflags_addr(context);
1552 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1553 *(sp-1)=*os_context_pc_addr(context);
/* System V AMD64 argument registers for call_into_lisp. */
1555 *os_context_register_addr(context,reg_RDI) =
1556 (os_context_register_t)function; /* function */
1557 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1558 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1559 #else
1560 struct thread *th=arch_os_get_current_thread();
1561 build_fake_control_stack_frames(th,context);
1562 #endif
1564 #ifdef LISP_FEATURE_X86
1566 #if !defined(LISP_FEATURE_DARWIN)
1567 *os_context_pc_addr(context) = (os_context_register_t)funptr;
1568 *os_context_register_addr(context,reg_ECX) = 0;
1569 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
1570 #ifdef __NetBSD__
1571 *os_context_register_addr(context,reg_UESP) =
1572 (os_context_register_t)(sp-15);
1573 #else
1574 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1575 #endif /* __NETBSD__ */
1576 #endif /* LISP_FEATURE_DARWIN */
1578 #elif defined(LISP_FEATURE_X86_64)
1579 *os_context_pc_addr(context) = (os_context_register_t)funptr;
1580 *os_context_register_addr(context,reg_RCX) = 0;
1581 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1582 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1583 #else
1584 /* this much of the calling convention is common to all
1585 non-x86 ports */
1586 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1587 *os_context_register_addr(context,reg_NARGS) = 0;
1588 #ifdef reg_LIP
1589 *os_context_register_addr(context,reg_LIP) =
1590 (os_context_register_t)(unsigned long)code;
1591 #endif
1592 *os_context_register_addr(context,reg_CFP) =
1593 (os_context_register_t)(unsigned long)access_control_frame_pointer(th);
1594 #endif
1595 #ifdef ARCH_HAS_NPC_REGISTER
/* Architectures with delay slots also need the next-PC set. */
1596 *os_context_npc_addr(context) =
1597 4 + *os_context_pc_addr(context);
1598 #endif
1599 #if defined(LISP_FEATURE_SPARC) || defined(LISP_FEATURE_ARM) || defined(LISP_FEATURE_ARM64)
1600 *os_context_register_addr(context,reg_CODE) =
1601 (os_context_register_t)((char*)fun + FUN_POINTER_LOWTAG);
1602 #endif
1603 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1604 (long)function));
/* Arrange for the interrupted CONTEXT to call FUNCTION (a lispobj)
 * on return from the signal handler, via the platform-appropriate
 * call_into_lisp entry point. */
1607 void
1608 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1610 #if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_X86)
/* Darwin/x86 uses a trampoline instead of calling call_into_lisp
 * directly; see the Darwin branch of arrange_return_to_c_function. */
1611 arrange_return_to_c_function(context,
1612 (call_into_lisp_lookalike)call_into_lisp_tramp,
1613 function);
1614 #else
1615 arrange_return_to_c_function(context, call_into_lisp, function);
1616 #endif
1619 // These have undefined_alien_function tramp in x-assem.S
1620 #if !(defined(LISP_FEATURE_X86_64) || defined(LISP_FEATURE_ARM) || defined(LISP_FEATURE_ARM64))
1621 /* KLUDGE: Theoretically the approach we use for undefined alien
1622 * variables should work for functions as well, but on PPC/Darwin
1623 * we get bus error at bogus addresses instead, hence this workaround,
1624 * that has the added benefit of automatically discriminating between
1625 * functions and variables.
/* Trampoline target for calls through an undefined alien function:
 * signals the corresponding Lisp error. */
1627 void
1628 undefined_alien_function(void)
1630 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUN_ERROR));
1632 #endif
1634 void lower_thread_control_stack_guard_page(struct thread *th)
1636 protect_control_stack_guard_page(0, th);
1637 protect_control_stack_return_guard_page(1, th);
1638 th->control_stack_guard_page_protected = NIL;
1639 fprintf(stderr, "INFO: Control stack guard page unprotected\n");
1642 void reset_thread_control_stack_guard_page(struct thread *th)
1644 memset(CONTROL_STACK_GUARD_PAGE(th), 0, os_vm_page_size);
1645 protect_control_stack_guard_page(1, th);
1646 protect_control_stack_return_guard_page(0, th);
1647 th->control_stack_guard_page_protected = T;
1648 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1651 /* Called from the REPL, too. */
1652 void reset_control_stack_guard_page(void)
1654 struct thread *th=arch_os_get_current_thread();
1655 if (th->control_stack_guard_page_protected == NIL) {
1656 reset_thread_control_stack_guard_page(th);
/* Lower the control-stack guard page of the current thread. */
void lower_control_stack_guard_page(void)
{
    lower_thread_control_stack_guard_page(arch_os_get_current_thread());
}
/* Dispatch a memory fault at ADDR against the various guard pages
 * (control/binding/alien stacks, hard guards, return guards, and the
 * undefined-alien page).  Returns 1 if the fault was one of ours and
 * was handled, 0 if the caller should treat it as a real fault. */
1665 boolean
1666 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1668 struct thread *th=arch_os_get_current_thread();
/* Hard guard pages are never unprotected: hitting one is fatal. */
1670 if(addr >= CONTROL_STACK_HARD_GUARD_PAGE(th) &&
1671 addr < CONTROL_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1672 lose("Control stack exhausted, fault: %p, PC: %p",
1673 addr, *os_context_pc_addr(context));
1675 else if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1676 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1677 /* We hit the end of the control stack: disable guard page
1678 * protection so the error handler has some headroom, protect the
1679 * previous page so that we can catch returns from the guard page
1680 * and restore it. */
1681 if (th->control_stack_guard_page_protected == NIL)
1682 lose("control_stack_guard_page_protected NIL");
1683 lower_control_stack_guard_page();
1684 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1685 /* For the unfortunate case, when the control stack is
1686 * exhausted in a signal handler. */
1687 unblock_signals_in_context_and_maybe_warn(context);
1688 #endif
1689 arrange_return_to_lisp_function
1690 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1691 return 1;
1693 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1694 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1695 /* We're returning from the guard page: reprotect it, and
1696 * unprotect this one. This works even if we somehow missed
1697 * the return-guard-page, and hit it on our way to new
1698 * exhaustion instead. */
1699 if (th->control_stack_guard_page_protected != NIL)
1700 lose("control_stack_guard_page_protected not NIL");
1701 reset_control_stack_guard_page();
1702 return 1;
1704 else if(addr >= BINDING_STACK_HARD_GUARD_PAGE(th) &&
1705 addr < BINDING_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1706 lose("Binding stack exhausted");
1708 else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
1709 addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1710 protect_binding_stack_guard_page(0, NULL);
1711 protect_binding_stack_return_guard_page(1, NULL);
1712 fprintf(stderr, "INFO: Binding stack guard page unprotected\n");
1714 /* For the unfortunate case, when the binding stack is
1715 * exhausted in a signal handler. */
1716 unblock_signals_in_context_and_maybe_warn(context);
1717 arrange_return_to_lisp_function
1718 (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
1719 return 1;
1721 else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
1722 addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1723 protect_binding_stack_guard_page(1, NULL);
1724 protect_binding_stack_return_guard_page(0, NULL);
1725 fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
1726 return 1;
1728 else if(addr >= ALIEN_STACK_HARD_GUARD_PAGE(th) &&
1729 addr < ALIEN_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1730 lose("Alien stack exhausted");
1732 else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
1733 addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1734 protect_alien_stack_guard_page(0, NULL);
1735 protect_alien_stack_return_guard_page(1, NULL);
1736 fprintf(stderr, "INFO: Alien stack guard page unprotected\n");
1738 /* For the unfortunate case, when the alien stack is
1739 * exhausted in a signal handler. */
1740 unblock_signals_in_context_and_maybe_warn(context);
1741 arrange_return_to_lisp_function
1742 (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
1743 return 1;
1745 else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
1746 addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1747 protect_alien_stack_guard_page(1, NULL);
1748 protect_alien_stack_return_guard_page(0, NULL);
1749 fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
1750 return 1;
1752 else if (addr >= undefined_alien_address &&
1753 addr < undefined_alien_address + os_vm_page_size) {
/* Read of the poisoned undefined-alien page: signal a Lisp error. */
1754 arrange_return_to_lisp_function
1755 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1756 return 1;
1758 else return 0;
1762 * noise to install handlers
1765 #ifndef LISP_FEATURE_WIN32
1766 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1767 * they are blocked, in Linux 2.6 the default handler is invoked
1768 * instead that usually coredumps. One might hastily think that adding
1769 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
1770 * the whole sa_mask is ignored, instead of just not adding the signal
1771 * in question to the mask. That means if it's not blockable the
1772 * signal must be unblocked at the beginning of signal handlers.
1774 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1775 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1776 * will be unblocked in the sigmask during the signal handler. -- RMK
1777 * X-mas day, 2005
/* Result of the runtime SA_NODEFER probe below: -1 = not yet
 * determined, 0 = broken, 1 = works as specified.  Written from a
 * signal handler and polled by the main thread, hence volatile. */
1779 static volatile int sigaction_nodefer_works = -1;
/* Two otherwise-unrelated signals used only by the probe. */
1781 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1782 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
/* Handler for the SA_NODEFER probe: inspects the current signal mask
 * and records in sigaction_nodefer_works whether it matches what a
 * correctly-behaving SA_NODEFER implementation would produce. */
1784 static void
1785 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1787 sigset_t current;
1788 int i;
1789 get_current_sigmask(&current);
1790 /* There should be exactly two blocked signals: the two we added
1791 * to sa_mask when setting up the handler. NetBSD doesn't block
1792 * the signal we're handling when SA_NODEFER is set; Linux before
1793 * 2.6.13 or so also doesn't block the other signal when
1794 * SA_NODEFER is set. */
1795 for(i = 1; i < NSIG; i++)
1796 if (sigismember(&current, i) !=
1797 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1798 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1799 sigaction_nodefer_works = 0;
/* Only report success if no mismatch was found above. */
1801 if (sigaction_nodefer_works == -1)
1802 sigaction_nodefer_works = 1;
1805 static void
1806 see_if_sigaction_nodefer_works(void)
1808 struct sigaction sa, old_sa;
1810 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1811 sa.sa_sigaction = sigaction_nodefer_test_handler;
1812 sigemptyset(&sa.sa_mask);
1813 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1814 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1815 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1816 /* Make sure no signals are blocked. */
1818 sigset_t empty;
1819 sigemptyset(&empty);
1820 thread_sigmask(SIG_SETMASK, &empty, 0);
1822 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
1823 while (sigaction_nodefer_works == -1);
1824 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1827 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1828 #undef SA_NODEFER_TEST_KILL_SIGNAL
1830 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
1832 static void *
1833 signal_thread_trampoline(void *pthread_arg)
1835 intptr_t signo = (intptr_t) pthread_arg;
1836 os_context_t fake_context;
1837 siginfo_t fake_info;
1838 #ifdef LISP_FEATURE_PPC
1839 mcontext_t uc_regs;
1840 #endif
1842 memset(&fake_info, 0, sizeof(fake_info));
1843 memset(&fake_context, 0, sizeof(fake_context));
1844 #ifdef LISP_FEATURE_PPC
1845 memset(&uc_regs, 0, sizeof(uc_regs));
1846 fake_context.uc_mcontext.uc_regs = &uc_regs;
1847 #endif
1849 *os_context_pc_addr(&fake_context) = (intptr_t) &signal_thread_trampoline;
1850 #ifdef ARCH_HAS_STACK_POINTER /* aka x86(-64) */
1851 *os_context_sp_addr(&fake_context) = (intptr_t) __builtin_frame_address(0);
1852 #endif
1854 signal_handler_callback(interrupt_handlers[signo].lisp,
1855 signo, &fake_info, &fake_context);
1856 return 0;
1859 static void
1860 sigprof_handler_trampoline(int signal, siginfo_t *info, void *void_context)
1862 SAVE_ERRNO(signal,context,void_context);
1863 struct thread *self = arch_os_get_current_thread();
1865 /* alloc() is not re-entrant and still uses pseudo atomic (even though
1866 * inline allocation does not). In this case, give up. */
1867 if (get_pseudo_atomic_atomic(self))
1868 goto cleanup;
1870 struct alloc_region tmp = self->alloc_region;
1871 self->alloc_region = self->sprof_alloc_region;
1872 self->sprof_alloc_region = tmp;
1874 interrupt_handle_now_handler(signal, info, void_context);
1876 /* And we're back. We know that the SIGPROF handler never unwinds
1877 * non-locally, and can simply swap things back: */
1879 tmp = self->alloc_region;
1880 self->alloc_region = self->sprof_alloc_region;
1881 self->sprof_alloc_region = tmp;
1883 cleanup:
1884 ; /* Dear C compiler, it's OK to have a label here. */
1885 RESTORE_ERRNO;
1888 static void
1889 spawn_signal_thread_handler(int signal, siginfo_t *info, void *void_context)
1891 SAVE_ERRNO(signal,context,void_context);
1893 pthread_attr_t attr;
1894 pthread_t th;
1896 if (pthread_attr_init(&attr))
1897 goto lost;
1898 if (pthread_attr_setstacksize(&attr, thread_control_stack_size))
1899 goto lost;
1900 if (pthread_create(&th, &attr, &signal_thread_trampoline, (void*)(intptr_t) signal))
1901 goto lost;
1902 if (pthread_attr_destroy(&attr))
1903 goto lost;
1905 RESTORE_ERRNO;
1906 return;
1908 lost:
1909 lose("spawn_signal_thread_handler");
1911 #endif
1913 static void
1914 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1916 SAVE_ERRNO(signal,context,void_context);
1917 sigset_t unblock;
1919 sigemptyset(&unblock);
1920 sigaddset(&unblock, signal);
1921 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1922 interrupt_handle_now(signal, info, context);
1923 RESTORE_ERRNO;
1926 static void
1927 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1929 SAVE_ERRNO(signal,context,void_context);
1930 sigset_t unblock;
1932 sigemptyset(&unblock);
1933 sigaddset(&unblock, signal);
1934 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1935 (*interrupt_low_level_handlers[signal])(signal, info, context);
1936 RESTORE_ERRNO;
1939 static void
1940 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1942 SAVE_ERRNO(signal,context,void_context);
1943 (*interrupt_low_level_handlers[signal])(signal, info, context);
1944 RESTORE_ERRNO;
1947 void
1948 undoably_install_low_level_interrupt_handler (int signal,
1949 interrupt_handler_t handler)
1951 struct sigaction sa;
1953 if (0 > signal || signal >= NSIG) {
1954 lose("bad signal number %d\n", signal);
1957 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1958 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1959 else if (sigismember(&deferrable_sigset,signal))
1960 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1961 else if (!sigaction_nodefer_works &&
1962 !sigismember(&blockable_sigset, signal))
1963 sa.sa_sigaction = low_level_unblock_me_trampoline;
1964 else
1965 sa.sa_sigaction = low_level_handle_now_handler;
1967 #ifdef LISP_FEATURE_SB_THRUPTION
1968 /* It's in `deferrable_sigset' so that we block&unblock it properly,
1969 * but we don't actually want to defer it. And if we put it only
1970 * into blockable_sigset, we'd have to special-case it around thread
1971 * creation at least. */
1972 if (signal == SIGPIPE)
1973 sa.sa_sigaction = low_level_handle_now_handler;
1974 #endif
1976 sigcopyset(&sa.sa_mask, &blockable_sigset);
1977 sa.sa_flags = SA_SIGINFO | SA_RESTART
1978 | (sigaction_nodefer_works ? SA_NODEFER : 0);
1979 #if defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK)
1980 if(signal==SIG_MEMORY_FAULT) {
1981 sa.sa_flags |= SA_ONSTACK;
1982 # ifdef LISP_FEATURE_SB_SAFEPOINT
1983 sigaddset(&sa.sa_mask, SIGRTMIN);
1984 sigaddset(&sa.sa_mask, SIGRTMIN+1);
1985 # endif
1987 #endif
1989 sigaction(signal, &sa, NULL);
1990 interrupt_low_level_handlers[signal] =
1991 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1993 #endif
1995 /* This is called from Lisp. */
1996 uword_t
1997 install_handler(int signal, void handler(int, siginfo_t*, os_context_t*),
1998 int synchronous)
2000 #ifndef LISP_FEATURE_WIN32
2001 struct sigaction sa;
2002 sigset_t old;
2003 union interrupt_handler oldhandler;
2005 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
2007 block_blockable_signals(&old);
2009 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%p\n",
2010 interrupt_low_level_handlers[signal]));
2011 if (interrupt_low_level_handlers[signal]==0) {
2012 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
2013 ARE_SAME_HANDLER(handler, SIG_IGN))
2014 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
2015 #ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
2016 else if (signal == SIGPROF)
2017 sa.sa_sigaction = sigprof_handler_trampoline;
2018 else if (!synchronous)
2019 sa.sa_sigaction = spawn_signal_thread_handler;
2020 #endif
2021 else if (sigismember(&deferrable_sigset, signal))
2022 sa.sa_sigaction = maybe_now_maybe_later;
2023 else if (!sigaction_nodefer_works &&
2024 !sigismember(&blockable_sigset, signal))
2025 sa.sa_sigaction = unblock_me_trampoline;
2026 else
2027 sa.sa_sigaction = interrupt_handle_now_handler;
2029 sigcopyset(&sa.sa_mask, &blockable_sigset);
2030 sa.sa_flags = SA_SIGINFO | SA_RESTART |
2031 (sigaction_nodefer_works ? SA_NODEFER : 0);
2032 sigaction(signal, &sa, NULL);
2035 oldhandler = interrupt_handlers[signal];
2036 interrupt_handlers[signal].c = handler;
2038 thread_sigmask(SIG_SETMASK, &old, 0);
2040 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
2042 return (uword_t)oldhandler.lisp;
2043 #else
2044 /* Probably-wrong Win32 hack */
2045 return 0;
2046 #endif
2049 /* This must not go through lisp as it's allowed anytime, even when on
2050 * the altstack. */
2051 void
2052 sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
2054 /* Save the interrupt context. No need to undo it, since lose()
2055 * shouldn't return. */
2056 fake_foreign_function_call(context);
2057 lose("SIGABRT received.\n");
2060 void
2061 interrupt_init(void)
2063 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
2064 int i;
2065 SHOW("entering interrupt_init()");
2066 #ifndef LISP_FEATURE_WIN32
2067 see_if_sigaction_nodefer_works();
2068 #endif
2069 sigemptyset(&deferrable_sigset);
2070 sigemptyset(&blockable_sigset);
2071 sigemptyset(&gc_sigset);
2072 sigaddset_deferrable(&deferrable_sigset);
2073 sigaddset_blockable(&blockable_sigset);
2074 sigaddset_gc(&gc_sigset);
2075 #endif
2077 #ifndef LISP_FEATURE_WIN32
2078 /* Set up high level handler information. */
2079 for (i = 0; i < NSIG; i++) {
2080 interrupt_handlers[i].c =
2081 /* (The cast here blasts away the distinction between
2082 * SA_SIGACTION-style three-argument handlers and
2083 * signal(..)-style one-argument handlers, which is OK
2084 * because it works to call the 1-argument form where the
2085 * 3-argument form is expected.) */
2086 (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
2088 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
2089 #endif
2090 SHOW("returning from interrupt_init()");
2093 #ifndef LISP_FEATURE_WIN32
2095 siginfo_code(siginfo_t *info)
2097 return info->si_code;
2099 os_vm_address_t current_memory_fault_address;
2101 void
2102 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
2104 /* FIXME: This is lossy: if we get another memory fault (eg. from
2105 * another thread) before lisp has read this, we lose the information.
2106 * However, since this is mostly informative, we'll live with that for
2107 * now -- some address is better then no address in this case.
2109 current_memory_fault_address = addr;
2111 /* If we lose on corruption, provide LDB with debugging information. */
2112 fake_foreign_function_call(context);
2114 /* To allow debugging memory faults in signal handlers and such. */
2115 corruption_warning_and_maybe_lose("Memory fault at %p (pc=%p, sp=%p)",
2116 addr,
2117 *os_context_pc_addr(context),
2118 #ifdef ARCH_HAS_STACK_POINTER
2119 *os_context_sp_addr(context)
2120 #else
2122 #endif
2124 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
2125 undo_fake_foreign_function_call(context);
2126 unblock_signals_in_context_and_maybe_warn(context);
2127 arrange_return_to_lisp_function(context,
2128 StaticSymbolFunction(MEMORY_FAULT_ERROR));
2129 #else
2130 unblock_gc_signals(0, 0);
2131 funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR));
2132 undo_fake_foreign_function_call(context);
2133 #endif
2135 #endif
2137 static void
2138 unhandled_trap_error(os_context_t *context)
2140 DX_ALLOC_SAP(context_sap, context);
2141 fake_foreign_function_call(context);
2142 #ifndef LISP_FEATURE_SB_SAFEPOINT
2143 unblock_gc_signals(0, 0);
2144 #endif
2146 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
2147 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
2148 #endif
2149 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
2150 lose("UNHANDLED-TRAP-ERROR fell through");
2153 /* Common logic for trapping instructions. How we actually handle each
2154 * case is highly architecture dependent, but the overall shape is
2155 * this. */
2156 void
2157 handle_trap(os_context_t *context, int trap)
2159 switch(trap) {
2160 #if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
2161 case trap_PendingInterrupt:
2162 FSHOW((stderr, "/<trap pending interrupt>\n"));
2163 arch_skip_instruction(context);
2164 interrupt_handle_pending(context);
2165 break;
2166 #endif
2167 case trap_Error:
2168 case trap_Cerror:
2169 #ifdef trap_InvalidArgCount
2170 case trap_InvalidArgCount:
2171 #endif
2172 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
2173 interrupt_internal_error(context, trap==trap_Cerror);
2174 break;
2175 case trap_Breakpoint:
2176 arch_handle_breakpoint(context);
2177 break;
2178 case trap_FunEndBreakpoint:
2179 arch_handle_fun_end_breakpoint(context);
2180 break;
2181 #ifdef trap_AfterBreakpoint
2182 case trap_AfterBreakpoint:
2183 arch_handle_after_breakpoint(context);
2184 break;
2185 #endif
2186 #ifdef trap_SingleStepAround
2187 case trap_SingleStepAround:
2188 case trap_SingleStepBefore:
2189 arch_handle_single_step_trap(context, trap);
2190 break;
2191 #endif
2192 #ifdef trap_GlobalSafepoint
2193 case trap_GlobalSafepoint:
2194 fake_foreign_function_call(context);
2195 thread_in_lisp_raised(context);
2196 undo_fake_foreign_function_call(context);
2197 arch_skip_instruction(context);
2198 break;
2199 case trap_CspSafepoint:
2200 fake_foreign_function_call(context);
2201 thread_in_safety_transition(context);
2202 undo_fake_foreign_function_call(context);
2203 arch_skip_instruction(context);
2204 break;
2205 #endif
2206 #if defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_GENCGC)
2207 case trap_Allocation:
2208 arch_handle_allocation_trap(context);
2209 arch_skip_instruction(context);
2210 break;
2211 #endif
2212 case trap_Halt:
2213 fake_foreign_function_call(context);
2214 lose("%%PRIMITIVE HALT called; the party is over.\n");
2215 default:
2216 unhandled_trap_error(context);