Unbreak sparc build.
[sbcl.git] / src / runtime / interrupt.c
blobcad97525db3c4c587af1bfbe85e46a6d917bf45d
1 /*
2 * interrupt-handling magic
3 */
5 /*
6 * This software is part of the SBCL system. See the README file for
7 * more information.
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handle we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
43 #include "sbcl.h"
45 #include <stdio.h>
46 #include <stdlib.h>
47 #include <string.h>
48 #include <signal.h>
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
51 #include <sys/wait.h>
52 #endif
53 #include <errno.h>
55 #include "runtime.h"
56 #include "arch.h"
57 #include "os.h"
58 #include "interrupt.h"
59 #include "globals.h"
60 #include "lispregs.h"
61 #include "validate.h"
62 #include "interr.h"
63 #include "gc.h"
64 #include "alloc.h"
65 #include "dynbind.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
72 * This is a workaround for some slightly silly Linux/GNU Libc
73 * behaviour: glibc defines sigset_t to support 1024 signals, which is
74 * more than the kernel. This is usually not a problem, but becomes
75 * one when we want to save a signal mask from a ucontext, and restore
76 * it later into another ucontext: the ucontext is allocated on the
77 * stack by the kernel, so copying a libc-sized sigset_t into it will
78 * overflow and cause other data on the stack to be corrupted */
79 /* FIXME: do not rely on NSIG being a multiple of 8 */
81 #ifdef LISP_FEATURE_WIN32
82 # define REAL_SIGSET_SIZE_BYTES (4)
83 #else
84 # define REAL_SIGSET_SIZE_BYTES ((NSIG/8))
85 #endif
87 static inline void
88 sigcopyset(sigset_t *new, sigset_t *old)
90 memcpy(new, old, REAL_SIGSET_SIZE_BYTES);
/* NOTE(review): the numeric prefixes on the lines below, and the
 * missing blank/brace lines, appear to be artifacts of the dump this
 * file was recovered from -- restore from upstream before compiling. */
93 /* When we catch an internal error, should we pass it back to Lisp to
94 * be handled in a high-level way? (Early in cold init, the answer is
95 * 'no', because Lisp is still too brain-dead to handle anything.
96 * After sufficient initialization has been completed, the answer
97 * becomes 'yes'.) */
98 boolean internal_errors_enabled = 0;
/* C-level handlers for signals needing special treatment (see the
 * file-header comment); indexed by signal number. */
100 #ifndef LISP_FEATURE_WIN32
101 static
102 void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
103 #endif
/* Lisp-level handlers, also indexed by signal number. */
104 union interrupt_handler interrupt_handlers[NSIG];
106 /* Under Linux on some architectures, we appear to have to restore the
107 * FPU control word from the context, as after the signal is delivered
108 * we appear to have a null FPU control word. */
109 #if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
110 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
111 os_context_t *context = arch_os_get_context(&void_context); \
112 os_restore_fp_control(context);
113 #else
114 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
115 os_context_t *context = arch_os_get_context(&void_context);
116 #endif
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts -- restore from upstream before compiling. */
118 /* Foreign code may want to start some threads on its own.
119 * Non-targetted, truly asynchronous signals can be delivered to
120 * basically any thread, but invoking Lisp handlers in such foregign
121 * threads is really bad, so let's resignal it.
123 * This should at least bring attention to the problem, but it cannot
124 * work for SIGSEGV and similar. It is good enough for timers, and
125 * maybe all deferrables. */
/* Collect into *sigset every signal for which either a low-level (C)
 * or a Lisp handler is installed, i.e. anything not still SIG_DFL. */
127 #if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
128 static void
129 add_handled_signals(sigset_t *sigset)
131 int i;
132 for(i = 1; i < NSIG; i++) {
133 if (!(ARE_SAME_HANDLER(interrupt_low_level_handlers[i], SIG_DFL)) ||
134 !(ARE_SAME_HANDLER(interrupt_handlers[i].c, SIG_DFL))) {
135 sigaddset(sigset, i);
140 void block_signals(sigset_t *what, sigset_t *where, sigset_t *old);
141 #endif
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts -- restore from upstream before compiling. */
/* If the current thread is not a Lisp thread (no lisp_thread TSD),
 * block all handled signals here and in the interrupted context, then
 * re-kill() the process so a Lisp thread picks the signal up.
 * Returns 1 when it resignalled, 0 when the caller should handle the
 * signal itself. */
143 static boolean
144 maybe_resignal_to_lisp_thread(int signal, os_context_t *context)
146 #if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
147 if (!pthread_getspecific(lisp_thread)) {
/* Non-deferrable signals (e.g. SIGSEGV) cannot safely be resignalled;
 * warn loudly. */
148 if (!(sigismember(&deferrable_sigset,signal))) {
149 corruption_warning_and_maybe_lose
150 ("Received signal %d in non-lisp thread %lu, resignalling to a lisp thread.",
151 signal,
152 pthread_self());
155 sigset_t sigset;
156 sigemptyset(&sigset);
157 add_handled_signals(&sigset);
158 block_signals(&sigset, 0, 0);
159 block_signals(&sigset, os_context_sigmask_addr(context), 0);
160 kill(getpid(), signal);
162 return 1;
163 } else
164 #endif
165 return 0;
/* NOTE(review): numeric line prefixes and the missing brace/comment
 * delimiter lines below are dump artifacts; SAVE_ERRNO/RESTORE_ERRNO
 * normally open and close a scope around handler bodies -- restore from
 * upstream before compiling. */
168 /* These are to be used in signal handlers. Currently all handlers are
169 * called from one of:
171 * interrupt_handle_now_handler
172 * maybe_now_maybe_later
173 * unblock_me_trampoline
174 * low_level_handle_now_handler
175 * low_level_maybe_now_maybe_later
176 * low_level_unblock_me_trampoline
178 * This gives us a single point of control (or six) over errno, fp
179 * control word, and fixing up signal context on sparc.
181 * The SPARC/Linux platform doesn't quite do signals the way we want
182 * them done. The third argument in the handler isn't filled in by the
183 * kernel properly, so we fix it up ourselves in the
184 * arch_os_get_context(..) function. -- CSR, 2002-07-23
186 #define SAVE_ERRNO(signal,context,void_context) \
188 int _saved_errno = errno; \
189 RESTORE_FP_CONTROL_WORD(context,void_context); \
190 if (!maybe_resignal_to_lisp_thread(signal, context)) \
193 #define RESTORE_ERRNO \
195 errno = _saved_errno; \
/* Forward declarations for the deferred-interrupt machinery defined
 * later in this file. */
198 static void run_deferred_handler(struct interrupt_data *data,
199 os_context_t *context);
200 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
201 static void store_signal_data_for_later (struct interrupt_data *data,
202 void *handler, int signal,
203 siginfo_t *info,
204 os_context_t *context);
207 /* Generic signal related utilities. */
209 void
210 get_current_sigmask(sigset_t *sigset)
212 /* Get the current sigmask, by blocking the empty set. */
213 thread_sigmask(SIG_BLOCK, 0, sigset);
216 void
217 block_signals(sigset_t *what, sigset_t *where, sigset_t *old)
219 if (where) {
220 int i;
221 if (old)
222 sigcopyset(old, where);
223 for(i = 1; i < NSIG; i++) {
224 if (sigismember(what, i))
225 sigaddset(where, i);
227 } else {
228 thread_sigmask(SIG_BLOCK, what, old);
232 void
233 unblock_signals(sigset_t *what, sigset_t *where, sigset_t *old)
235 if (where) {
236 int i;
237 if (old)
238 sigcopyset(old, where);
239 for(i = 1; i < NSIG; i++) {
240 if (sigismember(what, i))
241 sigdelset(where, i);
243 } else {
244 thread_sigmask(SIG_UNBLOCK, what, old);
248 static void
249 print_sigset(sigset_t *sigset)
251 int i;
252 for(i = 1; i < NSIG; i++) {
253 if (sigismember(sigset, i))
254 fprintf(stderr, "Signal %d masked\n", i);
258 /* Return 1 is all signals is sigset2 are masked in sigset, return 0
259 * if all re unmasked else die. Passing NULL for sigset is a shorthand
260 * for the current sigmask. */
261 boolean
262 all_signals_blocked_p(sigset_t *sigset, sigset_t *sigset2,
263 const char *name)
265 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
266 int i;
267 boolean has_blocked = 0, has_unblocked = 0;
268 sigset_t current;
269 if (sigset == 0) {
270 get_current_sigmask(&current);
271 sigset = &current;
273 for(i = 1; i < NSIG; i++) {
274 if (sigismember(sigset2, i)) {
275 if (sigismember(sigset, i))
276 has_blocked = 1;
277 else
278 has_unblocked = 1;
281 if (has_blocked && has_unblocked) {
282 print_sigset(sigset);
283 lose("some %s signals blocked, some unblocked\n", name);
285 if (has_blocked)
286 return 1;
287 else
288 return 0;
289 #endif
293 /* Deferrables, blockables, gc signals. */
295 void
296 sigaddset_deferrable(sigset_t *s)
298 sigaddset(s, SIGHUP);
299 sigaddset(s, SIGINT);
300 sigaddset(s, SIGTERM);
301 sigaddset(s, SIGQUIT);
302 sigaddset(s, SIGPIPE);
303 sigaddset(s, SIGALRM);
304 sigaddset(s, SIGURG);
305 sigaddset(s, SIGTSTP);
306 sigaddset(s, SIGCHLD);
307 sigaddset(s, SIGIO);
308 #ifndef LISP_FEATURE_HPUX
309 sigaddset(s, SIGXCPU);
310 sigaddset(s, SIGXFSZ);
311 #endif
312 sigaddset(s, SIGVTALRM);
313 sigaddset(s, SIGPROF);
314 sigaddset(s, SIGWINCH);
317 void
318 sigaddset_blockable(sigset_t *sigset)
320 sigaddset_deferrable(sigset);
321 sigaddset_gc(sigset);
324 void
325 sigaddset_gc(sigset_t *sigset)
327 #ifdef THREADS_USING_GCSIGNAL
328 sigaddset(sigset,SIG_STOP_FOR_GC);
329 #endif
/* NOTE(review): numeric line prefixes below are dump artifacts.  The
 * trailing #endif closes a conditional whose opening line is not
 * visible in this dump -- TODO confirm against upstream. */
332 /* initialized in interrupt_init */
333 sigset_t deferrable_sigset;
334 sigset_t blockable_sigset;
335 sigset_t gc_sigset;
337 #endif
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts -- restore from upstream before compiling. */
/* True iff every deferrable signal is blocked in *sigset (NULL means
 * the current mask); loses on a mixture. */
339 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
340 boolean
341 deferrables_blocked_p(sigset_t *sigset)
343 return all_signals_blocked_p(sigset, &deferrable_sigset, "deferrable");
345 #endif
/* Assert that no deferrable signal is blocked. */
347 void
348 check_deferrables_unblocked_or_lose(sigset_t *sigset)
350 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
351 if (deferrables_blocked_p(sigset))
352 lose("deferrables blocked\n");
353 #endif
/* Assert that every deferrable signal is blocked. */
356 void
357 check_deferrables_blocked_or_lose(sigset_t *sigset)
359 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
360 if (!deferrables_blocked_p(sigset))
361 lose("deferrables unblocked\n");
362 #endif
/* True iff every blockable signal is blocked in *sigset. */
365 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
366 boolean
367 blockables_blocked_p(sigset_t *sigset)
369 return all_signals_blocked_p(sigset, &blockable_sigset, "blockable");
371 #endif
/* Assert that no blockable signal is blocked. */
373 void
374 check_blockables_unblocked_or_lose(sigset_t *sigset)
376 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
377 if (blockables_blocked_p(sigset))
378 lose("blockables blocked\n");
379 #endif
/* Assert that every blockable signal is blocked (skipped on Windows;
 * see the comment below). */
382 void
383 check_blockables_blocked_or_lose(sigset_t *sigset)
385 #if !defined(LISP_FEATURE_WIN32)
386 /* On Windows, there are no actual signals, but since the win32 port
387 * tracks the sigmask and checks it explicitly, some functions are
388 * still required to keep the mask set up properly. (After all, the
389 * goal of the sigmask emulation is to not have to change all the
390 * call sites in the first place.)
392 * However, this does not hold for all signals equally: While
393 * deferrables matter ("is interrupt-thread okay?"), it is not worth
394 * having to set up blockables properly (which include the
395 * non-existing GC signals).
397 * Yet, as the original comment explains it:
398 * Adjusting FREE-INTERRUPT-CONTEXT-INDEX* and other aspecs of
399 * fake_foreign_function_call machinery are sometimes useful here[...].
401 * So we merely skip this assertion.
402 * -- DFL, trying to expand on a comment by AK.
404 if (!blockables_blocked_p(sigset))
405 lose("blockables unblocked\n");
406 #endif
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts -- restore from upstream before compiling. */
/* GC-signal mask predicates and assertions; only meaningful on
 * non-safepoint builds, where GC stops threads with a signal. */
409 #ifndef LISP_FEATURE_SB_SAFEPOINT
410 #if !defined(LISP_FEATURE_WIN32)
411 boolean
412 gc_signals_blocked_p(sigset_t *sigset)
414 return all_signals_blocked_p(sigset, &gc_sigset, "gc");
416 #endif
/* Assert that gc signals are not blocked. */
418 void
419 check_gc_signals_unblocked_or_lose(sigset_t *sigset)
421 #if !defined(LISP_FEATURE_WIN32)
422 if (gc_signals_blocked_p(sigset))
423 lose("gc signals blocked\n");
424 #endif
/* Assert that gc signals are blocked. */
427 void
428 check_gc_signals_blocked_or_lose(sigset_t *sigset)
430 #if !defined(LISP_FEATURE_WIN32)
431 if (!gc_signals_blocked_p(sigset))
432 lose("gc signals unblocked\n");
433 #endif
435 #endif
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts -- restore from upstream before compiling. */
/* Block all deferrable signals in the live mask, saving the old mask
 * to *old when old is non-null. */
437 void
438 block_deferrable_signals(sigset_t *old)
440 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
441 block_signals(&deferrable_sigset, 0, old);
442 #endif
/* Block all blockable signals in the live mask. */
445 void
446 block_blockable_signals(sigset_t *old)
448 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
449 block_signals(&blockable_sigset, 0, old);
450 #endif
/* Unblock the deferrables in *where (or the live mask when where is
 * null); loses if a deferred handler is still pending. */
453 void
454 unblock_deferrable_signals(sigset_t *where, sigset_t *old)
456 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
457 if (interrupt_handler_pending_p())
458 lose("unblock_deferrable_signals: losing proposition\n");
459 #ifndef LISP_FEATURE_SB_SAFEPOINT
460 check_gc_signals_unblocked_or_lose(where);
461 #endif
462 unblock_signals(&deferrable_sigset, where, old);
463 #endif
/* Unblock the gc signals in *where (or the live mask). */
466 #ifndef LISP_FEATURE_SB_SAFEPOINT
467 void
468 unblock_gc_signals(sigset_t *where, sigset_t *old)
470 #ifndef LISP_FEATURE_WIN32
471 unblock_signals(&gc_sigset, where, old);
472 #endif
474 #endif
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts -- restore from upstream before compiling. */
/* Before returning to Lisp via this context, make sure gc signals and
 * (when no handler is pending) deferrables are unblocked in the
 * context's saved mask, warning loudly if gc signals were blocked. */
476 void
477 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
479 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
480 sigset_t *sigset = os_context_sigmask_addr(context);
481 #ifndef LISP_FEATURE_SB_SAFEPOINT
482 if (all_signals_blocked_p(sigset, &gc_sigset, "gc")) {
483 corruption_warning_and_maybe_lose(
484 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
485 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
486 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
487 unblock_gc_signals(sigset, 0);
489 #endif
490 if (!interrupt_handler_pending_p()) {
491 unblock_deferrable_signals(sigset, 0);
493 #endif
497 inline static void
498 check_interrupts_enabled_or_lose(os_context_t *context)
500 struct thread *thread=arch_os_get_current_thread();
501 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
502 lose("interrupts not enabled\n");
503 if (arch_pseudo_atomic_atomic(context))
504 lose ("in pseudo atomic section\n");
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts -- restore from upstream before compiling. */
507 /* Save sigset (or the current sigmask if 0) if there is no pending
508 * handler, because that means that deferabbles are already blocked.
509 * The purpose is to avoid losing the pending gc signal if a
510 * deferrable interrupt async unwinds between clearing the pseudo
511 * atomic and trapping to GC.*/
512 #ifndef LISP_FEATURE_SB_SAFEPOINT
513 void
514 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
516 #ifndef LISP_FEATURE_WIN32
517 struct thread *thread = arch_os_get_current_thread();
518 struct interrupt_data *data = thread->interrupt_data;
519 sigset_t oldset;
520 /* Obviously, this function is called when signals may not be
521 * blocked. Let's make sure we are not interrupted. */
522 block_blockable_signals(&oldset);
523 #ifndef LISP_FEATURE_SB_THREAD
524 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
525 * block. */
526 if (data->gc_blocked_deferrables)
527 lose("gc_blocked_deferrables already true\n");
528 #endif
529 if ((!data->pending_handler) &&
530 (!data->gc_blocked_deferrables)) {
531 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
532 data->gc_blocked_deferrables = 1;
533 if (sigset) {
534 /* This is the sigmask of some context. */
535 sigcopyset(&data->pending_mask, sigset);
536 sigaddset_deferrable(sigset);
537 thread_sigmask(SIG_SETMASK,&oldset,0);
538 return;
539 } else {
540 /* Operating on the current sigmask. Save oldset and
541 * unblock gc signals. In the end, this is equivalent to
542 * blocking the deferrables. */
543 sigcopyset(&data->pending_mask, &oldset);
544 thread_sigmask(SIG_UNBLOCK, &gc_sigset, 0);
545 return;
/* Fall-through path: nothing was saved, so restore the mask that was
 * in effect on entry. */
548 thread_sigmask(SIG_SETMASK,&oldset,0);
549 #endif
551 #endif
/* NOTE(review): numeric line prefixes below are dump artifacts, and
 * the function's return-type line (presumably `boolean`) appears to
 * have been dropped by the dump -- confirm against upstream. */
553 /* Are we leaving WITH-GCING and already running with interrupts
554 * enabled, without the protection of *GC-INHIBIT* T and there is gc
555 * (or stop for gc) pending, but we haven't trapped yet? */
557 in_leaving_without_gcing_race_p(struct thread *thread)
559 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
560 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
561 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
562 ((SymbolValue(GC_PENDING,thread) != NIL)
563 #if defined(LISP_FEATURE_SB_THREAD)
564 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
565 #endif
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts -- restore from upstream before compiling. */
569 /* Check our baroque invariants. */
/* Consistency checks relating the deferred-handler state, the
 * INTERRUPT-PENDING/GC-PENDING special variables, the pseudo-atomic
 * flag, and the context's signal mask; any violation lose()s. */
570 void
571 check_interrupt_context_or_lose(os_context_t *context)
573 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
574 struct thread *thread = arch_os_get_current_thread();
575 struct interrupt_data *data = thread->interrupt_data;
576 int interrupt_deferred_p = (data->pending_handler != 0);
577 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
578 sigset_t *sigset = os_context_sigmask_addr(context);
579 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
580 * handle_allocation_trap. */
581 #if defined(LISP_FEATURE_GENCGC) && !defined(GENCGC_IS_PRECISE)
582 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
583 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
584 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
585 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
586 int in_race_p = in_leaving_without_gcing_race_p(thread);
587 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
588 * section and trapping, a SIG_STOP_FOR_GC would see the next
589 * check fail, for this reason sig_stop_for_gc handler does not
590 * call this function. */
591 if (interrupt_deferred_p) {
592 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
593 lose("Stray deferred interrupt.\n");
595 if (gc_pending)
596 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
597 lose("GC_PENDING, but why?\n");
598 #if defined(LISP_FEATURE_SB_THREAD)
600 int stop_for_gc_pending =
601 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
602 if (stop_for_gc_pending)
603 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
604 lose("STOP_FOR_GC_PENDING, but why?\n");
605 if (pseudo_atomic_interrupted)
606 if (!(gc_pending || stop_for_gc_pending || interrupt_deferred_p))
607 lose("pseudo_atomic_interrupted, but why?\n");
609 #else
610 if (pseudo_atomic_interrupted)
611 if (!(gc_pending || interrupt_deferred_p))
612 lose("pseudo_atomic_interrupted, but why?\n");
613 #endif
614 #endif
615 if (interrupt_pending && !interrupt_deferred_p)
616 lose("INTERRUPT_PENDING but not pending handler.\n");
617 if ((data->gc_blocked_deferrables) && interrupt_pending)
618 lose("gc_blocked_deferrables and interrupt pending\n.");
619 if (data->gc_blocked_deferrables)
620 check_deferrables_blocked_or_lose(sigset);
621 if (interrupt_pending || interrupt_deferred_p ||
622 data->gc_blocked_deferrables)
623 check_deferrables_blocked_or_lose(sigset);
624 else {
625 check_deferrables_unblocked_or_lose(sigset);
626 #ifndef LISP_FEATURE_SB_SAFEPOINT
627 /* If deferrables are unblocked then we are open to signals
628 * that run lisp code. */
629 check_gc_signals_unblocked_or_lose(sigset);
630 #endif
632 #endif
/* NOTE(review): numeric line prefixes and missing brace/blank lines
 * below are dump artifacts -- restore from upstream before compiling. */
636 * utility routines used by various signal handlers
/* On non-x86oid targets, push fake control-stack frames describing the
 * interrupted Lisp state so the debugger/GC can walk the stack. */
639 static void
640 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
642 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
644 lispobj oldcont;
646 /* Build a fake stack frame or frames */
648 #if !defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_ARM64)
649 access_control_frame_pointer(th) =
650 (lispobj *)(uword_t)
651 (*os_context_register_addr(context, reg_CSP));
652 if ((lispobj *)(uword_t)
653 (*os_context_register_addr(context, reg_CFP))
654 == access_control_frame_pointer(th)) {
655 /* There is a small window during call where the callee's
656 * frame isn't built yet. */
657 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
658 == FUN_POINTER_LOWTAG) {
659 /* We have called, but not built the new frame, so
660 * build it for them. */
661 access_control_frame_pointer(th)[0] =
662 *os_context_register_addr(context, reg_OCFP);
663 access_control_frame_pointer(th)[1] =
664 *os_context_register_addr(context, reg_LRA);
665 access_control_frame_pointer(th) += 2;
666 /* Build our frame on top of it. */
667 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
669 else {
670 /* We haven't yet called, build our frame as if the
671 * partial frame wasn't there. */
672 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
674 } else
675 #elif defined (LISP_FEATURE_ARM)
676 access_control_frame_pointer(th) =
677 SymbolValue(CONTROL_STACK_POINTER, th);
678 #elif defined (LISP_FEATURE_ARM64)
679 access_control_frame_pointer(th) =
680 (lispobj *)(uword_t) (*os_context_register_addr(context, reg_CSP));
681 #endif
682 /* We can't tell whether we are still in the caller if it had to
683 * allocate a stack frame due to stack arguments. */
684 /* This observation provoked some past CMUCL maintainer to ask
685 * "Can anything strange happen during return?" */
687 /* normal case */
688 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
691 access_control_stack_pointer(th) = access_control_frame_pointer(th) + 3;
693 access_control_frame_pointer(th)[0] = oldcont;
694 access_control_frame_pointer(th)[1] = NIL;
695 access_control_frame_pointer(th)[2] =
696 (lispobj)(*os_context_register_addr(context, reg_CODE));
697 #endif
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts -- restore from upstream before compiling. */
700 /* Stores the context for gc to scavange and builds fake stack
701 * frames. */
/* Must be called with blockables blocked (asserted below); registers
 * the interrupted context in thread->interrupt_contexts and bumps
 * FREE-INTERRUPT-CONTEXT-INDEX via dynamic binding. */
702 void
703 fake_foreign_function_call(os_context_t *context)
705 int context_index;
706 struct thread *thread=arch_os_get_current_thread();
708 /* context_index incrementing must not be interrupted */
709 check_blockables_blocked_or_lose(0);
711 /* Get current Lisp state from context. */
712 #if defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_GENCGC)
713 dynamic_space_free_pointer = SymbolValue(ALLOCATION_POINTER, thread);
714 #endif
715 #ifdef reg_ALLOC
716 #ifdef LISP_FEATURE_SB_THREAD
717 thread->pseudo_atomic_bits =
718 #else
719 dynamic_space_free_pointer =
720 (lispobj *)(uword_t)
721 #endif
722 (*os_context_register_addr(context, reg_ALLOC));
723 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
724 /* dynamic_space_free_pointer); */
725 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
726 if ((sword_t)dynamic_space_free_pointer & 1) {
727 lose("dead in fake_foreign_function_call, context = %x\n", context);
729 #endif
730 /* why doesnt PPC and SPARC do something like this: */
731 #if defined(LISP_FEATURE_HPPA)
732 if ((sword_t)dynamic_space_free_pointer & 4) {
733 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
735 #endif
736 #endif
737 #ifdef reg_BSP
738 set_binding_stack_pointer(thread,
739 *os_context_register_addr(context, reg_BSP));
740 #endif
742 #if defined(LISP_FEATURE_ARM)
743 /* Stash our control stack pointer */
744 bind_variable(INTERRUPTED_CONTROL_STACK_POINTER,
745 SymbolValue(CONTROL_STACK_POINTER, thread),
746 thread);
747 #endif
749 build_fake_control_stack_frames(thread,context);
751 /* Do dynamic binding of the active interrupt context index
752 * and save the context in the context array. */
753 context_index =
754 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
756 if (context_index >= MAX_INTERRUPTS) {
757 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
760 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
761 make_fixnum(context_index + 1),thread);
763 thread->interrupt_contexts[context_index] = context;
765 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
766 /* x86oid targets don't maintain the foreign function call flag at
767 * all, so leave them to believe that they are never in foreign
768 * code. */
769 foreign_function_call_active_p(thread) = 1;
770 #endif
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts.  The orphaned second reg_ALLOC expression (original
 * lines 804-806) was presumably enclosed in comment delimiters dropped
 * by the dump -- confirm against upstream before compiling. */
773 /* blocks all blockable signals. If you are calling from a signal handler,
774 * the usual signal mask will be restored from the context when the handler
775 * finishes. Otherwise, be careful */
/* Reverses fake_foreign_function_call: clears the foreign-call flag,
 * unbinds FREE-INTERRUPT-CONTEXT-INDEX, and writes the allocation
 * state back into the context. */
776 void
777 undo_fake_foreign_function_call(os_context_t *context)
779 struct thread *thread=arch_os_get_current_thread();
780 /* Block all blockable signals. */
781 block_blockable_signals(0);
783 foreign_function_call_active_p(thread) = 0;
785 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
786 unbind(thread);
788 #if defined(LISP_FEATURE_ARM)
789 /* Restore our saved control stack pointer */
790 SetSymbolValue(CONTROL_STACK_POINTER,
791 SymbolValue(INTERRUPTED_CONTROL_STACK_POINTER,
792 thread),
793 thread);
794 unbind(thread);
795 #endif
797 #if defined(reg_ALLOC) && !defined(LISP_FEATURE_SB_THREAD)
798 /* Put the dynamic space free pointer back into the context. */
799 *os_context_register_addr(context, reg_ALLOC) =
800 (uword_t) dynamic_space_free_pointer
801 | (*os_context_register_addr(context, reg_ALLOC)
802 & LOWTAG_MASK);
804 ((uword_t)(*os_context_register_addr(context, reg_ALLOC))
805 & ~LOWTAG_MASK)
806 | ((uword_t) dynamic_space_free_pointer & LOWTAG_MASK);
808 #endif
809 #if defined(reg_ALLOC) && defined(LISP_FEATURE_SB_THREAD)
810 /* Put the pseudo-atomic bits and dynamic space free pointer back
811 * into the context (p-a-bits for p-a, and dynamic space free
812 * pointer for ROOM). */
813 *os_context_register_addr(context, reg_ALLOC) =
814 (uword_t) dynamic_space_free_pointer
815 | (thread->pseudo_atomic_bits & LOWTAG_MASK);
816 /* And clear them so we don't get bit later by call-in/call-out
817 * not updating them. */
818 thread->pseudo_atomic_bits = 0;
819 #endif
820 #if defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_GENCGC)
821 SetSymbolValue(ALLOCATION_POINTER, dynamic_space_free_pointer, thread);
822 #endif
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts -- restore from upstream before compiling. */
825 /* a handler for the signal caused by execution of a trap opcode
826 * signalling an internal error */
/* Hands the error off to Lisp's INTERNAL-ERROR when
 * internal_errors_enabled, else describes it and lose()s.  When
 * `continuable` is true, execution resumes past the trap. */
827 void
828 interrupt_internal_error(os_context_t *context, boolean continuable)
830 DX_ALLOC_SAP(context_sap, context);
832 fake_foreign_function_call(context);
834 if (!internal_errors_enabled) {
835 describe_internal_error(context);
836 /* There's no good way to recover from an internal error
837 * before the Lisp error handling mechanism is set up. */
838 lose("internal error too early in init, can't recover\n");
841 #ifndef LISP_FEATURE_SB_SAFEPOINT
842 unblock_gc_signals(0, 0);
843 #endif
845 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
846 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
847 #endif
849 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
850 /* Workaround for blocked SIGTRAP. */
852 sigset_t newset;
853 sigemptyset(&newset);
854 sigaddset(&newset, SIGTRAP);
855 thread_sigmask(SIG_UNBLOCK, &newset, 0);
857 #endif
859 SHOW("in interrupt_internal_error");
860 #if QSHOW == 2
861 /* Display some rudimentary debugging information about the
862 * error, so that even if the Lisp error handler gets badly
863 * confused, we have a chance to determine what's going on. */
864 describe_internal_error(context);
865 #endif
866 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
867 continuable ? T : NIL);
869 undo_fake_foreign_function_call(context); /* blocks signals again */
870 if (continuable)
871 arch_skip_instruction(context);
874 boolean
875 interrupt_handler_pending_p(void)
877 struct thread *thread = arch_os_get_current_thread();
878 struct interrupt_data *data = thread->interrupt_data;
879 return (data->pending_handler != 0);
/* NOTE(review): numeric line prefixes and missing brace lines below are
 * dump artifacts, and the function's closing brace falls past the end
 * of this dump -- restore from upstream before compiling. */
/* Central dispatcher for deferred work: restores a saved signal mask,
 * runs stop-for-gc / GC / (on safepoint builds) thruptions as
 * appropriate, then (past this excerpt) any deferred Lisp handler. */
882 void
883 interrupt_handle_pending(os_context_t *context)
885 /* There are three ways we can get here. First, if an interrupt
886 * occurs within pseudo-atomic, it will be deferred, and we'll
887 * trap to here at the end of the pseudo-atomic block. Second, if
888 * the GC (in alloc()) decides that a GC is required, it will set
889 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
890 * and alloc() is always called from within pseudo-atomic, and
891 * thus we end up here again. Third, when calling GC-ON or at the
892 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
893 * here if there is a pending GC. Fourth, ahem, at the end of
894 * WITHOUT-INTERRUPTS (bar complications with nesting).
896 * A fourth way happens with safepoints: In addition to a stop for
897 * GC that is pending, there are thruptions. Both mechanisms are
898 * mostly signal-free, yet also of an asynchronous nature, so it makes
899 * sense to let interrupt_handle_pending take care of running them:
900 * It gets run precisely at those places where it is safe to process
901 * pending asynchronous tasks. */
903 struct thread *thread = arch_os_get_current_thread();
904 struct interrupt_data *data = thread->interrupt_data;
906 if (arch_pseudo_atomic_atomic(context)) {
907 lose("Handling pending interrupt in pseudo atomic.");
910 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
912 check_blockables_blocked_or_lose(0);
913 #ifndef LISP_FEATURE_SB_SAFEPOINT
915 * (On safepoint builds, there is no gc_blocked_deferrables nor
916 * SIG_STOP_FOR_GC.)
918 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
919 * handler, then the pending mask was saved and
920 * gc_blocked_deferrables set. Hence, there can be no pending
921 * handler and it's safe to restore the pending mask.
923 * Note, that if gc_blocked_deferrables is false we may still have
924 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
925 * pseudo atomic was interrupt be a deferrable first. */
926 if (data->gc_blocked_deferrables) {
927 if (data->pending_handler)
928 lose("GC blocked deferrables but still got a pending handler.");
929 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
930 lose("GC blocked deferrables while GC is inhibited.");
931 /* Restore the saved signal mask from the original signal (the
932 * one that interrupted us during the critical section) into
933 * the os_context for the signal we're currently in the
934 * handler for. This should ensure that when we return from
935 * the handler the blocked signals are unblocked. */
936 #ifndef LISP_FEATURE_WIN32
937 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
938 #endif
939 data->gc_blocked_deferrables = 0;
941 #endif
943 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
944 void *original_pending_handler = data->pending_handler;
946 #ifdef LISP_FEATURE_SB_SAFEPOINT
947 /* handles the STOP_FOR_GC_PENDING case, plus THRUPTIONS */
948 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL
949 # ifdef LISP_FEATURE_SB_THRUPTION
950 || (SymbolValue(THRUPTION_PENDING,thread) != NIL
951 && SymbolValue(INTERRUPTS_ENABLED, thread) != NIL)
952 # endif
954 /* We ought to take this chance to do a pitstop now. */
955 thread_in_lisp_raised(context);
956 #elif defined(LISP_FEATURE_SB_THREAD)
957 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
958 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
959 * the signal handler if it actually stops us. */
960 arch_clear_pseudo_atomic_interrupted(context);
961 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
962 } else
963 #endif
964 /* Test for T and not for != NIL since the value :IN-PROGRESS
965 * is used in SUB-GC as part of the mechanism to supress
966 * recursive gcs.*/
967 if (SymbolValue(GC_PENDING,thread) == T) {
969 /* Two reasons for doing this. First, if there is a
970 * pending handler we don't want to run. Second, we are
971 * going to clear pseudo atomic interrupted to avoid
972 * spurious trapping on every allocation in SUB_GC and
973 * having a pending handler with interrupts enabled and
974 * without pseudo atomic interrupted breaks an
975 * invariant. */
976 if (data->pending_handler) {
977 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
978 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
981 arch_clear_pseudo_atomic_interrupted(context);
983 /* GC_PENDING is cleared in SUB-GC, or if another thread
984 * is doing a gc already we will get a SIG_STOP_FOR_GC and
985 * that will clear it.
987 * If there is a pending handler or gc was triggerred in a
988 * signal handler then maybe_gc won't run POST_GC and will
989 * return normally. */
990 if (!maybe_gc(context))
991 lose("GC not inhibited but maybe_gc did not GC.");
993 if (data->pending_handler) {
994 unbind(thread);
995 unbind(thread);
997 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
998 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
999 * GC-PENDING is not NIL then we cannot trap on pseudo
1000 * atomic due to GC (see if(GC_PENDING) logic in
1001 * cheneygc.c an gengcgc.c), plus there is a outer
1002 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
1003 * here? */
1004 lose("Trapping to run pending handler while GC in progress.");
1007 check_blockables_blocked_or_lose(0);
1009 /* No GC shall be lost. If SUB_GC triggers another GC then
1010 * that should be handled on the spot. */
1011 if (SymbolValue(GC_PENDING,thread) != NIL)
1012 lose("GC_PENDING after doing gc.");
1013 #ifdef THREADS_USING_GCSIGNAL
1014 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
1015 lose("STOP_FOR_GC_PENDING after doing gc.");
1016 #endif
1017 /* Check two things. First, that gc does not clobber a handler
1018 * that's already pending. Second, that there is no interrupt
1019 * lossage: if original_pending_handler was NULL then even if
1020 * an interrupt arrived during GC (POST-GC, really) it was
1021 * handled. */
1022 if (original_pending_handler != data->pending_handler)
1023 lose("pending handler changed in gc: %x -> %x.",
1024 original_pending_handler, data->pending_handler);
1027 #ifndef LISP_FEATURE_WIN32
1028 /* There may be no pending handler, because it was only a gc that
1029 * had to be executed or because Lisp is a bit too eager to call
1030 * DO-PENDING-INTERRUPT. */
1031 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
1032 (data->pending_handler)) {
1033 /* No matter how we ended up here, clear both
1034 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
1035 * because we checked above that there is no GC pending. */
1036 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
1037 arch_clear_pseudo_atomic_interrupted(context);
1038 /* Restore the sigmask in the context. */
1039 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
1040 run_deferred_handler(data, context);
1042 #ifdef LISP_FEATURE_SB_THRUPTION
1043 if (SymbolValue(THRUPTION_PENDING,thread)==T)
1044 /* Special case for the following situation: There is a
1045 * thruption pending, but a signal had been deferred. The
1046 * pitstop at the top of this function could only take care
1047 * of GC, and skipped the thruption, so we need to try again
1048 * now that INTERRUPT_PENDING and the sigmask have been
1049 * reset. */
1050 while (check_pending_thruptions(context))
1052 #endif
1053 #endif
1054 #ifdef LISP_FEATURE_GENCGC
1055 if (get_pseudo_atomic_interrupted(thread))
1056 lose("pseudo_atomic_interrupted after interrupt_handle_pending\n");
1057 #endif
1058 /* It is possible that the end of this function was reached
1059 * without never actually doing anything, the tests in Lisp for
1060 * when to call receive-pending-interrupt are not exact. */
1061 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
/* Dispatch SIGNAL to the handler registered for it in
 * interrupt_handlers[].  The handler may be a C function or a Lisp
 * function (distinguished by the lowtag of the union member).  Called
 * with blockable signals blocked; CONTEXT is the interrupted context. */
void
interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
{
    boolean were_in_lisp;
    union interrupt_handler handler;

    check_blockables_blocked_or_lose(0);

#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    /* Deferrable signals must only arrive here with interrupts
     * enabled, otherwise deferral machinery has been bypassed. */
    if (sigismember(&deferrable_sigset,signal))
        check_interrupts_enabled_or_lose(context);
#endif

    handler = interrupt_handlers[signal];

    if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
        return;
    }

    were_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());
    if (were_in_lisp)
    {
        /* Establish a fake foreign call frame so GC can see the
         * interrupted Lisp state. */
        fake_foreign_function_call(context);
    }

    FSHOW_SIGNAL((stderr,
                  "/entering interrupt_handle_now(%d, info, context)\n",
                  signal));

    if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {

        /* This can happen if someone tries to ignore or default one
         * of the signals we need for runtime support, and the runtime
         * support decides to pass on it. */
        lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);

    } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
        /* Once we've decided what to do about contexts in a
         * return-elsewhere world (the original context will no longer
         * be available; should we copy it or was nobody using it anyway?)
         * then we should convert this to return-elsewhere */

        /* CMUCL comment said "Allocate the SAPs while the interrupts
         * are still disabled.".  I (dan, 2003.08.21) assume this is
         * because we're not in pseudoatomic and allocation shouldn't
         * be interrupted.  In which case it's no longer an issue as
         * all our allocation from C now goes through a PA wrapper,
         * but still, doesn't hurt.
         *
         * Yeah, but non-gencgc platforms don't really wrap allocation
         * in PA. MG - 2005-08-29  */

#ifndef LISP_FEATURE_SB_SAFEPOINT
        /* Leave deferrable signals blocked, the handler itself will
         * allow signals again when it sees fit. */
        unblock_gc_signals(0, 0);
#else
        WITH_GC_AT_SAFEPOINTS_ONLY()
#endif
        { // the block is needed for WITH_GC_AT_SAFEPOINTS_ONLY() to work
            DX_ALLOC_SAP(context_sap, context);
            DX_ALLOC_SAP(info_sap, info);

            FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));

            funcall3(handler.lisp,
                     make_fixnum(signal),
                     info_sap,
                     context_sap);
        }
    } else {
        /* This cannot happen in sane circumstances. */

        FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));

#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
        /* Allow signals again. */
        thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
        (*handler.c)(signal, info, context);
#endif
    }

    if (were_in_lisp)
    {
        undo_fake_foreign_function_call(context); /* block signals again */
    }

    FSHOW_SIGNAL((stderr,
                  "/returning from interrupt_handle_now(%d, info, context)\n",
                  signal));
}
1158 /* This is called at the end of a critical section if the indications
1159 * are that some signal was deferred during the section. Note that as
1160 * far as C or the kernel is concerned we dealt with the signal
1161 * already; we're just doing the Lisp-level processing now that we
1162 * put off then */
1163 static void
1164 run_deferred_handler(struct interrupt_data *data, os_context_t *context)
1166 /* The pending_handler may enable interrupts and then another
1167 * interrupt may hit, overwrite interrupt_data, so reset the
1168 * pending handler before calling it. Trust the handler to finish
1169 * with the siginfo before enabling interrupts. */
1170 void (*pending_handler) (int, siginfo_t*, os_context_t*) =
1171 data->pending_handler;
1173 data->pending_handler=0;
1174 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
1175 (*pending_handler)(data->pending_signal,&(data->pending_info), context);
#ifndef LISP_FEATURE_WIN32
/* Decide whether HANDLER may run immediately.  If interrupts are
 * disabled or we are in a pseudo-atomic section, stash SIGNAL/INFO in
 * DATA for later delivery and return 1; otherwise return 0 so the
 * caller runs the handler right away.  Blockables must be blocked. */
boolean
maybe_defer_handler(void *handler, struct interrupt_data *data,
                    int signal, siginfo_t *info, os_context_t *context)
{
    struct thread *thread=arch_os_get_current_thread();

    check_blockables_blocked_or_lose(0);

    /* Sanity checks: only one signal may be deferred at a time. */
    if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
        lose("interrupt already pending\n");
    if (thread->interrupt_data->pending_handler)
        lose("there is a pending handler already (PA)\n");
    if (data->gc_blocked_deferrables)
        lose("maybe_defer_handler: gc_blocked_deferrables true\n");
    check_interrupt_context_or_lose(context);
    /* If interrupts are disabled then INTERRUPT_PENDING is set and
     * not PSEUDO_ATOMIC_INTERRUPTED.  This is important for a pseudo
     * atomic section inside a WITHOUT-INTERRUPTS.
     *
     * Also, if in_leaving_without_gcing_race_p then
     * interrupt_handle_pending is going to be called soon, so
     * stashing the signal away is safe.
     */
    if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
        in_leaving_without_gcing_race_p(thread)) {
        FSHOW_SIGNAL((stderr,
                      "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
                      (unsigned int)handler,signal,
                      in_leaving_without_gcing_race_p(thread)));
        store_signal_data_for_later(data,handler,signal,info,context);
        SetSymbolValue(INTERRUPT_PENDING, T,thread);
        check_interrupt_context_or_lose(context);
        return 1;
    }
    /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
     * actually use its argument for anything on x86, so this branch
     * may succeed even when context is null (gencgc alloc()) */
    if (arch_pseudo_atomic_atomic(context)) {
        FSHOW_SIGNAL((stderr,
                      "/maybe_defer_handler(%x,%d): deferred(PA)\n",
                      (unsigned int)handler,signal));
        store_signal_data_for_later(data,handler,signal,info,context);
        /* Arrange for a trap to interrupt_handle_pending when the PA
         * section is left. */
        arch_set_pseudo_atomic_interrupted(context);
        check_interrupt_context_or_lose(context);
        return 1;
    }
    FSHOW_SIGNAL((stderr,
                  "/maybe_defer_handler(%x,%d): not deferred\n",
                  (unsigned int)handler,signal));
    return 0;
}
1231 static void
1232 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1233 int signal,
1234 siginfo_t *info, os_context_t *context)
1236 if (data->pending_handler)
1237 lose("tried to overwrite pending interrupt handler %x with %x\n",
1238 data->pending_handler, handler);
1239 if (!handler)
1240 lose("tried to defer null interrupt handler\n");
1241 data->pending_handler = handler;
1242 data->pending_signal = signal;
1243 if(info)
1244 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1246 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1247 signal));
1249 if(!context)
1250 lose("Null context");
1252 /* the signal mask in the context (from before we were
1253 * interrupted) is copied to be restored when run_deferred_handler
1254 * happens. Then the usually-blocked signals are added to the mask
1255 * in the context so that we are running with blocked signals when
1256 * the handler returns */
1257 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1258 sigaddset_deferrable(os_context_sigmask_addr(context));
1261 static void
1262 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1264 SAVE_ERRNO(signal,context,void_context);
1265 struct thread *thread = arch_os_get_current_thread();
1266 struct interrupt_data *data = thread->interrupt_data;
1267 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1268 interrupt_handle_now(signal, info, context);
1269 RESTORE_ERRNO;
1272 static void
1273 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1274 os_context_t *context)
1276 /* No FP control fixage needed, caller has done that. */
1277 check_blockables_blocked_or_lose(0);
1278 check_interrupts_enabled_or_lose(context);
1279 (*interrupt_low_level_handlers[signal])(signal, info, context);
1280 /* No Darwin context fixage needed, caller does that. */
1283 static void
1284 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1286 SAVE_ERRNO(signal,context,void_context);
1287 struct thread *thread = arch_os_get_current_thread();
1288 struct interrupt_data *data = thread->interrupt_data;
1290 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1291 signal,info,context))
1292 low_level_interrupt_handle_now(signal, info, context);
1293 RESTORE_ERRNO;
1295 #endif
#ifdef THREADS_USING_GCSIGNAL

/* Stop this thread so another thread can GC.  Defers if GC is
 * inhibited or we are pseudo-atomic; otherwise parks the thread in
 * STATE_STOPPED until the GCing thread wakes it up.
 *
 * This function must not cons, because that may trigger a GC. */
void
sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
{
    struct thread *thread=arch_os_get_current_thread();
    boolean was_in_lisp;

    /* Test for GC_INHIBIT _first_, else we'd trap on every single
     * pseudo atomic until gc is finally allowed. */
    if (SymbolValue(GC_INHIBIT,thread) != NIL) {
        FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
        SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
        return;
    } else if (arch_pseudo_atomic_atomic(context)) {
        FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
        SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
        /* Trap to interrupt_handle_pending when the PA section ends. */
        arch_set_pseudo_atomic_interrupted(context);
        maybe_save_gc_mask_and_block_deferrables
            (os_context_sigmask_addr(context));
        return;
    }

    FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));

    /* Not PA and GC not inhibited -- we can stop now. */

    was_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());

    if (was_in_lisp) {
        /* need the context stored so it can have registers scavenged */
        fake_foreign_function_call(context);
    }

    /* Not pending anymore. */
    SetSymbolValue(GC_PENDING,NIL,thread);
    SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);

    /* Consider this: in a PA section GC is requested: GC_PENDING,
     * pseudo_atomic_interrupted and gc_blocked_deferrables are set,
     * deferrables are blocked then pseudo_atomic_atomic is cleared,
     * but a SIG_STOP_FOR_GC arrives before trapping to
     * interrupt_handle_pending.  Here, GC_PENDING is cleared but
     * pseudo_atomic_interrupted is not and we go on running with
     * pseudo_atomic_interrupted but without a pending interrupt or
     * GC.  GC_BLOCKED_DEFERRABLES is also left at 1.  So let's tidy it
     * up. */
    if (thread->interrupt_data->gc_blocked_deferrables) {
        FSHOW_SIGNAL((stderr,"cleaning up after gc_blocked_deferrables\n"));
        clear_pseudo_atomic_interrupted(thread);
        /* Restore the sigmask saved when deferrables were blocked. */
        sigcopyset(os_context_sigmask_addr(context),
                   &thread->interrupt_data->pending_mask);
        thread->interrupt_data->gc_blocked_deferrables = 0;
    }

    if(thread_state(thread)!=STATE_RUNNING) {
        lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
             fixnum_value(thread->state));
    }

    set_thread_state(thread,STATE_STOPPED);
    FSHOW_SIGNAL((stderr,"suspended\n"));

    /* While waiting for gc to finish occupy ourselves with zeroing
     * the unused portion of the control stack to reduce conservatism.
     * On hypothetic platforms with threads and exact gc it is
     * actually a must. */
    scrub_control_stack();

    wait_for_thread_state_change(thread, STATE_STOPPED);
    FSHOW_SIGNAL((stderr,"resumed\n"));

    if(thread_state(thread)!=STATE_RUNNING) {
        lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
             fixnum_value(thread_state(thread)));
    }

    if (was_in_lisp) {
        undo_fake_foreign_function_call(context);
    }
}

#endif
/* Outermost (OS-installed) handler wrapping interrupt_handle_now.
 * For signals that normally indicate corruption (SIGILL/SIGBUS, and
 * SIGEMT where it exists) a warning is printed first, since reaching
 * here for them means something has already gone wrong. */
void
interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(signal,context,void_context);
#ifndef LISP_FEATURE_WIN32
    if ((signal == SIGILL) || (signal == SIGBUS)
#if !(defined(LISP_FEATURE_LINUX) || defined(LISP_FEATURE_ANDROID))
        || (signal == SIGEMT)
#endif
        )
        corruption_warning_and_maybe_lose("Signal %d received (PC: %p)", signal,
                                          *os_context_pc_addr(context));
#endif
    interrupt_handle_now(signal, info, context);
    RESTORE_ERRNO;
}
/* manipulate the signal context and stack such that when the handler
 * returns, it will call function instead of whatever it was doing
 * previously
 */

#if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
extern int *context_eflags_addr(os_context_t *context);
#endif

extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
extern void post_signal_tramp(void);
extern void call_into_lisp_tramp(void);

/* Rewrite CONTEXT so that, on signal return, control transfers to
 * FUNPTR (a call_into_lisp-compatible entry point) which will invoke
 * the Lisp FUNCTION.  The mechanism is entirely architecture
 * specific; each branch below builds whatever fake frame its calling
 * convention requires. */
void
arrange_return_to_c_function(os_context_t *context,
                             call_into_lisp_lookalike funptr,
                             lispobj function)
{
#if !(defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_SAFEPOINT))
    check_gc_signals_unblocked_or_lose
        (os_context_sigmask_addr(context));
#endif
#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
    void * fun=native_pointer(function);
    void *code = &(((struct simple_fun *) fun)->code);
#endif

    /* Build a stack frame showing `interrupted' so that the
     * user's backtrace makes (as much) sense (as usual) */

    /* fp state is saved and restored by call_into_lisp */
    /* FIXME: errno is not restored, but since current uses of this
     * function only call Lisp code that signals an error, it's not
     * much of a problem.  In other words, running out of the control
     * stack between a syscall and (GET-ERRNO) may clobber errno if
     * something fails during signalling or in the handler.  But I
     * can't see what can go wrong as long as there is no CONTINUE
     * like restart on them. */
#ifdef LISP_FEATURE_X86
    /* Suppose the existence of some function that saved all
     * registers, called call_into_lisp, then restored GP registers and
     * returned.  It would look something like this:

     push   ebp
     mov    ebp esp
     pushfl
     pushal
     push   $0
     push   $0
     pushl  {address of function to call}
     call   0x8058db0 <call_into_lisp>
     addl   $12,%esp
     popal
     popfl
     leave
     ret

     * What we do here is set up the stack that call_into_lisp would
     * expect to see if it had been called by this code, and frob the
     * signal context so that signal return goes directly to call_into_lisp,
     * and when that function (and the lisp function it invoked) returns,
     * it returns to the second half of this imaginary function which
     * restores all registers and returns to C

     * For this to work, the latter part of the imaginary function
     * must obviously exist in reality.  That would be post_signal_tramp
     */

#ifndef LISP_FEATURE_DARWIN
    u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
#endif

#if defined(LISP_FEATURE_DARWIN)
    u32 *register_save_area = (u32 *)os_validate(0, 0x40);

    FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function,
                  *os_context_register_addr(context,reg_ESP)));
    FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));

    /* 1. os_validate (malloc/mmap) register_save_block
     * 2. copy register state into register_save_block
     * 3. put a pointer to register_save_block in a register in the context
     * 4. set the context's EIP to point to a trampoline which:
     *    a. builds the fake stack frame from the block
     *    b. frees the block
     *    c. calls the function
     */

    *register_save_area = *os_context_pc_addr(context);
    *(register_save_area + 1) = function;
    *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
    *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
    *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
    *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
    *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
    *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
    *(register_save_area + 8) = *context_eflags_addr(context);

    *os_context_pc_addr(context) =
        (os_context_register_t) funptr;
    *os_context_register_addr(context,reg_ECX) =
        (os_context_register_t) register_save_area;
#else

    /* return address for call_into_lisp: */
    *(sp-15) = (u32)post_signal_tramp;
    *(sp-14) = function;        /* args for call_into_lisp : function*/
    *(sp-13) = 0;               /* arg array */
    *(sp-12) = 0;               /* no. args */
    /* this order matches that used in POPAD */
    *(sp-11)=*os_context_register_addr(context,reg_EDI);
    *(sp-10)=*os_context_register_addr(context,reg_ESI);

    *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
    /* POPAD ignores the value of ESP:  */
    *(sp-8)=0;
    *(sp-7)=*os_context_register_addr(context,reg_EBX);

    *(sp-6)=*os_context_register_addr(context,reg_EDX);
    *(sp-5)=*os_context_register_addr(context,reg_ECX);
    *(sp-4)=*os_context_register_addr(context,reg_EAX);
    *(sp-3)=*context_eflags_addr(context);
    *(sp-2)=*os_context_register_addr(context,reg_EBP);
    *(sp-1)=*os_context_pc_addr(context);

#endif

#elif defined(LISP_FEATURE_X86_64)
    u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);

    /* return address for call_into_lisp: */
    *(sp-18) = (u64)post_signal_tramp;

    *(sp-17)=*os_context_register_addr(context,reg_R15);
    *(sp-16)=*os_context_register_addr(context,reg_R14);
    *(sp-15)=*os_context_register_addr(context,reg_R13);
    *(sp-14)=*os_context_register_addr(context,reg_R12);
    *(sp-13)=*os_context_register_addr(context,reg_R11);
    *(sp-12)=*os_context_register_addr(context,reg_R10);
    *(sp-11)=*os_context_register_addr(context,reg_R9);
    *(sp-10)=*os_context_register_addr(context,reg_R8);
    *(sp-9)=*os_context_register_addr(context,reg_RDI);
    *(sp-8)=*os_context_register_addr(context,reg_RSI);
    /* skip RBP and RSP */
    *(sp-7)=*os_context_register_addr(context,reg_RBX);
    *(sp-6)=*os_context_register_addr(context,reg_RDX);
    *(sp-5)=*os_context_register_addr(context,reg_RCX);
    *(sp-4)=*os_context_register_addr(context,reg_RAX);
    *(sp-3)=*context_eflags_addr(context);
    *(sp-2)=*os_context_register_addr(context,reg_RBP);
    *(sp-1)=*os_context_pc_addr(context);

    *os_context_register_addr(context,reg_RDI) =
        (os_context_register_t)function; /* function */
    *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
    *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
#else
    struct thread *th=arch_os_get_current_thread();
    build_fake_control_stack_frames(th,context);
#endif

#ifdef LISP_FEATURE_X86

#if !defined(LISP_FEATURE_DARWIN)
    *os_context_pc_addr(context) = (os_context_register_t)funptr;
    *os_context_register_addr(context,reg_ECX) = 0;
    *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
#ifdef __NetBSD__
    *os_context_register_addr(context,reg_UESP) =
        (os_context_register_t)(sp-15);
#else
    *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
#endif /* __NETBSD__ */
#endif /* LISP_FEATURE_DARWIN */

#elif defined(LISP_FEATURE_X86_64)
    *os_context_pc_addr(context) = (os_context_register_t)funptr;
    *os_context_register_addr(context,reg_RCX) = 0;
    *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
    *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
#else
    /* this much of the calling convention is common to all
       non-x86 ports */
    *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
    *os_context_register_addr(context,reg_NARGS) = 0;
#ifdef reg_LIP
    *os_context_register_addr(context,reg_LIP) =
        (os_context_register_t)(unsigned long)code;
#endif
    *os_context_register_addr(context,reg_CFP) =
        (os_context_register_t)(unsigned long)access_control_frame_pointer(th);
#endif
#ifdef ARCH_HAS_NPC_REGISTER
    *os_context_npc_addr(context) =
        4 + *os_context_pc_addr(context);
#endif
#if defined(LISP_FEATURE_SPARC) || defined(LISP_FEATURE_ARM) || defined(LISP_FEATURE_ARM64)
    /* NOTE(review): pointer arithmetic on the void* `fun` relies on a
     * GCC extension; the result re-tags the native function pointer. */
    *os_context_register_addr(context,reg_CODE) =
        (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
#endif
    FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
           (long)function));
}
/* Convenience wrapper: arrange for CONTEXT to return into FUNCTION
 * via call_into_lisp (or, on x86 Darwin, via the special
 * call_into_lisp_tramp). */
void
arrange_return_to_lisp_function(os_context_t *context, lispobj function)
{
#if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_X86)
    arrange_return_to_c_function(context,
                                 (call_into_lisp_lookalike)call_into_lisp_tramp,
                                 function);
#else
    arrange_return_to_c_function(context, call_into_lisp, function);
#endif
}
1615 // These have undefined_alien_function tramp in x-assem.S
1616 #if !(defined(LISP_FEATURE_X86_64) || defined(LISP_FEATURE_ARM) || defined(LISP_FEATURE_ARM64))
1617 /* KLUDGE: Theoretically the approach we use for undefined alien
1618 * variables should work for functions as well, but on PPC/Darwin
1619 * we get bus error at bogus addresses instead, hence this workaround,
1620 * that has the added benefit of automatically discriminating between
1621 * functions and variables.
1623 void
1624 undefined_alien_function(void)
1626 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUN_ERROR));
1628 #endif
1630 void lower_thread_control_stack_guard_page(struct thread *th)
1632 protect_control_stack_guard_page(0, th);
1633 protect_control_stack_return_guard_page(1, th);
1634 th->control_stack_guard_page_protected = NIL;
1635 fprintf(stderr, "INFO: Control stack guard page unprotected\n");
1638 void reset_thread_control_stack_guard_page(struct thread *th)
1640 memset(CONTROL_STACK_GUARD_PAGE(th), 0, os_vm_page_size);
1641 protect_control_stack_guard_page(1, th);
1642 protect_control_stack_return_guard_page(0, th);
1643 th->control_stack_guard_page_protected = T;
1644 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1647 /* Called from the REPL, too. */
1648 void reset_control_stack_guard_page(void)
1650 struct thread *th=arch_os_get_current_thread();
1651 if (th->control_stack_guard_page_protected == NIL) {
1652 reset_thread_control_stack_guard_page(th);
/* Lower the control stack guard page of the current thread. */
void lower_control_stack_guard_page(void)
{
    struct thread *self = arch_os_get_current_thread();

    lower_thread_control_stack_guard_page(self);
}
/* Called on a protection fault at ADDR: if ADDR lies in one of the
 * guard pages of the control, binding, or alien stacks (or the
 * undefined-alien page), handle the exhaustion and return 1;
 * otherwise return 0 so the caller treats the fault some other
 * way. */
boolean
handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
{
    struct thread *th=arch_os_get_current_thread();

    if(addr >= CONTROL_STACK_HARD_GUARD_PAGE(th) &&
       addr < CONTROL_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
        lose("Control stack exhausted, fault: %p, PC: %p",
             addr, *os_context_pc_addr(context));
    }
    else if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
            addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
        /* We hit the end of the control stack: disable guard page
         * protection so the error handler has some headroom, protect the
         * previous page so that we can catch returns from the guard page
         * and restore it. */
        if (th->control_stack_guard_page_protected == NIL)
            lose("control_stack_guard_page_protected NIL");
        lower_control_stack_guard_page();
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
        /* For the unfortunate case, when the control stack is
         * exhausted in a signal handler. */
        unblock_signals_in_context_and_maybe_warn(context);
#endif
        arrange_return_to_lisp_function
            (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
        return 1;
    }
    else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
            addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
        /* We're returning from the guard page: reprotect it, and
         * unprotect this one.  This works even if we somehow missed
         * the return-guard-page, and hit it on our way to new
         * exhaustion instead. */
        if (th->control_stack_guard_page_protected != NIL)
            lose("control_stack_guard_page_protected not NIL");
        reset_control_stack_guard_page();
        return 1;
    }
    else if(addr >= BINDING_STACK_HARD_GUARD_PAGE(th) &&
            addr < BINDING_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
        lose("Binding stack exhausted");
    }
    else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
            addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
        protect_binding_stack_guard_page(0, NULL);
        protect_binding_stack_return_guard_page(1, NULL);
        fprintf(stderr, "INFO: Binding stack guard page unprotected\n");

        /* For the unfortunate case, when the binding stack is
         * exhausted in a signal handler. */
        unblock_signals_in_context_and_maybe_warn(context);
        arrange_return_to_lisp_function
            (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
        return 1;
    }
    else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
            addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
        protect_binding_stack_guard_page(1, NULL);
        protect_binding_stack_return_guard_page(0, NULL);
        fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
        return 1;
    }
    else if(addr >= ALIEN_STACK_HARD_GUARD_PAGE(th) &&
            addr < ALIEN_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
        lose("Alien stack exhausted");
    }
    else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
            addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
        protect_alien_stack_guard_page(0, NULL);
        protect_alien_stack_return_guard_page(1, NULL);
        fprintf(stderr, "INFO: Alien stack guard page unprotected\n");

        /* For the unfortunate case, when the alien stack is
         * exhausted in a signal handler. */
        unblock_signals_in_context_and_maybe_warn(context);
        arrange_return_to_lisp_function
            (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
        return 1;
    }
    else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
            addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
        protect_alien_stack_guard_page(1, NULL);
        protect_alien_stack_return_guard_page(0, NULL);
        fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
        return 1;
    }
    else if (addr >= undefined_alien_address &&
             addr < undefined_alien_address + os_vm_page_size) {
        arrange_return_to_lisp_function
            (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
        return 1;
    }
    else return 0;
}
1758 * noise to install handlers
1761 #ifndef LISP_FEATURE_WIN32
1762 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1763 * they are blocked, in Linux 2.6 the default handler is invoked
1764 * instead that usually coredumps. One might hastily think that adding
 1765  * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
 1766  * the whole sa_mask is ignored, instead of merely not adding the signal
 1767  * in question to the mask. That means if it's not blockable the
1768 * signal must be unblocked at the beginning of signal handlers.
1770 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1771 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1772 * will be unblocked in the sigmask during the signal handler. -- RMK
1773 * X-mas day, 2005
/* Result of the runtime SA_NODEFER probe below: -1 = test not yet
 * run, 1 = SA_NODEFER behaves as expected, 0 = broken (see the
 * comment above). */
static volatile int sigaction_nodefer_works = -1;

#define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
#define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
/* Handler installed by see_if_sigaction_nodefer_works: inspects the
 * current sigmask and records in sigaction_nodefer_works whether
 * exactly the expected signals are blocked. */
static void
sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
{
    sigset_t current;
    int i;
    get_current_sigmask(&current);
    /* There should be exactly two blocked signals: the two we added
     * to sa_mask when setting up the handler.  NetBSD doesn't block
     * the signal we're handling when SA_NODEFER is set; Linux before
     * 2.6.13 or so also doesn't block the other signal when
     * SA_NODEFER is set. */
    for(i = 1; i < NSIG; i++)
        if (sigismember(&current, i) !=
            (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
            FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
            sigaction_nodefer_works = 0;
        }
    if (sigaction_nodefer_works == -1)
        sigaction_nodefer_works = 1;
}
/* Probe the OS's SA_NODEFER behavior at runtime: install the test
 * handler, send ourselves the test signal, and spin until the handler
 * has recorded its verdict in sigaction_nodefer_works. */
static void
see_if_sigaction_nodefer_works(void)
{
    struct sigaction sa, old_sa;

    sa.sa_flags = SA_SIGINFO | SA_NODEFER;
    sa.sa_sigaction = sigaction_nodefer_test_handler;
    sigemptyset(&sa.sa_mask);
    sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
    sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
    sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
    /* Make sure no signals are blocked. */
    {
        sigset_t empty;
        sigemptyset(&empty);
        thread_sigmask(SIG_SETMASK, &empty, 0);
    }
    kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
    while (sigaction_nodefer_works == -1);
    sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
}

#undef SA_NODEFER_TEST_BLOCK_SIGNAL
#undef SA_NODEFER_TEST_KILL_SIGNAL
1826 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
/* Entry point of the helper thread spawned by
 * spawn_signal_thread_handler(): runs the Lisp-level handler for the
 * signal number smuggled in via PTHREAD_ARG, outside of signal
 * context, with a zeroed fake siginfo/context pair. */
static void *
signal_thread_trampoline(void *pthread_arg)
{
    intptr_t signo = (intptr_t) pthread_arg;
    os_context_t fake_context;
    siginfo_t fake_info;
#ifdef LISP_FEATURE_PPC
    mcontext_t uc_regs;
#endif

    memset(&fake_info, 0, sizeof(fake_info));
    memset(&fake_context, 0, sizeof(fake_context));
#ifdef LISP_FEATURE_PPC
    /* On PPC/Linux the register set lives behind a pointer inside the
     * ucontext; aim it at a zeroed local so context accessors don't
     * chase a null pointer. */
    memset(&uc_regs, 0, sizeof(uc_regs));
    fake_context.uc_mcontext.uc_regs = &uc_regs;
#endif

    /* Make the fake context minimally plausible: PC points at this
     * function, and (where the port exposes it) SP at our own frame. */
    *os_context_pc_addr(&fake_context) = (intptr_t) &signal_thread_trampoline;
#ifdef ARCH_HAS_STACK_POINTER /* aka x86(-64) */
    *os_context_sp_addr(&fake_context) = (intptr_t) __builtin_frame_address(0);
#endif

    signal_handler_callback(interrupt_handlers[signo].lisp,
                            signo, &fake_info, &fake_context);
    return 0;
}
/* SIGPROF trampoline (used under :sb-safepoint-strictly; see
 * install_handler): swaps the statistical profiler's allocation
 * region in around the ordinary handler so allocations made while
 * profiling are accounted to the profiler, not the interrupted code. */
static void
sigprof_handler_trampoline(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(signal,context,void_context);
    struct thread *self = arch_os_get_current_thread();

    /* alloc() is not re-entrant and still uses pseudo atomic (even though
     * inline allocation does not). In this case, give up. */
    if (get_pseudo_atomic_atomic(self))
        goto cleanup;

    /* Swap in the profiler's allocation region. */
    struct alloc_region tmp = self->alloc_region;
    self->alloc_region = self->sprof_alloc_region;
    self->sprof_alloc_region = tmp;

    interrupt_handle_now_handler(signal, info, void_context);

    /* And we're back. We know that the SIGPROF handler never unwinds
     * non-locally, and can simply swap things back: */

    tmp = self->alloc_region;
    self->alloc_region = self->sprof_alloc_region;
    self->sprof_alloc_region = tmp;

cleanup:
    ; /* Dear C compiler, it's OK to have a label here. */
    RESTORE_ERRNO;
}
1884 static void
1885 spawn_signal_thread_handler(int signal, siginfo_t *info, void *void_context)
1887 SAVE_ERRNO(signal,context,void_context);
1889 pthread_attr_t attr;
1890 pthread_t th;
1892 if (pthread_attr_init(&attr))
1893 goto lost;
1894 if (pthread_attr_setstacksize(&attr, thread_control_stack_size))
1895 goto lost;
1896 if (pthread_create(&th, &attr, &signal_thread_trampoline, (void*)(intptr_t) signal))
1897 goto lost;
1898 if (pthread_attr_destroy(&attr))
1899 goto lost;
1901 RESTORE_ERRNO;
1902 return;
1904 lost:
1905 lose("spawn_signal_thread_handler");
1907 #endif
1909 static void
1910 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1912 SAVE_ERRNO(signal,context,void_context);
1913 sigset_t unblock;
1915 sigemptyset(&unblock);
1916 sigaddset(&unblock, signal);
1917 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1918 interrupt_handle_now(signal, info, context);
1919 RESTORE_ERRNO;
1922 static void
1923 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1925 SAVE_ERRNO(signal,context,void_context);
1926 sigset_t unblock;
1928 sigemptyset(&unblock);
1929 sigaddset(&unblock, signal);
1930 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1931 (*interrupt_low_level_handlers[signal])(signal, info, context);
1932 RESTORE_ERRNO;
1935 static void
1936 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1938 SAVE_ERRNO(signal,context,void_context);
1939 (*interrupt_low_level_handlers[signal])(signal, info, context);
1940 RESTORE_ERRNO;
/* Install HANDLER as the C-level ("low level") handler for SIGNAL and
 * record it in interrupt_low_level_handlers[], so install_handler()
 * knows not to replace the outermost UNIX handler. Passing SIG_DFL
 * undoes the installation (the recorded entry is cleared). */
void
undoably_install_low_level_interrupt_handler (int signal,
                                              interrupt_handler_t handler)
{
    struct sigaction sa;

    if (0 > signal || signal >= NSIG) {
        lose("bad signal number %d\n", signal);
    }

    /* Pick the outermost (UNIX-visible) handler. Order matters:
     *  - SIG_DFL is installed verbatim;
     *  - deferrable signals go through the deferral machinery;
     *  - when SA_NODEFER is broken and the signal isn't blockable,
     *    use the trampoline that unblocks it on entry;
     *  - otherwise dispatch straight to the low-level handler. */
    if (ARE_SAME_HANDLER(handler, SIG_DFL))
        sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
    else if (sigismember(&deferrable_sigset,signal))
        sa.sa_sigaction = low_level_maybe_now_maybe_later;
    else if (!sigaction_nodefer_works &&
             !sigismember(&blockable_sigset, signal))
        sa.sa_sigaction = low_level_unblock_me_trampoline;
    else
        sa.sa_sigaction = low_level_handle_now_handler;

#ifdef LISP_FEATURE_SB_THRUPTION
    /* It's in `deferrable_sigset' so that we block&unblock it properly,
     * but we don't actually want to defer it. And if we put it only
     * into blockable_sigset, we'd have to special-case it around thread
     * creation at least. */
    if (signal == SIGPIPE)
        sa.sa_sigaction = low_level_handle_now_handler;
#endif

    sigcopyset(&sa.sa_mask, &blockable_sigset);
    sa.sa_flags = SA_SIGINFO | SA_RESTART
        | (sigaction_nodefer_works ? SA_NODEFER : 0);
#if defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK)
    if(signal==SIG_MEMORY_FAULT) {
        /* Memory faults can arrive on an exhausted control stack, so
         * handle them on the alternate signal stack. */
        sa.sa_flags |= SA_ONSTACK;
# ifdef LISP_FEATURE_SB_SAFEPOINT
        sigaddset(&sa.sa_mask, SIGRTMIN);
        sigaddset(&sa.sa_mask, SIGRTMIN+1);
# endif
    }
#endif

    sigaction(signal, &sa, NULL);
    interrupt_low_level_handlers[signal] =
        (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
}
1989 #endif
/* This is called from Lisp. */
/* Install HANDLER as the Lisp-visible handler for SIGNAL and return
 * the previous Lisp handler (as a uword_t). If a low-level C handler
 * is already installed for SIGNAL, only the interrupt_handlers[]
 * entry changes and the outermost UNIX handler is left alone.
 * SYNCHRONOUS is consulted only under :sb-safepoint-strictly, where
 * asynchronous signals are handed off to a fresh thread rather than
 * handled in signal context. */
uword_t
install_handler(int signal, void handler(int, siginfo_t*, os_context_t*),
                int synchronous)
{
#ifndef LISP_FEATURE_WIN32
    struct sigaction sa;
    sigset_t old;
    union interrupt_handler oldhandler;

    FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));

    /* Keep signals out while we swap handlers. */
    block_blockable_signals(&old);

    FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%p\n",
           interrupt_low_level_handlers[signal]));
    if (interrupt_low_level_handlers[signal]==0) {
        /* Same outermost-handler selection cascade as in
         * undoably_install_low_level_interrupt_handler(); order
         * matters, including the feature-conditional arms. */
        if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
            ARE_SAME_HANDLER(handler, SIG_IGN))
            sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
        else if (signal == SIGPROF)
            sa.sa_sigaction = sigprof_handler_trampoline;
        else if (!synchronous)
            sa.sa_sigaction = spawn_signal_thread_handler;
#endif
        else if (sigismember(&deferrable_sigset, signal))
            sa.sa_sigaction = maybe_now_maybe_later;
        else if (!sigaction_nodefer_works &&
                 !sigismember(&blockable_sigset, signal))
            sa.sa_sigaction = unblock_me_trampoline;
        else
            sa.sa_sigaction = interrupt_handle_now_handler;

        sigcopyset(&sa.sa_mask, &blockable_sigset);
        sa.sa_flags = SA_SIGINFO | SA_RESTART |
            (sigaction_nodefer_works ? SA_NODEFER : 0);
        sigaction(signal, &sa, NULL);
    }

    oldhandler = interrupt_handlers[signal];
    interrupt_handlers[signal].c = handler;

    thread_sigmask(SIG_SETMASK, &old, 0);

    FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));

    return (uword_t)oldhandler.lisp;
#else
    /* Probably-wrong Win32 hack */
    return 0;
#endif
}
/* This must not go through lisp as it's allowed anytime, even when on
 * the altstack. */
/* Low-level SIGABRT handler (installed by interrupt_init()): drop
 * into LDB via lose() with the faulting context recorded. */
void
sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
{
    /* Save the interrupt context. No need to undo it, since lose()
     * shouldn't return. */
    fake_foreign_function_call(context);
    lose("SIGABRT received.\n");
}
/* One-time signal-handling setup: probe whether SA_NODEFER works,
 * populate the deferrable/blockable/GC signal sets, reset every
 * Lisp-level handler slot to SIG_DFL, and claim SIGABRT for LDB. */
void
interrupt_init(void)
{
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    int i;
    SHOW("entering interrupt_init()");
#ifndef LISP_FEATURE_WIN32
    see_if_sigaction_nodefer_works();
#endif
    sigemptyset(&deferrable_sigset);
    sigemptyset(&blockable_sigset);
    sigemptyset(&gc_sigset);
    sigaddset_deferrable(&deferrable_sigset);
    sigaddset_blockable(&blockable_sigset);
    sigaddset_gc(&gc_sigset);
#endif

#ifndef LISP_FEATURE_WIN32
    /* Set up high level handler information. */
    for (i = 0; i < NSIG; i++) {
        interrupt_handlers[i].c =
            /* (The cast here blasts away the distinction between
             * SA_SIGACTION-style three-argument handlers and
             * signal(..)-style one-argument handlers, which is OK
             * because it works to call the 1-argument form where the
             * 3-argument form is expected.) */
            (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
    }
    undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
#endif
    SHOW("returning from interrupt_init()");
}
2089 #ifndef LISP_FEATURE_WIN32
2091 siginfo_code(siginfo_t *info)
2093 return info->si_code;
/* Address of the most recent memory fault, stashed here for Lisp (and
 * LDB) to inspect; written by lisp_memory_fault_error(). */
os_vm_address_t current_memory_fault_address;
/* Report a memory fault at ADDR to Lisp: record the address, warn (or
 * die) if the heap looks corrupt, then arrange for MEMORY-FAULT-ERROR
 * to be signalled at the Lisp level. */
void
lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
{
    /* FIXME: This is lossy: if we get another memory fault (eg. from
     * another thread) before lisp has read this, we lose the information.
     * However, since this is mostly informative, we'll live with that for
     * now -- some address is better then no address in this case. */
    current_memory_fault_address = addr;

    /* If we lose on corruption, provide LDB with debugging information. */
    fake_foreign_function_call(context);

    /* To allow debugging memory faults in signal handlers and such. */
    corruption_warning_and_maybe_lose("Memory fault at %p (pc=%p, sp=%p)",
                                      addr,
                                      *os_context_pc_addr(context),
#ifdef ARCH_HAS_STACK_POINTER
                                      *os_context_sp_addr(context)
#else
                                      0
#endif
                                      );
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    undo_fake_foreign_function_call(context);
    unblock_signals_in_context_and_maybe_warn(context);
    /* Resume Lisp inside MEMORY-FAULT-ERROR rather than retrying the
     * faulting instruction. */
    arrange_return_to_lisp_function(context,
                                    StaticSymbolFunction(MEMORY_FAULT_ERROR));
#else
    unblock_gc_signals(0, 0);
    funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR));
    undo_fake_foreign_function_call(context);
#endif
}
2131 #endif
/* Last-resort trap handling: hand the raw context (as a SAP) to
 * Lisp's UNHANDLED-TRAP-ERROR, and die if that ever returns. */
static void
unhandled_trap_error(os_context_t *context)
{
    DX_ALLOC_SAP(context_sap, context);
    fake_foreign_function_call(context);
#ifndef LISP_FEATURE_SB_SAFEPOINT
    unblock_gc_signals(0, 0);
#endif

#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    /* Restore the signal mask the interrupted code was running with
     * before calling into Lisp. */
    thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
#endif
    funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
    lose("UNHANDLED-TRAP-ERROR fell through");
}
2149 /* Common logic for trapping instructions. How we actually handle each
2150 * case is highly architecture dependent, but the overall shape is
2151 * this. */
2152 void
2153 handle_trap(os_context_t *context, int trap)
2155 switch(trap) {
2156 #if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
2157 case trap_PendingInterrupt:
2158 FSHOW((stderr, "/<trap pending interrupt>\n"));
2159 arch_skip_instruction(context);
2160 interrupt_handle_pending(context);
2161 break;
2162 #endif
2163 case trap_Error:
2164 case trap_Cerror:
2165 #ifdef trap_InvalidArgCount
2166 case trap_InvalidArgCount:
2167 #endif
2168 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
2169 interrupt_internal_error(context, trap==trap_Cerror);
2170 break;
2171 case trap_Breakpoint:
2172 arch_handle_breakpoint(context);
2173 break;
2174 case trap_FunEndBreakpoint:
2175 arch_handle_fun_end_breakpoint(context);
2176 break;
2177 #ifdef trap_AfterBreakpoint
2178 case trap_AfterBreakpoint:
2179 arch_handle_after_breakpoint(context);
2180 break;
2181 #endif
2182 #ifdef trap_SingleStepAround
2183 case trap_SingleStepAround:
2184 case trap_SingleStepBefore:
2185 arch_handle_single_step_trap(context, trap);
2186 break;
2187 #endif
2188 #ifdef trap_GlobalSafepoint
2189 case trap_GlobalSafepoint:
2190 fake_foreign_function_call(context);
2191 thread_in_lisp_raised(context);
2192 undo_fake_foreign_function_call(context);
2193 arch_skip_instruction(context);
2194 break;
2195 case trap_CspSafepoint:
2196 fake_foreign_function_call(context);
2197 thread_in_safety_transition(context);
2198 undo_fake_foreign_function_call(context);
2199 arch_skip_instruction(context);
2200 break;
2201 #endif
2202 #if defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_GENCGC)
2203 case trap_Allocation:
2204 arch_handle_allocation_trap(context);
2205 arch_skip_instruction(context);
2206 break;
2207 #endif
2208 case trap_Halt:
2209 fake_foreign_function_call(context);
2210 lose("%%PRIMITIVE HALT called; the party is over.\n");
2211 default:
2212 unhandled_trap_error(context);