Eliminate late-type-cold-init2
[sbcl.git] / src / runtime / interrupt.c
blobb5b69abd9e6fd675e9f388b75b6b31c96b654bb8
1 /*
2 * interrupt-handling magic
3 */
5 /*
6 * This software is part of the SBCL system. See the README file for
7 * more information.
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handle we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
43 #include "sbcl.h"
45 #include <stdio.h>
46 #include <stdlib.h>
47 #include <string.h>
48 #include <signal.h>
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
51 #include <sys/wait.h>
52 #endif
53 #include <errno.h>
55 #include "runtime.h"
56 #include "arch.h"
57 #include "os.h"
58 #include "interrupt.h"
59 #include "globals.h"
60 #include "lispregs.h"
61 #include "validate.h"
62 #include "interr.h"
63 #include "gc.h"
64 #include "alloc.h"
65 #include "dynbind.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
71 /* When we catch an internal error, should we pass it back to Lisp to
72 * be handled in a high-level way? (Early in cold init, the answer is
73 * 'no', because Lisp is still too brain-dead to handle anything.
74 * After sufficient initialization has been completed, the answer
75 * becomes 'yes'.) */
76 boolean internal_errors_enabled = 0;
78 #ifndef LISP_FEATURE_WIN32
79 static
80 void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
81 #endif
82 union interrupt_handler interrupt_handlers[NSIG];
84 /* Under Linux on some architectures, we appear to have to restore the
85 * FPU control word from the context, as after the signal is delivered
86 * we appear to have a null FPU control word. */
87 #if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
88 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
89 os_context_t *context = arch_os_get_context(&void_context); \
90 os_restore_fp_control(context);
91 #else
92 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
93 os_context_t *context = arch_os_get_context(&void_context);
94 #endif
/* Foreign code may want to start some threads on its own.
 * Non-targeted, truly asynchronous signals can be delivered to
 * basically any thread, but invoking Lisp handlers in such foreign
 * threads is really bad, so let's resignal it.
 *
 * This should at least bring attention to the problem, but it cannot
 * work for SIGSEGV and similar. It is good enough for timers, and
 * maybe all deferrables. */
#if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
/* Collect into SIGSET every signal for which either a low-level
 * handler or a Lisp-level handler has been installed (i.e. anything
 * not at SIG_DFL). Used when resignalling from a foreign thread. */
static void
add_handled_signals(sigset_t *sigset)
{
    int sig;
    for (sig = 1; sig < NSIG; sig++) {
        boolean low_level_installed =
            !ARE_SAME_HANDLER(interrupt_low_level_handlers[sig], SIG_DFL);
        boolean lisp_installed =
            !ARE_SAME_HANDLER(interrupt_handlers[sig].c, SIG_DFL);
        if (low_level_installed || lisp_installed)
            sigaddset(sigset, sig);
    }
}

void block_signals(sigset_t *what, sigset_t *where, sigset_t *old);
#endif
/* If the current pthread is not a Lisp thread (it has no lisp_thread
 * TLS binding), re-deliver SIGNAL to the whole process so that some
 * Lisp thread with the signal unblocked picks it up instead.
 * Returns 1 when the signal was resignalled (caller should return
 * without handling), 0 when the caller should handle it here. */
121 static boolean
122 maybe_resignal_to_lisp_thread(int signal, os_context_t *context)
124 #if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
125 if (!pthread_getspecific(lisp_thread)) {
/* Truly asynchronous (deferrable) signals may legitimately arrive in
 * a foreign thread; anything else landing here suggests corruption,
 * so warn loudly before resignalling. */
126 if (!(sigismember(&deferrable_sigset,signal))) {
127 corruption_warning_and_maybe_lose
128 ("Received signal %d in non-lisp thread %lu, resignalling to a lisp thread.",
129 signal,
130 pthread_self());
/* Block every handled signal in this thread and in the interrupted
 * context so we do not take it again here, then re-raise it at the
 * process; a Lisp thread will receive it. */
133 sigset_t sigset;
134 sigemptyset(&sigset);
135 add_handled_signals(&sigset);
136 block_signals(&sigset, 0, 0);
137 block_signals(&sigset, os_context_sigmask_addr(context), 0);
138 kill(getpid(), signal);
140 return 1;
141 } else
142 #endif
143 return 0;
146 /* These are to be used in signal handlers. Currently all handlers are
147 * called from one of:
149 * interrupt_handle_now_handler
150 * maybe_now_maybe_later
151 * unblock_me_trampoline
152 * low_level_handle_now_handler
153 * low_level_maybe_now_maybe_later
154 * low_level_unblock_me_trampoline
156 * This gives us a single point of control (or six) over errno, fp
157 * control word, and fixing up signal context on sparc.
159 * The SPARC/Linux platform doesn't quite do signals the way we want
160 * them done. The third argument in the handler isn't filled in by the
161 * kernel properly, so we fix it up ourselves in the
162 * arch_os_get_context(..) function. -- CSR, 2002-07-23
164 #define SAVE_ERRNO(signal,context,void_context) \
166 int _saved_errno = errno; \
167 RESTORE_FP_CONTROL_WORD(context,void_context); \
168 if (!maybe_resignal_to_lisp_thread(signal, context)) \
171 #define RESTORE_ERRNO \
173 errno = _saved_errno; \
176 static void run_deferred_handler(struct interrupt_data *data,
177 os_context_t *context);
178 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
179 static void store_signal_data_for_later (struct interrupt_data *data,
180 void *handler, int signal,
181 siginfo_t *info,
182 os_context_t *context);
185 /* Generic signal related utilities. */
187 void
188 get_current_sigmask(sigset_t *sigset)
190 /* Get the current sigmask, by blocking the empty set. */
191 thread_sigmask(SIG_BLOCK, 0, sigset);
194 void
195 block_signals(sigset_t *what, sigset_t *where, sigset_t *old)
197 if (where) {
198 int i;
199 if (old)
200 sigcopyset(old, where);
201 for(i = 1; i < NSIG; i++) {
202 if (sigismember(what, i))
203 sigaddset(where, i);
205 } else {
206 thread_sigmask(SIG_BLOCK, what, old);
210 void
211 unblock_signals(sigset_t *what, sigset_t *where, sigset_t *old)
213 if (where) {
214 int i;
215 if (old)
216 sigcopyset(old, where);
217 for(i = 1; i < NSIG; i++) {
218 if (sigismember(what, i))
219 sigdelset(where, i);
221 } else {
222 thread_sigmask(SIG_UNBLOCK, what, old);
226 static void
227 print_sigset(sigset_t *sigset)
229 int i;
230 for(i = 1; i < NSIG; i++) {
231 if (sigismember(sigset, i))
232 fprintf(stderr, "Signal %d masked\n", i);
/* Return 1 if all signals in sigset2 are masked in sigset, return 0
 * if all are unmasked; else die. Passing NULL for sigset is a
 * shorthand for the current sigmask. */
239 boolean
240 all_signals_blocked_p(sigset_t *sigset, sigset_t *sigset2,
241 const char *name)
243 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
244 int i;
245 boolean has_blocked = 0, has_unblocked = 0;
246 sigset_t current;
247 if (sigset == 0) {
248 get_current_sigmask(&current);
249 sigset = &current;
251 for(i = 1; i < NSIG; i++) {
252 if (sigismember(sigset2, i)) {
253 if (sigismember(sigset, i))
254 has_blocked = 1;
255 else
256 has_unblocked = 1;
259 if (has_blocked && has_unblocked) {
260 print_sigset(sigset);
261 lose("some %s signals blocked, some unblocked\n", name);
263 if (has_blocked)
264 return 1;
265 else
266 return 0;
267 #endif
271 /* Deferrables, blockables, gc signals. */
273 void
274 sigaddset_deferrable(sigset_t *s)
276 sigaddset(s, SIGHUP);
277 sigaddset(s, SIGINT);
278 sigaddset(s, SIGTERM);
279 sigaddset(s, SIGQUIT);
280 sigaddset(s, SIGPIPE);
281 sigaddset(s, SIGALRM);
282 sigaddset(s, SIGURG);
283 sigaddset(s, SIGTSTP);
284 sigaddset(s, SIGCHLD);
285 sigaddset(s, SIGIO);
286 #ifndef LISP_FEATURE_HPUX
287 sigaddset(s, SIGXCPU);
288 sigaddset(s, SIGXFSZ);
289 #endif
290 sigaddset(s, SIGVTALRM);
291 sigaddset(s, SIGPROF);
292 sigaddset(s, SIGWINCH);
295 void
296 sigaddset_blockable(sigset_t *sigset)
298 sigaddset_deferrable(sigset);
299 sigaddset_gc(sigset);
302 void
303 sigaddset_gc(sigset_t *sigset)
305 #ifdef THREADS_USING_GCSIGNAL
306 sigaddset(sigset,SIG_STOP_FOR_GC);
307 #endif
310 /* initialized in interrupt_init */
311 sigset_t deferrable_sigset;
312 sigset_t blockable_sigset;
313 sigset_t gc_sigset;
315 #endif
317 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
318 boolean
319 deferrables_blocked_p(sigset_t *sigset)
321 return all_signals_blocked_p(sigset, &deferrable_sigset, "deferrable");
323 #endif
325 void
326 check_deferrables_unblocked_or_lose(sigset_t *sigset)
328 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
329 if (deferrables_blocked_p(sigset))
330 lose("deferrables blocked\n");
331 #endif
334 void
335 check_deferrables_blocked_or_lose(sigset_t *sigset)
337 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
338 if (!deferrables_blocked_p(sigset))
339 lose("deferrables unblocked\n");
340 #endif
343 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
344 boolean
345 blockables_blocked_p(sigset_t *sigset)
347 return all_signals_blocked_p(sigset, &blockable_sigset, "blockable");
349 #endif
351 void
352 check_blockables_unblocked_or_lose(sigset_t *sigset)
354 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
355 if (blockables_blocked_p(sigset))
356 lose("blockables blocked\n");
357 #endif
360 void
361 check_blockables_blocked_or_lose(sigset_t *sigset)
363 #if !defined(LISP_FEATURE_WIN32)
364 /* On Windows, there are no actual signals, but since the win32 port
365 * tracks the sigmask and checks it explicitly, some functions are
366 * still required to keep the mask set up properly. (After all, the
367 * goal of the sigmask emulation is to not have to change all the
368 * call sites in the first place.)
370 * However, this does not hold for all signals equally: While
371 * deferrables matter ("is interrupt-thread okay?"), it is not worth
372 * having to set up blockables properly (which include the
373 * non-existing GC signals).
375 * Yet, as the original comment explains it:
376 * Adjusting FREE-INTERRUPT-CONTEXT-INDEX* and other aspecs of
377 * fake_foreign_function_call machinery are sometimes useful here[...].
379 * So we merely skip this assertion.
380 * -- DFL, trying to expand on a comment by AK.
382 if (!blockables_blocked_p(sigset))
383 lose("blockables unblocked\n");
384 #endif
387 #ifndef LISP_FEATURE_SB_SAFEPOINT
388 #if !defined(LISP_FEATURE_WIN32)
389 boolean
390 gc_signals_blocked_p(sigset_t *sigset)
392 return all_signals_blocked_p(sigset, &gc_sigset, "gc");
394 #endif
396 void
397 check_gc_signals_unblocked_or_lose(sigset_t *sigset)
399 #if !defined(LISP_FEATURE_WIN32)
400 if (gc_signals_blocked_p(sigset))
401 lose("gc signals blocked\n");
402 #endif
405 void
406 check_gc_signals_blocked_or_lose(sigset_t *sigset)
408 #if !defined(LISP_FEATURE_WIN32)
409 if (!gc_signals_blocked_p(sigset))
410 lose("gc signals unblocked\n");
411 #endif
413 #endif
415 void
416 block_deferrable_signals(sigset_t *where, sigset_t *old)
418 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
419 block_signals(&deferrable_sigset, where, old);
420 #endif
423 void
424 block_blockable_signals(sigset_t *where, sigset_t *old)
426 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
427 block_signals(&blockable_sigset, where, old);
428 #endif
431 #ifndef LISP_FEATURE_SB_SAFEPOINT
432 void
433 block_gc_signals(sigset_t *where, sigset_t *old)
435 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
436 block_signals(&gc_sigset, where, old);
437 #endif
439 #endif
441 void
442 unblock_deferrable_signals(sigset_t *where, sigset_t *old)
444 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
445 if (interrupt_handler_pending_p())
446 lose("unblock_deferrable_signals: losing proposition\n");
447 #ifndef LISP_FEATURE_SB_SAFEPOINT
448 check_gc_signals_unblocked_or_lose(where);
449 #endif
450 unblock_signals(&deferrable_sigset, where, old);
451 #endif
454 void
455 unblock_blockable_signals(sigset_t *where, sigset_t *old)
457 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
458 unblock_signals(&blockable_sigset, where, old);
459 #endif
462 #ifndef LISP_FEATURE_SB_SAFEPOINT
463 void
464 unblock_gc_signals(sigset_t *where, sigset_t *old)
466 #ifndef LISP_FEATURE_WIN32
467 unblock_signals(&gc_sigset, where, old);
468 #endif
470 #endif
472 void
473 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
475 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
476 sigset_t *sigset = os_context_sigmask_addr(context);
477 #ifndef LISP_FEATURE_SB_SAFEPOINT
478 if (all_signals_blocked_p(sigset, &gc_sigset, "gc")) {
479 corruption_warning_and_maybe_lose(
480 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
481 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
482 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
483 unblock_gc_signals(sigset, 0);
485 #endif
486 if (!interrupt_handler_pending_p()) {
487 unblock_deferrable_signals(sigset, 0);
489 #endif
493 inline static void
494 check_interrupts_enabled_or_lose(os_context_t *context)
496 struct thread *thread=arch_os_get_current_thread();
497 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
498 lose("interrupts not enabled\n");
499 if (arch_pseudo_atomic_atomic(context))
500 lose ("in pseudo atomic section\n");
/* Save sigset (or the current sigmask if 0) if there is no pending
 * handler, because that means that deferrables are already blocked.
 * The purpose is to avoid losing the pending gc signal if a
 * deferrable interrupt async unwinds between clearing the pseudo
 * atomic and trapping to GC. */
508 #ifndef LISP_FEATURE_SB_SAFEPOINT
/* If no handler is pending, save the signal mask that should
 * eventually be restored (SIGSET if given, else the mask current on
 * entry) into data->pending_mask, set gc_blocked_deferrables, and
 * leave the deferrables blocked. This keeps a deferrable interrupt
 * from async-unwinding between the end of pseudo-atomic and the GC
 * trap (see the comment above this function). */
509 void
510 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
512 #ifndef LISP_FEATURE_WIN32
513 struct thread *thread = arch_os_get_current_thread();
514 struct interrupt_data *data = thread->interrupt_data;
515 sigset_t oldset;
516 /* Obviously, this function is called when signals may not be
517 * blocked. Let's make sure we are not interrupted. */
518 block_blockable_signals(0, &oldset);
519 #ifndef LISP_FEATURE_SB_THREAD
/* NOTE(review): this sanity check runs only on non-threaded builds;
 * with threads, a SIG_STOP_FOR_GC and a normal GC may both want to
 * block deferrables, so the flag may legitimately already be set. */
520 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
521 * block. */
522 if (data->gc_blocked_deferrables)
523 lose("gc_blocked_deferrables already true\n");
524 #endif
525 if ((!data->pending_handler) &&
526 (!data->gc_blocked_deferrables)) {
527 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
528 data->gc_blocked_deferrables = 1;
529 if (sigset) {
530 /* This is the sigmask of some context. */
531 sigcopyset(&data->pending_mask, sigset);
532 sigaddset_deferrable(sigset);
533 thread_sigmask(SIG_SETMASK,&oldset,0);
534 return;
535 } else {
536 /* Operating on the current sigmask. Save oldset and
537 * unblock gc signals. In the end, this is equivalent to
538 * blocking the deferrables. */
539 sigcopyset(&data->pending_mask, &oldset);
540 thread_sigmask(SIG_UNBLOCK, &gc_sigset, 0);
541 return;
/* Otherwise a handler is already pending (deferrables already
 * blocked): just restore the mask we saved on entry. */
544 thread_sigmask(SIG_SETMASK,&oldset,0);
545 #endif
547 #endif
549 /* Are we leaving WITH-GCING and already running with interrupts
550 * enabled, without the protection of *GC-INHIBIT* T and there is gc
551 * (or stop for gc) pending, but we haven't trapped yet? */
553 in_leaving_without_gcing_race_p(struct thread *thread)
555 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
556 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
557 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
558 ((SymbolValue(GC_PENDING,thread) != NIL)
559 #if defined(LISP_FEATURE_SB_THREAD)
560 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
561 #endif
565 /* Check our baroque invariants. */
566 void
567 check_interrupt_context_or_lose(os_context_t *context)
569 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
570 struct thread *thread = arch_os_get_current_thread();
571 struct interrupt_data *data = thread->interrupt_data;
572 int interrupt_deferred_p = (data->pending_handler != 0);
573 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
574 sigset_t *sigset = os_context_sigmask_addr(context);
575 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
576 * handle_allocation_trap. */
577 #if defined(LISP_FEATURE_GENCGC) && !defined(GENCGC_IS_PRECISE)
578 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
579 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
580 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
581 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
582 int in_race_p = in_leaving_without_gcing_race_p(thread);
583 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
584 * section and trapping, a SIG_STOP_FOR_GC would see the next
585 * check fail, for this reason sig_stop_for_gc handler does not
586 * call this function. */
587 if (interrupt_deferred_p) {
588 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
589 lose("Stray deferred interrupt.\n");
591 if (gc_pending)
592 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
593 lose("GC_PENDING, but why?\n");
594 #if defined(LISP_FEATURE_SB_THREAD)
596 int stop_for_gc_pending =
597 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
598 if (stop_for_gc_pending)
599 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
600 lose("STOP_FOR_GC_PENDING, but why?\n");
601 if (pseudo_atomic_interrupted)
602 if (!(gc_pending || stop_for_gc_pending || interrupt_deferred_p))
603 lose("pseudo_atomic_interrupted, but why?\n");
605 #else
606 if (pseudo_atomic_interrupted)
607 if (!(gc_pending || interrupt_deferred_p))
608 lose("pseudo_atomic_interrupted, but why?\n");
609 #endif
610 #endif
611 if (interrupt_pending && !interrupt_deferred_p)
612 lose("INTERRUPT_PENDING but not pending handler.\n");
613 if ((data->gc_blocked_deferrables) && interrupt_pending)
614 lose("gc_blocked_deferrables and interrupt pending\n.");
615 if (data->gc_blocked_deferrables)
616 check_deferrables_blocked_or_lose(sigset);
617 if (interrupt_pending || interrupt_deferred_p ||
618 data->gc_blocked_deferrables)
619 check_deferrables_blocked_or_lose(sigset);
620 else {
621 check_deferrables_unblocked_or_lose(sigset);
622 #ifndef LISP_FEATURE_SB_SAFEPOINT
623 /* If deferrables are unblocked then we are open to signals
624 * that run lisp code. */
625 check_gc_signals_unblocked_or_lose(sigset);
626 #endif
628 #endif
/* utility routines used by various signal handlers */
/* On ports where the control stack is distinct from the C stack,
 * fabricate one or two Lisp control-stack frames on TH's stack so
 * that the interrupted state in CONTEXT looks like an ordinary Lisp
 * call to the debugger/GC. No-op when the C stack IS the control
 * stack (x86oids). */
635 static void
636 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
638 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
640 lispobj oldcont;
642 /* Build a fake stack frame or frames */
644 #if !defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_ARM64)
645 access_control_frame_pointer(th) =
646 (lispobj *)(uword_t)
647 (*os_context_register_addr(context, reg_CSP));
648 if ((lispobj *)(uword_t)
649 (*os_context_register_addr(context, reg_CFP))
650 == access_control_frame_pointer(th)) {
651 /* There is a small window during call where the callee's
652 * frame isn't built yet. */
653 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
654 == FUN_POINTER_LOWTAG) {
655 /* We have called, but not built the new frame, so
656 * build it for them. */
657 access_control_frame_pointer(th)[0] =
658 *os_context_register_addr(context, reg_OCFP);
659 access_control_frame_pointer(th)[1] =
660 *os_context_register_addr(context, reg_LRA);
661 access_control_frame_pointer(th) += 2;
662 /* Build our frame on top of it. */
663 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
665 else {
666 /* We haven't yet called, build our frame as if the
667 * partial frame wasn't there. */
668 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
670 } else
671 #elif defined (LISP_FEATURE_ARM)
672 access_control_frame_pointer(th) =
673 SymbolValue(CONTROL_STACK_POINTER, th);
674 #elif defined (LISP_FEATURE_ARM64)
675 access_control_frame_pointer(th) =
676 (lispobj *)(uword_t) (*os_context_register_addr(context, reg_CSP));
677 #endif
678 /* We can't tell whether we are still in the caller if it had to
679 * allocate a stack frame due to stack arguments. */
680 /* This observation provoked some past CMUCL maintainer to ask
681 * "Can anything strange happen during return?" */
683 /* normal case */
684 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
/* Push the fake frame: [0]=saved CFP chain, [1]=no LRA, [2]=code. */
687 access_control_stack_pointer(th) = access_control_frame_pointer(th) + 3;
689 access_control_frame_pointer(th)[0] = oldcont;
690 access_control_frame_pointer(th)[1] = NIL;
691 access_control_frame_pointer(th)[2] =
692 (lispobj)(*os_context_register_addr(context, reg_CODE));
693 #endif
696 /* Stores the context for gc to scavenge and builds fake stack
697 * frames. Must be called with blockables blocked (asserted below);
 * pairs with undo_fake_foreign_function_call(). */
698 void
699 fake_foreign_function_call(os_context_t *context)
701 int context_index;
702 struct thread *thread=arch_os_get_current_thread();
704 /* context_index incrementing must not be interrupted */
705 check_blockables_blocked_or_lose(0);
707 /* Get current Lisp state from context. */
708 #if defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_GENCGC)
709 dynamic_space_free_pointer = SymbolValue(ALLOCATION_POINTER, thread);
710 #endif
/* On ports keeping the allocation pointer in a register, pull it (or,
 * with threads, the pseudo-atomic bits) out of the context. */
711 #ifdef reg_ALLOC
712 #ifdef LISP_FEATURE_SB_THREAD
713 thread->pseudo_atomic_bits =
714 #else
715 dynamic_space_free_pointer =
716 (lispobj *)(uword_t)
717 #endif
718 (*os_context_register_addr(context, reg_ALLOC))#;
719 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
720 /* dynamic_space_free_pointer); */
/* Sanity: low tag bits in the free pointer mean corrupted state. */
721 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
722 if ((sword_t)dynamic_space_free_pointer & 1) {
723 lose("dead in fake_foreign_function_call, context = %x\n", context);
725 #endif
726 /* why doesnt PPC and SPARC do something like this: */
727 #if defined(LISP_FEATURE_HPPA)
728 if ((sword_t)dynamic_space_free_pointer & 4) {
729 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
731 #endif
732 #endif
733 #ifdef reg_BSP
734 set_binding_stack_pointer(thread,
735 *os_context_register_addr(context, reg_BSP));
736 #endif
738 #if defined(LISP_FEATURE_ARM)
739 /* Stash our control stack pointer */
740 bind_variable(INTERRUPTED_CONTROL_STACK_POINTER,
741 SymbolValue(CONTROL_STACK_POINTER, thread),
742 thread);
743 #endif
745 build_fake_control_stack_frames(thread,context);
747 /* Do dynamic binding of the active interrupt context index
748 * and save the context in the context array. */
749 context_index =
750 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
752 if (context_index >= MAX_INTERRUPTS) {
753 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
756 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
757 make_fixnum(context_index + 1),thread);
759 thread->interrupt_contexts[context_index] = context;
761 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
762 /* x86oid targets don't maintain the foreign function call flag at
763 * all, so leave them to believe that they are never in foreign
764 * code. */
765 foreign_function_call_active_p(thread) = 1;
766 #endif
769 /* blocks all blockable signals. If you are calling from a signal handler,
770 * the usual signal mask will be restored from the context when the handler
771 * finishes. Otherwise, be careful */
/* Reverses fake_foreign_function_call(): unwinds the dynamic binding
 * of FREE-INTERRUPT-CONTEXT-INDEX and pushes the Lisp allocation
 * state back into CONTEXT. */
772 void
773 undo_fake_foreign_function_call(os_context_t *context)
775 struct thread *thread=arch_os_get_current_thread();
776 /* Block all blockable signals. */
777 block_blockable_signals(0, 0);
779 foreign_function_call_active_p(thread) = 0;
781 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
782 unbind(thread);
784 #if defined(LISP_FEATURE_ARM)
785 /* Restore our saved control stack pointer */
786 SetSymbolValue(CONTROL_STACK_POINTER,
787 SymbolValue(INTERRUPTED_CONTROL_STACK_POINTER,
788 thread),
789 thread);
790 unbind(thread);
791 #endif
793 #if defined(reg_ALLOC) && !defined(LISP_FEATURE_SB_THREAD)
794 /* Put the dynamic space free pointer back into the context. */
795 *os_context_register_addr(context, reg_ALLOC) =
796 (uword_t) dynamic_space_free_pointer
797 | (*os_context_register_addr(context, reg_ALLOC)
798 & LOWTAG_MASK);
/* NOTE(review): the next three lines look like the alternative
 * formula that upstream keeps inside a block comment; the comment
 * delimiters appear to have been lost here -- confirm against the
 * original file before building. */
800 ((uword_t)(*os_context_register_addr(context, reg_ALLOC))
801 & ~LOWTAG_MASK)
802 | ((uword_t) dynamic_space_free_pointer & LOWTAG_MASK);
804 #endif
805 #if defined(reg_ALLOC) && defined(LISP_FEATURE_SB_THREAD)
806 /* Put the pseudo-atomic bits and dynamic space free pointer back
807 * into the context (p-a-bits for p-a, and dynamic space free
808 * pointer for ROOM). */
809 *os_context_register_addr(context, reg_ALLOC) =
810 (uword_t) dynamic_space_free_pointer
811 | (thread->pseudo_atomic_bits & LOWTAG_MASK);
812 /* And clear them so we don't get bit later by call-in/call-out
813 * not updating them. */
814 thread->pseudo_atomic_bits = 0;
815 #endif
816 #if defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_GENCGC)
817 SetSymbolValue(ALLOCATION_POINTER, dynamic_space_free_pointer, thread);
818 #endif
821 /* a handler for the signal caused by execution of a trap opcode
822 * signalling an internal error */
/* CONTEXT is the interrupted state; CONTINUABLE says whether Lisp's
 * INTERNAL-ERROR may resume past the trap (in which case the trap
 * instruction is skipped on return). Early in cold-init, before
 * internal_errors_enabled, there is no recovery: describe and lose. */
823 void
824 interrupt_internal_error(os_context_t *context, boolean continuable)
826 DX_ALLOC_SAP(context_sap, context);
828 fake_foreign_function_call(context);
830 if (!internal_errors_enabled) {
831 describe_internal_error(context);
832 /* There's no good way to recover from an internal error
833 * before the Lisp error handling mechanism is set up. */
834 lose("internal error too early in init, can't recover\n");
/* Re-enable signals so the Lisp-side handler can run. */
837 #ifndef LISP_FEATURE_SB_SAFEPOINT
838 unblock_gc_signals(0, 0);
839 #endif
841 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
842 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
843 #endif
845 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
846 /* Workaround for blocked SIGTRAP. */
848 sigset_t newset;
849 sigemptyset(&newset);
850 sigaddset(&newset, SIGTRAP);
851 thread_sigmask(SIG_UNBLOCK, &newset, 0);
853 #endif
855 SHOW("in interrupt_internal_error");
856 #if QSHOW == 2
857 /* Display some rudimentary debugging information about the
858 * error, so that even if the Lisp error handler gets badly
859 * confused, we have a chance to determine what's going on. */
860 describe_internal_error(context);
861 #endif
/* Hand the context to Lisp's INTERNAL-ERROR as a SAP. */
862 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
863 continuable ? T : NIL);
865 undo_fake_foreign_function_call(context); /* blocks signals again */
866 if (continuable)
867 arch_skip_instruction(context);
870 boolean
871 interrupt_handler_pending_p(void)
873 struct thread *thread = arch_os_get_current_thread();
874 struct interrupt_data *data = thread->interrupt_data;
875 return (data->pending_handler != 0);
878 void
879 interrupt_handle_pending(os_context_t *context)
881 /* There are three ways we can get here. First, if an interrupt
882 * occurs within pseudo-atomic, it will be deferred, and we'll
883 * trap to here at the end of the pseudo-atomic block. Second, if
884 * the GC (in alloc()) decides that a GC is required, it will set
885 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
886 * and alloc() is always called from within pseudo-atomic, and
887 * thus we end up here again. Third, when calling GC-ON or at the
888 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
889 * here if there is a pending GC. Fourth, ahem, at the end of
890 * WITHOUT-INTERRUPTS (bar complications with nesting).
892 * A fourth way happens with safepoints: In addition to a stop for
893 * GC that is pending, there are thruptions. Both mechanisms are
894 * mostly signal-free, yet also of an asynchronous nature, so it makes
895 * sense to let interrupt_handle_pending take care of running them:
896 * It gets run precisely at those places where it is safe to process
897 * pending asynchronous tasks. */
899 struct thread *thread = arch_os_get_current_thread();
900 struct interrupt_data *data = thread->interrupt_data;
902 if (arch_pseudo_atomic_atomic(context)) {
903 lose("Handling pending interrupt in pseudo atomic.");
906 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
908 check_blockables_blocked_or_lose(0);
909 #ifndef LISP_FEATURE_SB_SAFEPOINT
911 * (On safepoint builds, there is no gc_blocked_deferrables nor
912 * SIG_STOP_FOR_GC.)
914 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
915 * handler, then the pending mask was saved and
916 * gc_blocked_deferrables set. Hence, there can be no pending
917 * handler and it's safe to restore the pending mask.
919 * Note, that if gc_blocked_deferrables is false we may still have
920 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
921 * pseudo atomic was interrupt be a deferrable first. */
922 if (data->gc_blocked_deferrables) {
923 if (data->pending_handler)
924 lose("GC blocked deferrables but still got a pending handler.");
925 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
926 lose("GC blocked deferrables while GC is inhibited.");
927 /* Restore the saved signal mask from the original signal (the
928 * one that interrupted us during the critical section) into
929 * the os_context for the signal we're currently in the
930 * handler for. This should ensure that when we return from
931 * the handler the blocked signals are unblocked. */
932 #ifndef LISP_FEATURE_WIN32
933 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
934 #endif
935 data->gc_blocked_deferrables = 0;
937 #endif
939 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
940 void *original_pending_handler = data->pending_handler;
942 #ifdef LISP_FEATURE_SB_SAFEPOINT
943 /* handles the STOP_FOR_GC_PENDING case, plus THRUPTIONS */
944 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL
945 # ifdef LISP_FEATURE_SB_THRUPTION
946 || (SymbolValue(THRUPTION_PENDING,thread) != NIL
947 && SymbolValue(INTERRUPTS_ENABLED, thread) != NIL)
948 # endif
950 /* We ought to take this chance to do a pitstop now. */
951 thread_in_lisp_raised(context);
952 #elif defined(LISP_FEATURE_SB_THREAD)
953 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
954 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
955 * the signal handler if it actually stops us. */
956 arch_clear_pseudo_atomic_interrupted(context);
957 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
958 } else
959 #endif
960 /* Test for T and not for != NIL since the value :IN-PROGRESS
961 * is used in SUB-GC as part of the mechanism to supress
962 * recursive gcs.*/
963 if (SymbolValue(GC_PENDING,thread) == T) {
965 /* Two reasons for doing this. First, if there is a
966 * pending handler we don't want to run. Second, we are
967 * going to clear pseudo atomic interrupted to avoid
968 * spurious trapping on every allocation in SUB_GC and
969 * having a pending handler with interrupts enabled and
970 * without pseudo atomic interrupted breaks an
971 * invariant. */
972 if (data->pending_handler) {
973 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
974 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
977 arch_clear_pseudo_atomic_interrupted(context);
979 /* GC_PENDING is cleared in SUB-GC, or if another thread
980 * is doing a gc already we will get a SIG_STOP_FOR_GC and
981 * that will clear it.
983 * If there is a pending handler or gc was triggerred in a
984 * signal handler then maybe_gc won't run POST_GC and will
985 * return normally. */
986 if (!maybe_gc(context))
987 lose("GC not inhibited but maybe_gc did not GC.");
989 if (data->pending_handler) {
990 unbind(thread);
991 unbind(thread);
993 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
994 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
995 * GC-PENDING is not NIL then we cannot trap on pseudo
996 * atomic due to GC (see if(GC_PENDING) logic in
997 * cheneygc.c an gengcgc.c), plus there is a outer
998 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
999 * here? */
1000 lose("Trapping to run pending handler while GC in progress.");
1003 check_blockables_blocked_or_lose(0);
1005 /* No GC shall be lost. If SUB_GC triggers another GC then
1006 * that should be handled on the spot. */
1007 if (SymbolValue(GC_PENDING,thread) != NIL)
1008 lose("GC_PENDING after doing gc.");
1009 #ifdef THREADS_USING_GCSIGNAL
1010 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
1011 lose("STOP_FOR_GC_PENDING after doing gc.");
1012 #endif
1013 /* Check two things. First, that gc does not clobber a handler
1014 * that's already pending. Second, that there is no interrupt
1015 * lossage: if original_pending_handler was NULL then even if
1016 * an interrupt arrived during GC (POST-GC, really) it was
1017 * handled. */
1018 if (original_pending_handler != data->pending_handler)
1019 lose("pending handler changed in gc: %x -> %x.",
1020 original_pending_handler, data->pending_handler);
1023 #ifndef LISP_FEATURE_WIN32
1024 /* There may be no pending handler, because it was only a gc that
1025 * had to be executed or because Lisp is a bit too eager to call
1026 * DO-PENDING-INTERRUPT. */
1027 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
1028 (data->pending_handler)) {
1029 /* No matter how we ended up here, clear both
1030 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
1031 * because we checked above that there is no GC pending. */
1032 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
1033 arch_clear_pseudo_atomic_interrupted(context);
1034 /* Restore the sigmask in the context. */
1035 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
1036 run_deferred_handler(data, context);
1038 #ifdef LISP_FEATURE_SB_THRUPTION
1039 if (SymbolValue(THRUPTION_PENDING,thread)==T)
1040 /* Special case for the following situation: There is a
1041 * thruption pending, but a signal had been deferred. The
1042 * pitstop at the top of this function could only take care
1043 * of GC, and skipped the thruption, so we need to try again
1044 * now that INTERRUPT_PENDING and the sigmask have been
1045 * reset. */
1046 while (check_pending_thruptions(context))
1048 #endif
1049 #endif
1050 #ifdef LISP_FEATURE_GENCGC
1051 if (get_pseudo_atomic_interrupted(thread))
1052 lose("pseudo_atomic_interrupted after interrupt_handle_pending\n");
1053 #endif
1054 /* It is possible that the end of this function was reached
1055 * without never actually doing anything, the tests in Lisp for
1056 * when to call receive-pending-interrupt are not exact. */
1057 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
/* Dispatch SIGNAL to its installed handler immediately, in the
 * current thread.  Called with blockable signals blocked.  Depending
 * on interrupt_handlers[signal] this either returns at once (SIG_IGN),
 * loses (SIG_DFL), funcalls a Lisp handler on SAP-wrapped info and
 * context, or calls a C handler with the context's signal mask
 * restored first. */
1061 void
1062 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
1064 boolean were_in_lisp;
1065 union interrupt_handler handler;
1067 check_blockables_blocked_or_lose(0);
1069 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
1070 if (sigismember(&deferrable_sigset,signal))
1071 check_interrupts_enabled_or_lose(context);
1072 #endif
1074 handler = interrupt_handlers[signal];
1076 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
1077 return;
/* If we interrupted Lisp code, record the context so GC can find
 * the interrupted frame's registers (undone at the end). */
1080 were_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());
1081 if (were_in_lisp)
1083 fake_foreign_function_call(context);
1086 FSHOW_SIGNAL((stderr,
1087 "/entering interrupt_handle_now(%d, info, context)\n",
1088 signal));
1090 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
1092 /* This can happen if someone tries to ignore or default one
1093 * of the signals we need for runtime support, and the runtime
1094 * support decides to pass on it. */
1095 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
1097 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
1098 /* Once we've decided what to do about contexts in a
1099 * return-elsewhere world (the original context will no longer
1100 * be available; should we copy it or was nobody using it anyway?)
1101 * then we should convert this to return-elsewhere */
1103 /* CMUCL comment said "Allocate the SAPs while the interrupts
1104 * are still disabled.". I (dan, 2003.08.21) assume this is
1105 * because we're not in pseudoatomic and allocation shouldn't
1106 * be interrupted. In which case it's no longer an issue as
1107 * all our allocation from C now goes through a PA wrapper,
1108 * but still, doesn't hurt.
1110 * Yeah, but non-gencgc platforms don't really wrap allocation
1111 * in PA. MG - 2005-08-29 */
1114 #ifndef LISP_FEATURE_SB_SAFEPOINT
1115 /* Leave deferrable signals blocked, the handler itself will
1116 * allow signals again when it sees fit. */
1117 unblock_gc_signals(0, 0);
1118 #else
1119 WITH_GC_AT_SAFEPOINTS_ONLY()
1120 #endif
1121 { // the block is needed for WITH_GC_AT_SAFEPOINTS_ONLY() to work
1122 DX_ALLOC_SAP(context_sap, context);
1123 DX_ALLOC_SAP(info_sap, info);
1125 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
/* Lisp handler receives (signal info-sap context-sap). */
1127 funcall3(handler.lisp,
1128 make_fixnum(signal),
1129 info_sap,
1130 context_sap);
1132 } else {
1133 /* This cannot happen in sane circumstances. */
1135 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
1137 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
1138 /* Allow signals again. */
1139 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1140 (*handler.c)(signal, info, context);
1141 #endif
1144 if (were_in_lisp)
1146 undo_fake_foreign_function_call(context); /* block signals again */
1149 FSHOW_SIGNAL((stderr,
1150 "/returning from interrupt_handle_now(%d, info, context)\n",
1151 signal));
1154 /* This is called at the end of a critical section if the indications
1155  * are that some signal was deferred during the section. Note that as
1156  * far as C or the kernel is concerned we dealt with the signal
1157  * already; we're just doing the Lisp-level processing now that we
1158  * put off then */
1159 static void
1160 run_deferred_handler(struct interrupt_data *data, os_context_t *context)
1162 /* The pending_handler may enable interrupts and then another
1163 * interrupt may hit, overwrite interrupt_data, so reset the
1164 * pending handler before calling it. Trust the handler to finish
1165 * with the siginfo before enabling interrupts. */
1166 void (*pending_handler) (int, siginfo_t*, os_context_t*) =
1167 data->pending_handler;
1169 data->pending_handler=0;
1170 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
/* pending_signal/pending_info were stashed by store_signal_data_for_later. */
1171 (*pending_handler)(data->pending_signal,&(data->pending_info), context);
1174 #ifndef LISP_FEATURE_WIN32
/* Decide whether HANDLER for SIGNAL must be deferred (because Lisp
 * has interrupts disabled, or we are in a pseudo-atomic section).
 * Returns 1 if the signal data was stashed for later (caller must
 * NOT run the handler now), 0 if the caller should run it now. */
1175 boolean
1176 maybe_defer_handler(void *handler, struct interrupt_data *data,
1177 int signal, siginfo_t *info, os_context_t *context)
1179 struct thread *thread=arch_os_get_current_thread();
1181 check_blockables_blocked_or_lose(0);
1183 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
1184 lose("interrupt already pending\n");
1185 if (thread->interrupt_data->pending_handler)
1186 lose("there is a pending handler already (PA)\n");
1187 if (data->gc_blocked_deferrables)
1188 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
1189 check_interrupt_context_or_lose(context);
1190 /* If interrupts are disabled then INTERRUPT_PENDING is set and
1191 * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
1192 * atomic section inside a WITHOUT-INTERRUPTS.
1194 * Also, if in_leaving_without_gcing_race_p then
1195 * interrupt_handle_pending is going to be called soon, so
1196 * stashing the signal away is safe.
1198 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
1199 in_leaving_without_gcing_race_p(thread)) {
1200 FSHOW_SIGNAL((stderr,
1201 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
1202 (unsigned int)handler,signal,
1203 in_leaving_without_gcing_race_p(thread)));
1204 store_signal_data_for_later(data,handler,signal,info,context);
1205 SetSymbolValue(INTERRUPT_PENDING, T,thread);
1206 check_interrupt_context_or_lose(context);
1207 return 1;
1209 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
1210 * actually use its argument for anything on x86, so this branch
1211 * may succeed even when context is null (gencgc alloc()) */
1212 if (arch_pseudo_atomic_atomic(context)) {
1213 FSHOW_SIGNAL((stderr,
1214 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1215 (unsigned int)handler,signal));
1216 store_signal_data_for_later(data,handler,signal,info,context);
/* Arrange for a trap at the end of the PA section so the deferred
 * handler gets run then. */
1217 arch_set_pseudo_atomic_interrupted(context);
1218 check_interrupt_context_or_lose(context);
1219 return 1;
1221 FSHOW_SIGNAL((stderr,
1222 "/maybe_defer_handler(%x,%d): not deferred\n",
1223 (unsigned int)handler,signal));
1224 return 0;
/* Stash HANDLER, SIGNAL, INFO and the context's signal mask in DATA
 * so run_deferred_handler can replay the signal later.  Also widens
 * the mask in CONTEXT so deferrable signals stay blocked when the
 * current handler returns. */
1227 static void
1228 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1229 int signal,
1230 siginfo_t *info, os_context_t *context)
1232 if (data->pending_handler)
1233 lose("tried to overwrite pending interrupt handler %x with %x\n",
1234 data->pending_handler, handler);
1235 if (!handler)
1236 lose("tried to defer null interrupt handler\n");
1237 data->pending_handler = handler;
1238 data->pending_signal = signal;
/* info may be NULL (e.g. synthesized signals); copy only if present. */
1239 if(info)
1240 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1242 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1243 signal));
1245 if(!context)
1246 lose("Null context");
1248 /* the signal mask in the context (from before we were
1249 * interrupted) is copied to be restored when run_deferred_handler
1250 * happens. Then the usually-blocked signals are added to the mask
1251 * in the context so that we are running with blocked signals when
1252 * the handler returns */
1253 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1254 sigaddset_deferrable(os_context_sigmask_addr(context));
/* Outermost (OS-installed) handler for ordinary deferrable signals:
 * either defer the signal (interrupts disabled / pseudo-atomic) or
 * run interrupt_handle_now immediately.  SAVE_ERRNO declares
 * `context' and preserves errno across the handler. */
1257 static void
1258 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1260 SAVE_ERRNO(signal,context,void_context);
1261 struct thread *thread = arch_os_get_current_thread();
1262 struct interrupt_data *data = thread->interrupt_data;
1263 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1264 interrupt_handle_now(signal, info, context);
1265 RESTORE_ERRNO;
/* Run the C-level "low level" handler for SIGNAL (runtime-internal
 * signals that bypass the Lisp handler machinery entirely). */
1268 static void
1269 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1270 os_context_t *context)
1272 /* No FP control fixage needed, caller has done that. */
1273 check_blockables_blocked_or_lose(0);
1274 check_interrupts_enabled_or_lose(context);
1275 (*interrupt_low_level_handlers[signal])(signal, info, context);
1276 /* No Darwin context fixage needed, caller does that. */
/* Outermost (OS-installed) handler variant for signals with a
 * low-level C handler: defer if necessary, otherwise dispatch to
 * low_level_interrupt_handle_now.  Mirrors maybe_now_maybe_later. */
1279 static void
1280 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1282 SAVE_ERRNO(signal,context,void_context);
1283 struct thread *thread = arch_os_get_current_thread();
1284 struct interrupt_data *data = thread->interrupt_data;
1286 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1287 signal,info,context))
1288 low_level_interrupt_handle_now(signal, info, context);
1289 RESTORE_ERRNO;
1291 #endif
1293 #ifdef THREADS_USING_GCSIGNAL
1295 /* This function must not cons, because that may trigger a GC. */
/* Handler for SIG_STOP_FOR_GC: park this thread (STATE_STOPPED) until
 * the GCing thread lets it resume.  If GC is inhibited or we are in a
 * pseudo-atomic section, just record STOP_FOR_GC_PENDING and return.
 * NOTE(review): the signal/info parameters are unused here; the
 * function is also called directly with NULL info (see
 * interrupt_handle_pending). */
1296 void
1297 sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
1299 struct thread *thread=arch_os_get_current_thread();
1300 boolean was_in_lisp;
1302 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1303 * pseudo atomic until gc is finally allowed. */
1304 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1305 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1306 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1307 return;
1308 } else if (arch_pseudo_atomic_atomic(context)) {
1309 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1310 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1311 arch_set_pseudo_atomic_interrupted(context);
1312 maybe_save_gc_mask_and_block_deferrables
1313 (os_context_sigmask_addr(context));
1314 return;
1317 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1319 /* Not PA and GC not inhibited -- we can stop now. */
1321 was_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());
1323 if (was_in_lisp) {
1324 /* need the context stored so it can have registers scavenged */
1325 fake_foreign_function_call(context);
1328 /* Not pending anymore. */
1329 SetSymbolValue(GC_PENDING,NIL,thread);
1330 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1332 /* Consider this: in a PA section GC is requested: GC_PENDING,
1333 * pseudo_atomic_interrupted and gc_blocked_deferrables are set,
1334 * deferrables are blocked then pseudo_atomic_atomic is cleared,
1335 * but a SIG_STOP_FOR_GC arrives before trapping to
1336 * interrupt_handle_pending. Here, GC_PENDING is cleared but
1337 * pseudo_atomic_interrupted is not and we go on running with
1338 * pseudo_atomic_interrupted but without a pending interrupt or
1339 * GC. GC_BLOCKED_DEFERRABLES is also left at 1. So let's tidy it
1340 * up. */
1341 if (thread->interrupt_data->gc_blocked_deferrables) {
1342 FSHOW_SIGNAL((stderr,"cleaning up after gc_blocked_deferrables\n"));
1343 clear_pseudo_atomic_interrupted(thread);
1344 sigcopyset(os_context_sigmask_addr(context),
1345 &thread->interrupt_data->pending_mask);
1346 thread->interrupt_data->gc_blocked_deferrables = 0;
1349 if(thread_state(thread)!=STATE_RUNNING) {
1350 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1351 fixnum_value(thread->state));
1354 set_thread_state(thread,STATE_STOPPED);
1355 FSHOW_SIGNAL((stderr,"suspended\n"));
1357 /* While waiting for gc to finish occupy ourselves with zeroing
1358 * the unused portion of the control stack to reduce conservatism.
1359 * On hypothetic platforms with threads and exact gc it is
1360 * actually a must. */
1361 scrub_control_stack();
/* Block until the GCing thread moves us out of STATE_STOPPED. */
1363 wait_for_thread_state_change(thread, STATE_STOPPED);
1364 FSHOW_SIGNAL((stderr,"resumed\n"));
1366 if(thread_state(thread)!=STATE_RUNNING) {
1367 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1368 fixnum_value(thread_state(thread)));
1371 if (was_in_lisp) {
1372 undo_fake_foreign_function_call(context);
1376 #endif
/* OS-installable wrapper around interrupt_handle_now: preserves
 * errno and, for signals that usually indicate memory corruption
 * (SIGILL/SIGBUS, and SIGEMT on non-Linux), prints a corruption
 * warning (and possibly loses) before dispatching. */
1378 void
1379 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1381 SAVE_ERRNO(signal,context,void_context);
1382 #ifndef LISP_FEATURE_WIN32
1383 if ((signal == SIGILL) || (signal == SIGBUS)
1384 #if !(defined(LISP_FEATURE_LINUX) || defined(LISP_FEATURE_ANDROID))
1385 || (signal == SIGEMT)
1386 #endif
1388 corruption_warning_and_maybe_lose("Signal %d received (PC: %p)", signal,
1389 *os_context_pc_addr(context));
1390 #endif
1391 interrupt_handle_now(signal, info, context);
1392 RESTORE_ERRNO;
1395 /* manipulate the signal context and stack such that when the handler
1396 * returns, it will call function instead of whatever it was doing
1397 * previously
1400 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1401 extern int *context_eflags_addr(os_context_t *context);
1402 #endif
1404 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1405 extern void post_signal_tramp(void);
1406 extern void call_into_lisp_tramp(void);
/* Rewrite CONTEXT (PC, stack/frame pointers, argument registers) so
 * that sigreturn resumes in FUNPTR (a call_into_lisp-compatible entry
 * point) applied to FUNCTION with zero args, and so that FUNPTR's
 * return lands in post_signal_tramp, which restores the registers
 * saved on the stack below and continues where we were interrupted.
 * Heavily architecture-specific; see the per-#ifdef comments. */
1408 void
1409 arrange_return_to_c_function(os_context_t *context,
1410 call_into_lisp_lookalike funptr,
1411 lispobj function)
1413 #if !(defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_SAFEPOINT))
1414 check_gc_signals_unblocked_or_lose
1415 (os_context_sigmask_addr(context));
1416 #endif
1417 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1418 void * fun=native_pointer(function);
1419 void *code = &(((struct simple_fun *) fun)->code);
1420 #endif
1422 /* Build a stack frame showing `interrupted' so that the
1423 * user's backtrace makes (as much) sense (as usual) */
1425 /* fp state is saved and restored by call_into_lisp */
1426 /* FIXME: errno is not restored, but since current uses of this
1427 * function only call Lisp code that signals an error, it's not
1428 * much of a problem. In other words, running out of the control
1429 * stack between a syscall and (GET-ERRNO) may clobber errno if
1430 * something fails during signalling or in the handler. But I
1431 * can't see what can go wrong as long as there is no CONTINUE
1432 * like restart on them. */
1433 #ifdef LISP_FEATURE_X86
1434 /* Suppose the existence of some function that saved all
1435 * registers, called call_into_lisp, then restored GP registers and
1436 * returned. It would look something like this:
1438 push ebp
1439 mov ebp esp
1440 pushfl
1441 pushal
1442 push $0
1443 push $0
1444 pushl {address of function to call}
1445 call 0x8058db0 <call_into_lisp>
1446 addl $12,%esp
1447 popal
1448 popfl
1449 leave
1452 * What we do here is set up the stack that call_into_lisp would
1453 * expect to see if it had been called by this code, and frob the
1454 * signal context so that signal return goes directly to call_into_lisp,
1455 * and when that function (and the lisp function it invoked) returns,
1456 * it returns to the second half of this imaginary function which
1457 * restores all registers and returns to C
1459 * For this to work, the latter part of the imaginary function
1460 * must obviously exist in reality. That would be post_signal_tramp
1463 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1465 #if defined(LISP_FEATURE_DARWIN)
1466 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1468 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1469 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1471 /* 1. os_validate (malloc/mmap) register_save_block
1472 * 2. copy register state into register_save_block
1473 * 3. put a pointer to register_save_block in a register in the context
1474 * 4. set the context's EIP to point to a trampoline which:
1475 * a. builds the fake stack frame from the block
1476 * b. frees the block
1477 * c. calls the function
1480 *register_save_area = *os_context_pc_addr(context);
1481 *(register_save_area + 1) = function;
1482 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1483 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1484 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1485 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1486 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1487 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1488 *(register_save_area + 8) = *context_eflags_addr(context);
1490 *os_context_pc_addr(context) =
1491 (os_context_register_t) funptr;
1492 *os_context_register_addr(context,reg_ECX) =
1493 (os_context_register_t) register_save_area;
1494 #else
1496 /* return address for call_into_lisp: */
1497 *(sp-15) = (u32)post_signal_tramp;
1498 *(sp-14) = function; /* args for call_into_lisp : function*/
1499 *(sp-13) = 0; /* arg array */
1500 *(sp-12) = 0; /* no. args */
1501 /* this order matches that used in POPAD */
1502 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1503 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1505 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1506 /* POPAD ignores the value of ESP: */
1507 *(sp-8)=0;
1508 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1510 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1511 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1512 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1513 *(sp-3)=*context_eflags_addr(context);
1514 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1515 *(sp-1)=*os_context_pc_addr(context);
1517 #endif
1519 #elif defined(LISP_FEATURE_X86_64)
1520 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1522 /* return address for call_into_lisp: */
1523 *(sp-18) = (u64)post_signal_tramp;
/* Save all GP registers (except RBP/RSP, handled separately) in the
 * order post_signal_tramp will pop them. */
1525 *(sp-17)=*os_context_register_addr(context,reg_R15);
1526 *(sp-16)=*os_context_register_addr(context,reg_R14);
1527 *(sp-15)=*os_context_register_addr(context,reg_R13);
1528 *(sp-14)=*os_context_register_addr(context,reg_R12);
1529 *(sp-13)=*os_context_register_addr(context,reg_R11);
1530 *(sp-12)=*os_context_register_addr(context,reg_R10);
1531 *(sp-11)=*os_context_register_addr(context,reg_R9);
1532 *(sp-10)=*os_context_register_addr(context,reg_R8);
1533 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1534 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1535 /* skip RBP and RSP */
1536 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1537 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1538 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1539 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1540 *(sp-3)=*context_eflags_addr(context);
1541 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1542 *(sp-1)=*os_context_pc_addr(context);
/* SysV AMD64 argument registers for call_into_lisp(fun, args, nargs). */
1544 *os_context_register_addr(context,reg_RDI) =
1545 (os_context_register_t)function; /* function */
1546 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1547 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1548 #else
1549 struct thread *th=arch_os_get_current_thread();
1550 build_fake_control_stack_frames(th,context);
1551 #endif
1553 #ifdef LISP_FEATURE_X86
1555 #if !defined(LISP_FEATURE_DARWIN)
1556 *os_context_pc_addr(context) = (os_context_register_t)funptr;
1557 *os_context_register_addr(context,reg_ECX) = 0;
1558 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
1559 #ifdef __NetBSD__
1560 *os_context_register_addr(context,reg_UESP) =
1561 (os_context_register_t)(sp-15);
1562 #else
1563 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1564 #endif /* __NETBSD__ */
1565 #endif /* LISP_FEATURE_DARWIN */
1567 #elif defined(LISP_FEATURE_X86_64)
1568 *os_context_pc_addr(context) = (os_context_register_t)funptr;
1569 *os_context_register_addr(context,reg_RCX) = 0;
1570 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1571 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1572 #else
1573 /* this much of the calling convention is common to all
1574 non-x86 ports */
1575 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1576 *os_context_register_addr(context,reg_NARGS) = 0;
1577 #ifdef reg_LIP
1578 *os_context_register_addr(context,reg_LIP) =
1579 (os_context_register_t)(unsigned long)code;
1580 #endif
1581 *os_context_register_addr(context,reg_CFP) =
1582 (os_context_register_t)(unsigned long)access_control_frame_pointer(th);
1583 #endif
1584 #ifdef ARCH_HAS_NPC_REGISTER
1585 *os_context_npc_addr(context) =
1586 4 + *os_context_pc_addr(context);
1587 #endif
1588 #if defined(LISP_FEATURE_SPARC) || defined(LISP_FEATURE_ARM) || defined(LISP_FEATURE_ARM64)
1589 *os_context_register_addr(context,reg_CODE) =
1590 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1591 #endif
1592 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1593 (long)function));
1596 void
1597 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1599 #if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_X86)
1600 arrange_return_to_c_function(context, call_into_lisp_tramp, function);
1601 #else
1602 arrange_return_to_c_function(context, call_into_lisp, function);
1603 #endif
1606 // These have undefined_alien_function tramp in x-assem.S
1607 #if !(defined(LISP_FEATURE_X86_64) || defined(LISP_FEATURE_ARM) || defined(LISP_FEATURE_ARM64))
1608 /* KLUDGE: Theoretically the approach we use for undefined alien
1609 * variables should work for functions as well, but on PPC/Darwin
1610 * we get bus error at bogus addresses instead, hence this workaround,
1611 * that has the added benefit of automatically discriminating between
1612 * functions and variables.
1614 void
1615 undefined_alien_function(void)
1617 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUN_ERROR));
1619 #endif
/* Unprotect TH's control stack guard page (giving the error handler
 * headroom) and arm the return guard page so we notice when the
 * stack unwinds back out of the guard region; record the state in
 * the thread struct. */
1621 void lower_thread_control_stack_guard_page(struct thread *th)
1623 protect_control_stack_guard_page(0, th);
1624 protect_control_stack_return_guard_page(1, th);
1625 th->control_stack_guard_page_protected = NIL;
1626 fprintf(stderr, "INFO: Control stack guard page unprotected\n");
/* Re-arm TH's control stack guard page: zero the page first
 * (presumably to scrub stale values the conservative GC might
 * otherwise see -- confirm), reprotect it, and disarm the return
 * guard page. */
1629 void reset_thread_control_stack_guard_page(struct thread *th)
1631 memset(CONTROL_STACK_GUARD_PAGE(th), 0, os_vm_page_size);
1632 protect_control_stack_guard_page(1, th);
1633 protect_control_stack_return_guard_page(0, th);
1634 th->control_stack_guard_page_protected = T;
1635 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1638 /* Called from the REPL, too. */
1639 void reset_control_stack_guard_page(void)
1641 struct thread *th=arch_os_get_current_thread();
1642 if (th->control_stack_guard_page_protected == NIL) {
1643 reset_thread_control_stack_guard_page(th);
/* Lower the current thread's control stack guard page. */
void lower_control_stack_guard_page(void)
{
    struct thread *self = arch_os_get_current_thread();
    lower_thread_control_stack_guard_page(self);
}
/* Called on a protection fault at ADDR to see whether it hit one of
 * the guard pages of the current thread's control, binding or alien
 * stacks (or the undefined-alien page).  Returns 1 if the fault was
 * recognized and handled (typically by lowering the guard page and
 * arranging a Lisp error), 0 if it is not a guard-page fault.  Hard
 * guard page hits are unrecoverable and lose immediately. */
1652 boolean
1653 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1655 struct thread *th=arch_os_get_current_thread();
1657 if(addr >= CONTROL_STACK_HARD_GUARD_PAGE(th) &&
1658 addr < CONTROL_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1659 lose("Control stack exhausted, fault: %p, PC: %p",
1660 addr, *os_context_pc_addr(context));
1662 else if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1663 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1664 /* We hit the end of the control stack: disable guard page
1665 * protection so the error handler has some headroom, protect the
1666 * previous page so that we can catch returns from the guard page
1667 * and restore it. */
1668 if (th->control_stack_guard_page_protected == NIL)
1669 lose("control_stack_guard_page_protected NIL");
1670 lower_control_stack_guard_page();
1671 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1672 /* For the unfortunate case, when the control stack is
1673 * exhausted in a signal handler. */
1674 unblock_signals_in_context_and_maybe_warn(context);
1675 #endif
1676 arrange_return_to_lisp_function
1677 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1678 return 1;
1680 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1681 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1682 /* We're returning from the guard page: reprotect it, and
1683 * unprotect this one. This works even if we somehow missed
1684 * the return-guard-page, and hit it on our way to new
1685 * exhaustion instead. */
1686 if (th->control_stack_guard_page_protected != NIL)
1687 lose("control_stack_guard_page_protected not NIL")
1688 reset_control_stack_guard_page();
1689 return 1;
1691 else if(addr >= BINDING_STACK_HARD_GUARD_PAGE(th) &&
1692 addr < BINDING_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1693 lose("Binding stack exhausted");
1695 else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
1696 addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1697 protect_binding_stack_guard_page(0, NULL);
1698 protect_binding_stack_return_guard_page(1, NULL);
1699 fprintf(stderr, "INFO: Binding stack guard page unprotected\n");
1701 /* For the unfortunate case, when the binding stack is
1702 * exhausted in a signal handler. */
1703 unblock_signals_in_context_and_maybe_warn(context);
1704 arrange_return_to_lisp_function
1705 (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
1706 return 1;
1708 else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
1709 addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1710 protect_binding_stack_guard_page(1, NULL);
1711 protect_binding_stack_return_guard_page(0, NULL);
1712 fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
1713 return 1;
1715 else if(addr >= ALIEN_STACK_HARD_GUARD_PAGE(th) &&
1716 addr < ALIEN_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1717 lose("Alien stack exhausted");
1719 else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
1720 addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1721 protect_alien_stack_guard_page(0, NULL);
1722 protect_alien_stack_return_guard_page(1, NULL);
1723 fprintf(stderr, "INFO: Alien stack guard page unprotected\n");
1725 /* For the unfortunate case, when the alien stack is
1726 * exhausted in a signal handler. */
1727 unblock_signals_in_context_and_maybe_warn(context);
1728 arrange_return_to_lisp_function
1729 (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
1730 return 1;
1732 else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
1733 addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1734 protect_alien_stack_guard_page(1, NULL);
1735 protect_alien_stack_return_guard_page(0, NULL);
1736 fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
1737 return 1;
1739 else if (addr >= undefined_alien_address &&
1740 addr < undefined_alien_address + os_vm_page_size) {
1741 arrange_return_to_lisp_function
1742 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1743 return 1;
1745 else return 0;
1749  * noise to install handlers
1752 #ifndef LISP_FEATURE_WIN32
1753 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1754  * they are blocked, in Linux 2.6 the default handler is invoked
1755  * instead that usually coredumps. One might hastily think that adding
1756  * SA_NODEFER helps, but until ~2.6.13, if SA_NODEFER is specified,
1757  * the whole sa_mask is ignored -- instead of merely not adding the
1758  * signal in question to the mask. That means if it's not blockable the
1759  * signal must be unblocked at the beginning of signal handlers.
1761  * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1762  * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1763  * will be unblocked in the sigmask during the signal handler. -- RMK
1764  * X-mas day, 2005
/* Result of the runtime probe below: -1 = not yet tested, 0 = broken,
 * 1 = SA_NODEFER behaves as documented.  volatile: written from a
 * signal handler, spun on in see_if_sigaction_nodefer_works. */
1766 static volatile int sigaction_nodefer_works = -1;
/* Arbitrary pair of signals used only by the probe. */
1768 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1769 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
/* Probe handler: inspects the current signal mask and records in
 * sigaction_nodefer_works whether exactly the expected two signals
 * are blocked (i.e. whether the kernel honors SA_NODEFER's
 * documented semantics). */
1771 static void
1772 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1774 sigset_t current;
1775 int i;
1776 get_current_sigmask(&current);
1777 /* There should be exactly two blocked signals: the two we added
1778 * to sa_mask when setting up the handler. NetBSD doesn't block
1779 * the signal we're handling when SA_NODEFER is set; Linux before
1780 * 2.6.13 or so also doesn't block the other signal when
1781 * SA_NODEFER is set. */
1782 for(i = 1; i < NSIG; i++)
1783 if (sigismember(&current, i) !=
1784 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1785 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1786 sigaction_nodefer_works = 0;
/* Only declare success if no mismatch was seen above. */
1788 if (sigaction_nodefer_works == -1)
1789 sigaction_nodefer_works = 1;
1792 static void
1793 see_if_sigaction_nodefer_works(void)
1795 struct sigaction sa, old_sa;
1797 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1798 sa.sa_sigaction = sigaction_nodefer_test_handler;
1799 sigemptyset(&sa.sa_mask);
1800 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1801 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1802 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1803 /* Make sure no signals are blocked. */
1805 sigset_t empty;
1806 sigemptyset(&empty);
1807 thread_sigmask(SIG_SETMASK, &empty, 0);
1809 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
1810 while (sigaction_nodefer_works == -1);
1811 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1814 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1815 #undef SA_NODEFER_TEST_KILL_SIGNAL
1817 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
1819 static void *
1820 signal_thread_trampoline(void *pthread_arg)
1822 intptr_t signo = (intptr_t) pthread_arg;
1823 os_context_t fake_context;
1824 siginfo_t fake_info;
1825 #ifdef LISP_FEATURE_PPC
1826 mcontext_t uc_regs;
1827 #endif
1829 memset(&fake_info, 0, sizeof(fake_info));
1830 memset(&fake_context, 0, sizeof(fake_context));
1831 #ifdef LISP_FEATURE_PPC
1832 memset(&uc_regs, 0, sizeof(uc_regs));
1833 fake_context.uc_mcontext.uc_regs = &uc_regs;
1834 #endif
1836 *os_context_pc_addr(&fake_context) = (intptr_t) &signal_thread_trampoline;
1837 #ifdef ARCH_HAS_STACK_POINTER /* aka x86(-64) */
1838 *os_context_sp_addr(&fake_context) = (intptr_t) __builtin_frame_address(0);
1839 #endif
1841 signal_handler_callback(interrupt_handlers[signo].lisp,
1842 signo, &fake_info, &fake_context);
1843 return 0;
1846 static void
1847 sigprof_handler_trampoline(int signal, siginfo_t *info, void *void_context)
1849 SAVE_ERRNO(signal,context,void_context);
1850 struct thread *self = arch_os_get_current_thread();
1852 /* alloc() is not re-entrant and still uses pseudo atomic (even though
1853 * inline allocation does not). In this case, give up. */
1854 if (get_pseudo_atomic_atomic(self))
1855 goto cleanup;
1857 struct alloc_region tmp = self->alloc_region;
1858 self->alloc_region = self->sprof_alloc_region;
1859 self->sprof_alloc_region = tmp;
1861 interrupt_handle_now_handler(signal, info, void_context);
1863 /* And we're back. We know that the SIGPROF handler never unwinds
1864 * non-locally, and can simply swap things back: */
1866 tmp = self->alloc_region;
1867 self->alloc_region = self->sprof_alloc_region;
1868 self->sprof_alloc_region = tmp;
1870 cleanup:
1871 ; /* Dear C compiler, it's OK to have a label here. */
1872 RESTORE_ERRNO;
1875 static void
1876 spawn_signal_thread_handler(int signal, siginfo_t *info, void *void_context)
1878 SAVE_ERRNO(signal,context,void_context);
1880 pthread_attr_t attr;
1881 pthread_t th;
1883 if (pthread_attr_init(&attr))
1884 goto lost;
1885 if (pthread_attr_setstacksize(&attr, thread_control_stack_size))
1886 goto lost;
1887 if (pthread_create(&th, &attr, &signal_thread_trampoline, (void*)(intptr_t) signal))
1888 goto lost;
1889 if (pthread_attr_destroy(&attr))
1890 goto lost;
1892 RESTORE_ERRNO;
1893 return;
1895 lost:
1896 lose("spawn_signal_thread_handler");
1898 #endif
1900 static void
1901 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1903 SAVE_ERRNO(signal,context,void_context);
1904 sigset_t unblock;
1906 sigemptyset(&unblock);
1907 sigaddset(&unblock, signal);
1908 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1909 interrupt_handle_now(signal, info, context);
1910 RESTORE_ERRNO;
1913 static void
1914 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1916 SAVE_ERRNO(signal,context,void_context);
1917 sigset_t unblock;
1919 sigemptyset(&unblock);
1920 sigaddset(&unblock, signal);
1921 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1922 (*interrupt_low_level_handlers[signal])(signal, info, context);
1923 RESTORE_ERRNO;
1926 static void
1927 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1929 SAVE_ERRNO(signal,context,void_context);
1930 (*interrupt_low_level_handlers[signal])(signal, info, context);
1931 RESTORE_ERRNO;
1934 void
1935 undoably_install_low_level_interrupt_handler (int signal,
1936 interrupt_handler_t handler)
1938 struct sigaction sa;
1940 if (0 > signal || signal >= NSIG) {
1941 lose("bad signal number %d\n", signal);
1944 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1945 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1946 else if (sigismember(&deferrable_sigset,signal))
1947 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1948 else if (!sigaction_nodefer_works &&
1949 !sigismember(&blockable_sigset, signal))
1950 sa.sa_sigaction = low_level_unblock_me_trampoline;
1951 else
1952 sa.sa_sigaction = low_level_handle_now_handler;
1954 #ifdef LISP_FEATURE_SB_THRUPTION
1955 /* It's in `deferrable_sigset' so that we block&unblock it properly,
1956 * but we don't actually want to defer it. And if we put it only
1957 * into blockable_sigset, we'd have to special-case it around thread
1958 * creation at least. */
1959 if (signal == SIGPIPE)
1960 sa.sa_sigaction = low_level_handle_now_handler;
1961 #endif
1963 sigcopyset(&sa.sa_mask, &blockable_sigset);
1964 sa.sa_flags = SA_SIGINFO | SA_RESTART
1965 | (sigaction_nodefer_works ? SA_NODEFER : 0);
1966 #if defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK)
1967 if(signal==SIG_MEMORY_FAULT) {
1968 sa.sa_flags |= SA_ONSTACK;
1969 # ifdef LISP_FEATURE_SB_SAFEPOINT
1970 sigaddset(&sa.sa_mask, SIGRTMIN);
1971 sigaddset(&sa.sa_mask, SIGRTMIN+1);
1972 # endif
1974 #endif
1976 sigaction(signal, &sa, NULL);
1977 interrupt_low_level_handlers[signal] =
1978 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1980 #endif
1982 /* This is called from Lisp. */
1983 uword_t
1984 install_handler(int signal, void handler(int, siginfo_t*, os_context_t*),
1985 int synchronous)
1987 #ifndef LISP_FEATURE_WIN32
1988 struct sigaction sa;
1989 sigset_t old;
1990 union interrupt_handler oldhandler;
1992 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
1994 block_blockable_signals(0, &old);
1996 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%p\n",
1997 interrupt_low_level_handlers[signal]));
1998 if (interrupt_low_level_handlers[signal]==0) {
1999 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
2000 ARE_SAME_HANDLER(handler, SIG_IGN))
2001 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
2002 #ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
2003 else if (signal == SIGPROF)
2004 sa.sa_sigaction = sigprof_handler_trampoline;
2005 else if (!synchronous)
2006 sa.sa_sigaction = spawn_signal_thread_handler;
2007 #endif
2008 else if (sigismember(&deferrable_sigset, signal))
2009 sa.sa_sigaction = maybe_now_maybe_later;
2010 else if (!sigaction_nodefer_works &&
2011 !sigismember(&blockable_sigset, signal))
2012 sa.sa_sigaction = unblock_me_trampoline;
2013 else
2014 sa.sa_sigaction = interrupt_handle_now_handler;
2016 sigcopyset(&sa.sa_mask, &blockable_sigset);
2017 sa.sa_flags = SA_SIGINFO | SA_RESTART |
2018 (sigaction_nodefer_works ? SA_NODEFER : 0);
2019 sigaction(signal, &sa, NULL);
2022 oldhandler = interrupt_handlers[signal];
2023 interrupt_handlers[signal].c = handler;
2025 thread_sigmask(SIG_SETMASK, &old, 0);
2027 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
2029 return (uword_t)oldhandler.lisp;
2030 #else
2031 /* Probably-wrong Win32 hack */
2032 return 0;
2033 #endif
2036 /* This must not go through lisp as it's allowed anytime, even when on
2037 * the altstack. */
2038 void
2039 sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
2041 /* Save the interrupt context. No need to undo it, since lose()
2042 * shouldn't return. */
2043 fake_foreign_function_call(context);
2044 lose("SIGABRT received.\n");
2047 void
2048 interrupt_init(void)
2050 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
2051 int i;
2052 SHOW("entering interrupt_init()");
2053 #ifndef LISP_FEATURE_WIN32
2054 see_if_sigaction_nodefer_works();
2055 #endif
2056 sigemptyset(&deferrable_sigset);
2057 sigemptyset(&blockable_sigset);
2058 sigemptyset(&gc_sigset);
2059 sigaddset_deferrable(&deferrable_sigset);
2060 sigaddset_blockable(&blockable_sigset);
2061 sigaddset_gc(&gc_sigset);
2062 #endif
2064 #ifndef LISP_FEATURE_WIN32
2065 /* Set up high level handler information. */
2066 for (i = 0; i < NSIG; i++) {
2067 interrupt_handlers[i].c =
2068 /* (The cast here blasts away the distinction between
2069 * SA_SIGACTION-style three-argument handlers and
2070 * signal(..)-style one-argument handlers, which is OK
2071 * because it works to call the 1-argument form where the
2072 * 3-argument form is expected.) */
2073 (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
2075 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
2076 #endif
2077 SHOW("returning from interrupt_init()");
2080 #ifndef LISP_FEATURE_WIN32
2082 siginfo_code(siginfo_t *info)
2084 return info->si_code;
2086 os_vm_address_t current_memory_fault_address;
2088 void
2089 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
2091 /* FIXME: This is lossy: if we get another memory fault (eg. from
2092 * another thread) before lisp has read this, we lose the information.
2093 * However, since this is mostly informative, we'll live with that for
2094 * now -- some address is better then no address in this case.
2096 current_memory_fault_address = addr;
2098 /* If we lose on corruption, provide LDB with debugging information. */
2099 fake_foreign_function_call(context);
2101 /* To allow debugging memory faults in signal handlers and such. */
2102 corruption_warning_and_maybe_lose("Memory fault at %p (pc=%p, sp=%p)",
2103 addr,
2104 *os_context_pc_addr(context),
2105 #ifdef ARCH_HAS_STACK_POINTER
2106 *os_context_sp_addr(context)
2107 #else
2109 #endif
2111 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
2112 undo_fake_foreign_function_call(context);
2113 unblock_signals_in_context_and_maybe_warn(context);
2114 arrange_return_to_lisp_function(context,
2115 StaticSymbolFunction(MEMORY_FAULT_ERROR));
2116 #else
2117 unblock_gc_signals(0, 0);
2118 funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR));
2119 undo_fake_foreign_function_call(context);
2120 #endif
2122 #endif
2124 static void
2125 unhandled_trap_error(os_context_t *context)
2127 DX_ALLOC_SAP(context_sap, context);
2128 fake_foreign_function_call(context);
2129 #ifndef LISP_FEATURE_SB_SAFEPOINT
2130 unblock_gc_signals(0, 0);
2131 #endif
2133 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
2134 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
2135 #endif
2136 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
2137 lose("UNHANDLED-TRAP-ERROR fell through");
2140 /* Common logic for trapping instructions. How we actually handle each
2141 * case is highly architecture dependent, but the overall shape is
2142 * this. */
2143 void
2144 handle_trap(os_context_t *context, int trap)
2146 switch(trap) {
2147 #if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
2148 case trap_PendingInterrupt:
2149 FSHOW((stderr, "/<trap pending interrupt>\n"));
2150 arch_skip_instruction(context);
2151 interrupt_handle_pending(context);
2152 break;
2153 #endif
2154 case trap_Error:
2155 case trap_Cerror:
2156 #ifdef trap_InvalidArgCount
2157 case trap_InvalidArgCount:
2158 #endif
2159 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
2160 interrupt_internal_error(context, trap==trap_Cerror);
2161 break;
2162 case trap_Breakpoint:
2163 arch_handle_breakpoint(context);
2164 break;
2165 case trap_FunEndBreakpoint:
2166 arch_handle_fun_end_breakpoint(context);
2167 break;
2168 #ifdef trap_AfterBreakpoint
2169 case trap_AfterBreakpoint:
2170 arch_handle_after_breakpoint(context);
2171 break;
2172 #endif
2173 #ifdef trap_SingleStepAround
2174 case trap_SingleStepAround:
2175 case trap_SingleStepBefore:
2176 arch_handle_single_step_trap(context, trap);
2177 break;
2178 #endif
2179 #ifdef trap_GlobalSafepoint
2180 case trap_GlobalSafepoint:
2181 fake_foreign_function_call(context);
2182 thread_in_lisp_raised(context);
2183 undo_fake_foreign_function_call(context);
2184 arch_skip_instruction(context);
2185 break;
2186 case trap_CspSafepoint:
2187 fake_foreign_function_call(context);
2188 thread_in_safety_transition(context);
2189 undo_fake_foreign_function_call(context);
2190 arch_skip_instruction(context);
2191 break;
2192 #endif
2193 #if defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_GENCGC)
2194 case trap_Allocation:
2195 arch_handle_allocation_trap(context);
2196 arch_skip_instruction(context);
2197 break;
2198 #endif
2199 case trap_Halt:
2200 fake_foreign_function_call(context);
2201 lose("%%PRIMITIVE HALT called; the party is over.\n");
2202 default:
2203 unhandled_trap_error(context);