/*
 * interrupt-handling magic
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
/* As far as I can tell, what's going on here is:
 *
 * In the case of most signals, when Lisp asks us to handle the
 * signal, the outermost handler (the one actually passed to UNIX) is
 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
 * and interrupt_low_level_handlers[..] is cleared.
 *
 * However, some signals need special handling, e.g.
 *
 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
 *   garbage collector to detect violations of write protection,
 *   because some cases of such signals (e.g. GC-related violations of
 *   write protection) are handled at C level and never passed on to
 *   Lisp. For such signals, we still store any Lisp-level handler
 *   in interrupt_handlers[..], but for the outermost handler we use
 *   the value from interrupt_low_level_handlers[..], instead of the
 *   ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
 *
 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
 *   pseudo-atomic sections, and some classes of error (e.g. "function
 *   not defined"). This never goes anywhere near the Lisp handlers at all.
 *   See runtime/alpha-arch.c and code/signal.lisp
 *
 * - WHN 20000728, dan 20010128 */
#include "sbcl.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif
#include <errno.h>

#include "runtime.h"
#include "arch.h"
#include "os.h"
#include "interrupt.h"
#include "globals.h"
#include "lispregs.h"
#include "validate.h"
#include "interr.h"
#include "gc.h"
#include "alloc.h"
#include "dynbind.h"
#include "pseudo-atomic.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "genesis/cons.h"
/*
 * This is a workaround for some slightly silly Linux/GNU Libc
 * behaviour: glibc defines sigset_t to support 1024 signals, which is
 * more than the kernel. This is usually not a problem, but becomes
 * one when we want to save a signal mask from a ucontext, and restore
 * it later into another ucontext: the ucontext is allocated on the
 * stack by the kernel, so copying a libc-sized sigset_t into it will
 * overflow and cause other data on the stack to be corrupted */
/* FIXME: do not rely on NSIG being a multiple of 8 */
/* See https://sourceware.org/bugzilla/show_bug.cgi?id=1780 */

#ifdef LISP_FEATURE_WIN32
# define REAL_SIGSET_SIZE_BYTES (4)
#else
# define REAL_SIGSET_SIZE_BYTES ((NSIG/8))
#endif
static inline void
sigcopyset(sigset_t *new, sigset_t *old)
{
    memcpy(new, old, REAL_SIGSET_SIZE_BYTES);
}
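
/* Illustrative sketch, not part of the runtime: on x86-64 Linux/glibc,
 * sizeof(sigset_t) is 128 bytes (1024 bits) while the kernel's mask is
 * NSIG/8 = 8 bytes, so a full-width copy into a kernel-allocated
 * ucontext would clobber adjacent stack data. Disabled code one could
 * use to observe the mismatch sigcopyset() works around: */
#if 0
static void show_sigset_size_mismatch(void)
{
    fprintf(stderr, "libc sigset_t: %d bytes, kernel mask: %d bytes\n",
            (int)sizeof(sigset_t), REAL_SIGSET_SIZE_BYTES);
}
#endif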
/* When we catch an internal error, should we pass it back to Lisp to
 * be handled in a high-level way? (Early in cold init, the answer is
 * 'no', because Lisp is still too brain-dead to handle anything.
 * After sufficient initialization has been completed, the answer
 * becomes 'yes'.) */
boolean internal_errors_enabled = 0;
#ifndef LISP_FEATURE_WIN32
static
void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
#endif
union interrupt_handler interrupt_handlers[NSIG];

/* Under Linux on some architectures, we appear to have to restore the
 * FPU control word from the context, as after the signal is delivered
 * we appear to have a null FPU control word. */
#if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
#define RESTORE_FP_CONTROL_WORD(context,void_context)           \
    os_context_t *context = arch_os_get_context(&void_context); \
    os_restore_fp_control(context);
#else
#define RESTORE_FP_CONTROL_WORD(context,void_context)           \
    os_context_t *context = arch_os_get_context(&void_context);
#endif
/* Foreign code may want to start some threads on its own.
 * Non-targeted, truly asynchronous signals can be delivered to
 * basically any thread, but invoking Lisp handlers in such foreign
 * threads is really bad, so let's resignal it.
 *
 * This should at least bring attention to the problem, but it cannot
 * work for SIGSEGV and similar. It is good enough for timers, and
 * maybe all deferrables. */
#ifndef LISP_FEATURE_WIN32
static void
add_handled_signals(sigset_t *sigset)
{
    int i;
    for(i = 1; i < NSIG; i++) {
        if (!(ARE_SAME_HANDLER(interrupt_low_level_handlers[i], SIG_DFL)) ||
            !(ARE_SAME_HANDLER(interrupt_handlers[i].c, SIG_DFL))) {
            sigaddset(sigset, i);
        }
    }
}

void block_signals(sigset_t *what, sigset_t *where, sigset_t *old);
#endif
static boolean
maybe_resignal_to_lisp_thread(int signal, os_context_t *context)
{
#ifndef LISP_FEATURE_WIN32
    if (!lisp_thread_p(context)) {
        if (!(sigismember(&deferrable_sigset,signal))) {
            corruption_warning_and_maybe_lose
#ifdef LISP_FEATURE_SB_THREAD
                ("Received signal %d in non-lisp thread %lu, resignalling to a lisp thread.",
                 signal, pthread_self());
#else
                ("Received signal %d in non-lisp thread, resignalling to a lisp thread.",
                 signal);
#endif
        }
        {
            sigset_t sigset;
            sigemptyset(&sigset);
            add_handled_signals(&sigset);
            block_signals(&sigset, 0, 0);
            block_signals(&sigset, os_context_sigmask_addr(context), 0);
            kill(getpid(), signal);
        }
        return 1;
    } else
#endif
        return 0;
}
#if INSTALL_SIG_MEMORY_FAULT_HANDLER && defined(THREAD_SANITIZER)
/* Under TSAN, every signal blocks every other signal regardless of the
 * 'sa_mask' given to sigaction(). This is courtesy of an interceptor -
 * https://github.com/llvm-mirror/compiler-rt/blob/bcc227ee4af1ef3e63033b35dcb1d5627a3b2941/lib/tsan/rtl/tsan_interceptors.cc#L1972
 *
 * So among other things, SIGSEGV is blocked on receipt of any random signal
 * of interest (SIGPROF, SIGALRM, SIGPIPE, ...) that might call Lisp code.
 * Therefore, if any handler re-enters Lisp, there is a high likelihood
 * of SIGSEGV being delivered while blocked. Unfortunately, the OS treats
 * blocked SIGSEGV exactly as if the specified disposition were SIG_DFL,
 * which results in process termination and a core dump.
 *
 * It doesn't work to route all our signals through 'unblock_me_trampoline',
 * because that only unblocks the specific signal that was just delivered,
 * to work around the problem of SA_NODEFER not working. (Which says that
 * a signal should not be blocked within its own handler; it says nothing
 * about all other signals.)
 * Our trick is to unblock SIGSEGV early in every handler,
 * so as not to face sudden death if it happens to invoke Lisp.
 */
# define UNBLOCK_SIGSEGV() \
  { sigset_t mask; sigemptyset(&mask); \
    sigaddset(&mask, SIG_MEMORY_FAULT); /* usually SIGSEGV */ \
    thread_sigmask(SIG_UNBLOCK, &mask, 0); }
#else
# define UNBLOCK_SIGSEGV() {}
#endif
/* These are to be used in signal handlers. Currently all handlers are
 * called from one of:
 *
 * interrupt_handle_now_handler
 * maybe_now_maybe_later
 * unblock_me_trampoline
 * low_level_handle_now_handler
 * low_level_maybe_now_maybe_later
 * low_level_unblock_me_trampoline
 *
 * This gives us a single point of control (or six) over errno, fp
 * control word, and fixing up signal context on sparc.
 *
 * The SPARC/Linux platform doesn't quite do signals the way we want
 * them done. The third argument in the handler isn't filled in by the
 * kernel properly, so we fix it up ourselves in the
 * arch_os_get_context(..) function. -- CSR, 2002-07-23
 */
#define SAVE_ERRNO(signal,context,void_context)                 \
    {                                                           \
        int _saved_errno = errno;                               \
        UNBLOCK_SIGSEGV();                                      \
        RESTORE_FP_CONTROL_WORD(context,void_context);          \
        if (!maybe_resignal_to_lisp_thread(signal, context))    \
        {

#define RESTORE_ERRNO                                           \
        }                                                       \
        errno = _saved_errno;                                   \
    }
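
/* Illustrative sketch, hypothetical, mirroring the real handlers below:
 * the shape every outermost handler takes when built from these two
 * macros. SAVE_ERRNO opens a block, stashes errno, fixes up the context,
 * and guards the body with maybe_resignal_to_lisp_thread();
 * RESTORE_ERRNO closes the block and puts errno back. */
#if 0
static void
example_outermost_handler(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(signal,context,void_context);
    /* ... dispatch to the Lisp-level or low-level handler here ... */
    RESTORE_ERRNO;
}
#endif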
static void run_deferred_handler(struct interrupt_data *data,
                                 os_context_t *context);
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
static void store_signal_data_for_later (struct interrupt_data *data,
                                         void *handler, int signal,
                                         siginfo_t *info,
                                         os_context_t *context);

/* Generic signal related utilities. */

void
get_current_sigmask(sigset_t *sigset)
{
    /* Get the current sigmask, by blocking the empty set. */
    thread_sigmask(SIG_BLOCK, 0, sigset);
}
void
block_signals(sigset_t *what, sigset_t *where, sigset_t *old)
{
    if (where) {
        int i;
        if (old)
            sigcopyset(old, where);
        for(i = 1; i < NSIG; i++) {
            if (sigismember(what, i))
                sigaddset(where, i);
        }
    } else {
        thread_sigmask(SIG_BLOCK, what, old);
    }
}

void
unblock_signals(sigset_t *what, sigset_t *where, sigset_t *old)
{
    if (where) {
        int i;
        if (old)
            sigcopyset(old, where);
        for(i = 1; i < NSIG; i++) {
            if (sigismember(what, i))
                sigdelset(where, i);
        }
    } else {
        thread_sigmask(SIG_UNBLOCK, what, old);
    }
}
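
/* Illustrative sketch, not part of the runtime: the two modes of
 * block_signals()/unblock_signals(). With where == 0 they act on the
 * calling thread's mask via thread_sigmask(); with a non-null where
 * they edit that sigset in place (e.g. a context's saved mask).
 * deferrable_sigset is initialized in interrupt_init. */
#if 0
static void signal_mask_usage_example(os_context_t *context)
{
    sigset_t old;
    /* Mode 1: block the deferrables in this thread, saving the old mask. */
    block_signals(&deferrable_sigset, 0, &old);
    /* Mode 2: block them in the interrupted context's mask instead. */
    block_signals(&deferrable_sigset, os_context_sigmask_addr(context), 0);
    /* Restore the thread mask saved above. */
    thread_sigmask(SIG_SETMASK, &old, 0);
}
#endif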
// Stringify sigset into the supplied result buffer.
static void
sigset_tostring(sigset_t *sigset, char* result, int result_length)
{
    int i;
    int len = 0;
    for(i = 1; i < NSIG; i++)
        if (sigismember(sigset, i)) {
            // ensure room for (generously) 3 digits + comma + null, or give up
            if (len > result_length - 5) {
                strcpy(result, "too many to list");
                return;
            }
            len += sprintf(result+len, "%s%d", len?",":"", i);
        }
    result[len] = 0;
}
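
/* Illustrative sketch, hypothetical caller: how sigset_tostring() is
 * meant to be used, with the buffer sized as in all_signals_blocked_p()
 * below. */
#if 0
static void print_current_mask(void)
{
    sigset_t current;
    char buf[3*64];
    get_current_sigmask(&current);
    sigset_tostring(&current, buf, sizeof buf);
    fprintf(stderr, "blocked: {%s}\n", buf);
}
#endif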
/* Return 1 if all signals in sigset2 are masked in sigset, return 0
 * if all are unmasked, otherwise die. Passing NULL for sigset is a
 * shorthand for the current sigmask. */
boolean
all_signals_blocked_p(sigset_t *sigset, sigset_t *sigset2,
                      const char *name)
{
    int i;
    boolean has_blocked = 0, has_unblocked = 0;
    sigset_t current;
    if (sigset == 0) {
        get_current_sigmask(&current);
        sigset = &current;
    }
    for(i = 1; i < NSIG; i++) {
        if (sigismember(sigset2, i)) {
            if (sigismember(sigset, i))
                has_blocked = 1;
            else
                has_unblocked = 1;
        }
    }
    if (has_blocked && has_unblocked) {
        char buf[3*64]; // assuming worst case 64 signals present in sigset
        sigset_tostring(sigset, buf, sizeof buf);
        lose("%s signals partially blocked: {%s}\n", name, buf);
    }
    if (has_blocked)
        return 1;
    else
        return 0;
}
/* Deferrables, blockables, gc signals. */

void
sigaddset_deferrable(sigset_t *s)
{
    sigaddset(s, SIGHUP);
    sigaddset(s, SIGINT);
    sigaddset(s, SIGTERM);
    sigaddset(s, SIGQUIT);
    sigaddset(s, SIGPIPE);
    sigaddset(s, SIGALRM);
    sigaddset(s, SIGURG);
    sigaddset(s, SIGTSTP);
    sigaddset(s, SIGCHLD);
    sigaddset(s, SIGIO);
#ifndef LISP_FEATURE_HPUX
    sigaddset(s, SIGXCPU);
    sigaddset(s, SIGXFSZ);
#endif
    sigaddset(s, SIGVTALRM);
    sigaddset(s, SIGPROF);
    sigaddset(s, SIGWINCH);
}

void
sigaddset_blockable(sigset_t *sigset)
{
    sigaddset_deferrable(sigset);
    sigaddset_gc(sigset);
}

void
sigaddset_gc(sigset_t *sigset)
{
#ifdef THREADS_USING_GCSIGNAL
    sigaddset(sigset,SIG_STOP_FOR_GC);
#endif
}

/* initialized in interrupt_init */
sigset_t deferrable_sigset;
sigset_t blockable_sigset;
sigset_t gc_sigset;

boolean
deferrables_blocked_p(sigset_t *sigset)
{
    return all_signals_blocked_p(sigset, &deferrable_sigset, "deferrable");
}
#endif
void
check_deferrables_unblocked_or_lose(sigset_t *sigset)
{
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    if (deferrables_blocked_p(sigset))
        lose("deferrables blocked\n");
#endif
}

void
check_deferrables_blocked_or_lose(sigset_t *sigset)
{
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    if (!deferrables_blocked_p(sigset))
        lose("deferrables unblocked\n");
#endif
}

#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
boolean
blockables_blocked_p(sigset_t *sigset)
{
    return all_signals_blocked_p(sigset, &blockable_sigset, "blockable");
}
#endif

void
check_blockables_unblocked_or_lose(sigset_t *sigset)
{
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    if (blockables_blocked_p(sigset))
        lose("blockables blocked\n");
#endif
}
void
check_blockables_blocked_or_lose(sigset_t *sigset)
{
#if !defined(LISP_FEATURE_WIN32)
    /* On Windows, there are no actual signals, but since the win32 port
     * tracks the sigmask and checks it explicitly, some functions are
     * still required to keep the mask set up properly. (After all, the
     * goal of the sigmask emulation is to not have to change all the
     * call sites in the first place.)
     *
     * However, this does not hold for all signals equally: While
     * deferrables matter ("is interrupt-thread okay?"), it is not worth
     * having to set up blockables properly (which include the
     * non-existing GC signals).
     *
     * Yet, as the original comment explains it:
     *   Adjusting *FREE-INTERRUPT-CONTEXT-INDEX* and other aspects of
     *   fake_foreign_function_call machinery are sometimes useful here[...].
     *
     * So we merely skip this assertion.
     *   -- DFL, trying to expand on a comment by AK.
     */
    if (!blockables_blocked_p(sigset))
        lose("blockables unblocked\n");
#endif
}
#ifndef LISP_FEATURE_SB_SAFEPOINT
#if !defined(LISP_FEATURE_WIN32)
boolean
gc_signals_blocked_p(sigset_t *sigset)
{
    return all_signals_blocked_p(sigset, &gc_sigset, "gc");
}
#endif

void
check_gc_signals_unblocked_or_lose(sigset_t *sigset)
{
#if !defined(LISP_FEATURE_WIN32)
    if (gc_signals_blocked_p(sigset))
        lose("gc signals blocked\n");
#endif
}

void
check_gc_signals_blocked_or_lose(sigset_t *sigset)
{
#if !defined(LISP_FEATURE_WIN32)
    if (!gc_signals_blocked_p(sigset))
        lose("gc signals unblocked\n");
#endif
}
#endif
void
block_deferrable_signals(sigset_t *old)
{
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    block_signals(&deferrable_sigset, 0, old);
#endif
}

void
block_blockable_signals(sigset_t *old)
{
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    block_signals(&blockable_sigset, 0, old);
#endif
}

void
unblock_deferrable_signals(sigset_t *where, sigset_t *old)
{
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    if (interrupt_handler_pending_p())
        lose("unblock_deferrable_signals: losing proposition\n");
#ifndef LISP_FEATURE_SB_SAFEPOINT
    check_gc_signals_unblocked_or_lose(where);
#endif
    unblock_signals(&deferrable_sigset, where, old);
#endif
}

#ifndef LISP_FEATURE_SB_SAFEPOINT
void
unblock_gc_signals(sigset_t *where, sigset_t *old)
{
#ifndef LISP_FEATURE_WIN32
    unblock_signals(&gc_sigset, where, old);
#endif
}
#endif

void
unblock_signals_in_context_and_maybe_warn(os_context_t *context)
{
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    sigset_t *sigset = os_context_sigmask_addr(context);
#ifndef LISP_FEATURE_SB_SAFEPOINT
    if (all_signals_blocked_p(sigset, &gc_sigset, "gc")) {
        corruption_warning_and_maybe_lose(
"Enabling blocked gc signals to allow returning to Lisp without risking\n\
gc deadlocks. Since GC signals are only blocked in signal handlers when\n\
they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
        unblock_gc_signals(sigset, 0);
    }
#endif
    if (!interrupt_handler_pending_p()) {
        unblock_deferrable_signals(sigset, 0);
    }
#endif
}
inline static void
check_interrupts_enabled_or_lose(os_context_t *context)
{
    __attribute__((unused)) struct thread *thread = arch_os_get_current_thread();
    if (read_TLS(INTERRUPTS_ENABLED,thread) == NIL)
        lose("interrupts not enabled\n");
    if (arch_pseudo_atomic_atomic(context))
        lose ("in pseudo atomic section\n");
}
/* Save sigset (or the current sigmask if 0) if there is no pending
 * handler, because that means that deferrables are already blocked.
 * The purpose is to avoid losing the pending gc signal if a
 * deferrable interrupt async unwinds between clearing the pseudo
 * atomic and trapping to GC. */
#ifndef LISP_FEATURE_SB_SAFEPOINT
void
maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
{
#ifndef LISP_FEATURE_WIN32
    struct thread *thread = arch_os_get_current_thread();
    struct interrupt_data *data = thread->interrupt_data;
    sigset_t oldset;
    /* Obviously, this function is called when signals may not be
     * blocked. Let's make sure we are not interrupted. */
    block_blockable_signals(&oldset);
#ifndef LISP_FEATURE_SB_THREAD
    /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
     * block. */
    if (data->gc_blocked_deferrables)
        lose("gc_blocked_deferrables already true\n");
#endif
    if ((!data->pending_handler) &&
        (!data->gc_blocked_deferrables)) {
        FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
        data->gc_blocked_deferrables = 1;
        if (sigset) {
            /* This is the sigmask of some context. */
            sigcopyset(&data->pending_mask, sigset);
            sigaddset_deferrable(sigset);
            thread_sigmask(SIG_SETMASK,&oldset,0);
            return;
        } else {
            /* Operating on the current sigmask. Save oldset and
             * unblock gc signals. In the end, this is equivalent to
             * blocking the deferrables. */
            sigcopyset(&data->pending_mask, &oldset);
            thread_sigmask(SIG_UNBLOCK, &gc_sigset, 0);
            return;
        }
    }
    thread_sigmask(SIG_SETMASK,&oldset,0);
#endif
}
#endif
/* Are we leaving WITH-GCING and already running with interrupts
 * enabled, without the protection of *GC-INHIBIT* T, and there is gc
 * (or stop for gc) pending, but we haven't trapped yet? */
static inline int
in_leaving_without_gcing_race_p(struct thread *thread)
{
    return ((read_TLS(IN_WITHOUT_GCING,thread) != NIL) &&
            (read_TLS(INTERRUPTS_ENABLED,thread) != NIL) &&
            (read_TLS(GC_INHIBIT,thread) == NIL) &&
            ((read_TLS(GC_PENDING,thread) != NIL)
#if defined(LISP_FEATURE_SB_THREAD)
             || (read_TLS(STOP_FOR_GC_PENDING,thread) != NIL)
#endif
             ));
}
/* Check our baroque invariants. */
void
check_interrupt_context_or_lose(os_context_t *context)
{
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    struct thread *thread = arch_os_get_current_thread();
    struct interrupt_data *data = thread->interrupt_data;
    int interrupt_deferred_p = (data->pending_handler != 0);
    int interrupt_pending = (read_TLS(INTERRUPT_PENDING,thread) != NIL);
    sigset_t *sigset = os_context_sigmask_addr(context);
    /* On PPC pseudo_atomic_interrupted is cleared when coming out of
     * handle_allocation_trap. */
#if defined(LISP_FEATURE_GENCGC) && !GENCGC_IS_PRECISE
    int interrupts_enabled = (read_TLS(INTERRUPTS_ENABLED,thread) != NIL);
    int gc_inhibit = (read_TLS(GC_INHIBIT,thread) != NIL);
    int gc_pending = (read_TLS(GC_PENDING,thread) == T);
    int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
    int in_race_p = in_leaving_without_gcing_race_p(thread);
    int safepoint_active = 0;
#if defined(LISP_FEATURE_SB_SAFEPOINT)
    /* Don't try to take the gc state lock if there's a chance that
     * we're already holding it (thread_register_gc_trigger() is
     * called from PA, gc_stop_the_world() and gc_start_the_world()
     * are called from WITHOUT-GCING, all other takers of the lock
     * have deferrables blocked). */
    if (!(interrupt_pending || pseudo_atomic_interrupted || gc_inhibit)) {
        WITH_GC_STATE_LOCK {
            safepoint_active = gc_cycle_active();
        }
    }
#endif
    /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
     * section and trapping, a SIG_STOP_FOR_GC would see the next
     * check fail, for this reason the sig_stop_for_gc handler does not
     * call this function. */
    if (interrupt_deferred_p) {
        if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
            lose("Stray deferred interrupt.\n");
    }
    if (gc_pending)
        if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p || safepoint_active))
            lose("GC_PENDING, but why?\n");
#if defined(LISP_FEATURE_SB_THREAD)
    {
        int stop_for_gc_pending =
            (read_TLS(STOP_FOR_GC_PENDING,thread) != NIL);
        if (stop_for_gc_pending)
            if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p || safepoint_active))
                lose("STOP_FOR_GC_PENDING, but why?\n");
        if (pseudo_atomic_interrupted)
            if (!(gc_pending || stop_for_gc_pending || interrupt_deferred_p))
                lose("pseudo_atomic_interrupted, but why?\n");
    }
#else
    if (pseudo_atomic_interrupted)
        if (!(gc_pending || interrupt_deferred_p))
            lose("pseudo_atomic_interrupted, but why?\n");
#endif
#endif
    if (interrupt_pending && !interrupt_deferred_p)
        lose("INTERRUPT_PENDING but no pending handler.\n");
    if ((data->gc_blocked_deferrables) && interrupt_pending)
        lose("gc_blocked_deferrables and interrupt pending.\n");
    if (data->gc_blocked_deferrables)
        check_deferrables_blocked_or_lose(sigset);
    if (interrupt_pending || interrupt_deferred_p ||
        data->gc_blocked_deferrables)
        check_deferrables_blocked_or_lose(sigset);
    else {
        check_deferrables_unblocked_or_lose(sigset);
#ifndef LISP_FEATURE_SB_SAFEPOINT
        /* If deferrables are unblocked then we are open to signals
         * that run lisp code. */
        check_gc_signals_unblocked_or_lose(sigset);
#endif
    }
#endif
}
/*
 * utility routines used by various signal handlers
 */
static void
build_fake_control_stack_frames(struct thread *th,os_context_t *context)
{
#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK

    lispobj oldcont;

    /* Build a fake stack frame or frames */

#if !defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_ARM64)
    access_control_frame_pointer(th) =
        (lispobj *)(uword_t)
        (*os_context_register_addr(context, reg_CSP));
    if ((lispobj *)(uword_t)
        (*os_context_register_addr(context, reg_CFP))
        == access_control_frame_pointer(th)) {
        /* There is a small window during call where the callee's
         * frame isn't built yet. */
        if (lowtag_of(*os_context_register_addr(context, reg_CODE))
            == FUN_POINTER_LOWTAG) {
            /* We have called, but not built the new frame, so
             * build it for them. */
            access_control_frame_pointer(th)[0] =
                *os_context_register_addr(context, reg_OCFP);
            access_control_frame_pointer(th)[1] =
                *os_context_register_addr(context, reg_LRA);
            access_control_frame_pointer(th) += 2;
            /* Build our frame on top of it. */
            oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
        }
        else {
            /* We haven't yet called, build our frame as if the
             * partial frame wasn't there. */
            oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
        }
    } else
#elif defined (LISP_FEATURE_ARM)
    access_control_frame_pointer(th) = (lispobj*)
        SymbolValue(CONTROL_STACK_POINTER, th);
#elif defined (LISP_FEATURE_ARM64)
    access_control_frame_pointer(th) =
        (lispobj *)(uword_t) (*os_context_register_addr(context, reg_CSP));
#endif
    /* We can't tell whether we are still in the caller if it had to
     * allocate a stack frame due to stack arguments. */
    /* This observation provoked some past CMUCL maintainer to ask
     * "Can anything strange happen during return?" */
    {
        /* normal case */
        oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
    }

    access_control_stack_pointer(th) = access_control_frame_pointer(th) + 3;

    access_control_frame_pointer(th)[0] = oldcont;
    access_control_frame_pointer(th)[1] = NIL;
    access_control_frame_pointer(th)[2] =
        (lispobj)(*os_context_register_addr(context, reg_CODE));
#endif
}
/* Stores the context for gc to scavenge and builds fake stack
 * frames. */
void
fake_foreign_function_call(os_context_t *context)
{
    int context_index;
    struct thread *thread=arch_os_get_current_thread();

    /* context_index incrementing must not be interrupted */
    check_blockables_blocked_or_lose(0);

    /* Get current Lisp state from context. */
#if defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_GENCGC)
    dynamic_space_free_pointer = SymbolValue(ALLOCATION_POINTER, thread);
#endif
#ifdef reg_ALLOC
#ifdef LISP_FEATURE_SB_THREAD
    thread->pseudo_atomic_bits =
#else
    dynamic_space_free_pointer =
        (lispobj *)(uword_t)
#endif
        (*os_context_register_addr(context, reg_ALLOC));
/*     fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
/*            dynamic_space_free_pointer); */
#if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
    if ((sword_t)dynamic_space_free_pointer & 1) {
        lose("dead in fake_foreign_function_call, context = %x\n", context);
    }
#endif
/* why don't PPC and SPARC do something like this: */
#if defined(LISP_FEATURE_HPPA)
    if ((sword_t)dynamic_space_free_pointer & 4) {
        lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
    }
#endif
#endif
#ifdef reg_BSP
    set_binding_stack_pointer(thread,
        *os_context_register_addr(context, reg_BSP));
#endif

#if defined(LISP_FEATURE_ARM)
    /* Stash our control stack pointer */
    bind_variable(INTERRUPTED_CONTROL_STACK_POINTER,
                  SymbolValue(CONTROL_STACK_POINTER, thread),
                  thread);
#endif

    build_fake_control_stack_frames(thread,context);

    /* Do dynamic binding of the active interrupt context index
     * and save the context in the context array. */
    context_index =
        fixnum_value(read_TLS(FREE_INTERRUPT_CONTEXT_INDEX,thread));

    if (context_index >= MAX_INTERRUPTS) {
        lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
    }

    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
                  make_fixnum(context_index + 1),thread);

    thread->interrupt_contexts[context_index] = context;

#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
    /* x86oid targets don't maintain the foreign function call flag at
     * all, so leave them to believe that they are never in foreign
     * code. */
    foreign_function_call_active_p(thread) = 1;
#endif
}
/* blocks all blockable signals. If you are calling from a signal handler,
 * the usual signal mask will be restored from the context when the handler
 * finishes. Otherwise, be careful */
void
undo_fake_foreign_function_call(os_context_t *context)
{
    struct thread *thread=arch_os_get_current_thread();
    /* Block all blockable signals. */
    block_blockable_signals(0);

    foreign_function_call_active_p(thread) = 0;

    /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
    unbind(thread);

#if defined(LISP_FEATURE_ARM)
    /* Restore our saved control stack pointer */
    SetSymbolValue(CONTROL_STACK_POINTER,
                   SymbolValue(INTERRUPTED_CONTROL_STACK_POINTER,
                               thread),
                   thread);
    unbind(thread);
#endif

#if defined(reg_ALLOC) && !defined(LISP_FEATURE_SB_THREAD)
    /* Put the dynamic space free pointer back into the context. */
    *os_context_register_addr(context, reg_ALLOC) =
        (uword_t) dynamic_space_free_pointer
        | (*os_context_register_addr(context, reg_ALLOC)
           & LOWTAG_MASK);
    /*
      ((uword_t)(*os_context_register_addr(context, reg_ALLOC))
      & ~LOWTAG_MASK)
      | ((uword_t) dynamic_space_free_pointer & LOWTAG_MASK);
    */
#endif
#if defined(reg_ALLOC) && defined(LISP_FEATURE_SB_THREAD)
    /* Put the pseudo-atomic bits and dynamic space free pointer back
     * into the context (p-a-bits for p-a, and dynamic space free
     * pointer for ROOM). */
    *os_context_register_addr(context, reg_ALLOC) =
        (uword_t) dynamic_space_free_pointer
        | (thread->pseudo_atomic_bits & LOWTAG_MASK);
    /* And clear them so we don't get bitten later by call-in/call-out
     * not updating them. */
    thread->pseudo_atomic_bits = 0;
#endif
#if defined(LISP_FEATURE_ARM) && !defined(LISP_FEATURE_GENCGC)
    SetSymbolValue(ALLOCATION_POINTER, dynamic_space_free_pointer, thread);
#endif
}
/* a handler for the signal caused by execution of a trap opcode
 * signalling an internal error */
void
interrupt_internal_error(os_context_t *context, boolean continuable)
{
    DX_ALLOC_SAP(context_sap, context);

    fake_foreign_function_call(context);

    if (!internal_errors_enabled) {
        describe_internal_error(context);
        /* There's no good way to recover from an internal error
         * before the Lisp error handling mechanism is set up. */
        lose("internal error too early in init, can't recover\n");
    }

#ifndef LISP_FEATURE_SB_SAFEPOINT
    unblock_gc_signals(0, 0);
#endif

#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
#endif

#if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
    /* Workaround for blocked SIGTRAP. */
    {
        sigset_t newset;
        sigemptyset(&newset);
        sigaddset(&newset, SIGTRAP);
        thread_sigmask(SIG_UNBLOCK, &newset, 0);
    }
#endif

    SHOW("in interrupt_internal_error");
#if QSHOW == 2
    /* Display some rudimentary debugging information about the
     * error, so that even if the Lisp error handler gets badly
     * confused, we have a chance to determine what's going on. */
    describe_internal_error(context);
#endif
    funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
             continuable ? T : NIL);

    undo_fake_foreign_function_call(context); /* blocks signals again */
    if (continuable)
        arch_skip_instruction(context);
}
boolean
interrupt_handler_pending_p(void)
{
    struct thread *thread = arch_os_get_current_thread();
    struct interrupt_data *data = thread->interrupt_data;
    return (data->pending_handler != 0);
}
void
interrupt_handle_pending(os_context_t *context)
{
    /* There are four ways we can get here. First, if an interrupt
     * occurs within pseudo-atomic, it will be deferred, and we'll
     * trap to here at the end of the pseudo-atomic block. Second, if
     * the GC (in alloc()) decides that a GC is required, it will set
     * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
     * and alloc() is always called from within pseudo-atomic, and
     * thus we end up here again. Third, when calling GC-ON or at the
     * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
     * here if there is a pending GC. Fourth, ahem, at the end of
     * WITHOUT-INTERRUPTS (bar complications with nesting).
     *
     * A fifth way happens with safepoints: In addition to a stop for
     * GC that is pending, there are thruptions. Both mechanisms are
     * mostly signal-free, yet also of an asynchronous nature, so it makes
     * sense to let interrupt_handle_pending take care of running them:
     * It gets run precisely at those places where it is safe to process
     * pending asynchronous tasks. */

    struct thread *thread = arch_os_get_current_thread();
    struct interrupt_data *data = thread->interrupt_data;

    if (arch_pseudo_atomic_atomic(context)) {
        lose("Handling pending interrupt in pseudo atomic.");
    }

    FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
    check_blockables_blocked_or_lose(0);
#ifndef LISP_FEATURE_SB_SAFEPOINT
    /*
     * (On safepoint builds, there is no gc_blocked_deferrables nor
     * SIG_STOP_FOR_GC.)
     */
    /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
     * handler, then the pending mask was saved and
     * gc_blocked_deferrables set. Hence, there can be no pending
     * handler and it's safe to restore the pending mask.
     *
     * Note, that if gc_blocked_deferrables is false we may still have
     * to GC. In this case, we are coming out of a WITHOUT-GCING or a
     * pseudo atomic was interrupted by a deferrable first. */
    if (data->gc_blocked_deferrables) {
        if (data->pending_handler)
            lose("GC blocked deferrables but still got a pending handler.");
        if (read_TLS(GC_INHIBIT,thread)!=NIL)
            lose("GC blocked deferrables while GC is inhibited.");
        /* Restore the saved signal mask from the original signal (the
         * one that interrupted us during the critical section) into
         * the os_context for the signal we're currently in the
         * handler for. This should ensure that when we return from
         * the handler the blocked signals are unblocked. */
#ifndef LISP_FEATURE_WIN32
        sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
#endif
        data->gc_blocked_deferrables = 0;
    }
#endif
    if (read_TLS(GC_INHIBIT,thread)==NIL) {
        void *original_pending_handler = data->pending_handler;

#if defined(LISP_FEATURE_SB_SAFEPOINT) && defined(LISP_FEATURE_SB_THREAD)
        /* handles the STOP_FOR_GC_PENDING case, plus THRUPTIONS */
        if (read_TLS(STOP_FOR_GC_PENDING,thread) != NIL
# ifdef LISP_FEATURE_SB_THRUPTION
            || (read_TLS(THRUPTION_PENDING,thread) != NIL
                && read_TLS(INTERRUPTS_ENABLED, thread) != NIL)
# endif
            ) {
            /* We ought to take this chance to do a pitstop now. */
            fake_foreign_function_call(context);
            thread_in_lisp_raised(context);
            undo_fake_foreign_function_call(context);
        }
#elif defined(LISP_FEATURE_SB_THREAD)
        if (read_TLS(STOP_FOR_GC_PENDING,thread) != NIL) {
            /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
             * the signal handler if it actually stops us. */
            arch_clear_pseudo_atomic_interrupted(context);
            sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
        } else
#endif
        /* Test for T and not for != NIL since the value :IN-PROGRESS
         * used to be used in SUB-GC as part of the mechanism to
         * suppress recursive gcs. */
        if (read_TLS(GC_PENDING,thread) == T) {

            /* Two reasons for doing this. First, if there is a
             * pending handler we don't want to run it. Second, we are
             * going to clear pseudo atomic interrupted to avoid
             * spurious trapping on every allocation in SUB_GC and
             * having a pending handler with interrupts enabled and
             * without pseudo atomic interrupted breaks an
             * invariant. */
            if (data->pending_handler) {
                bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
                bind_variable(INTERRUPTS_ENABLED, NIL, thread);
            }

            arch_clear_pseudo_atomic_interrupted(context);

            /* GC_PENDING is cleared in SUB-GC, or if another thread
             * is doing a gc already we will get a SIG_STOP_FOR_GC and
             * that will clear it.
             *
             * If there is a pending handler or gc was triggered in a
             * signal handler then maybe_gc won't run POST_GC and will
             * return normally. */
            if (!maybe_gc(context))
                lose("GC not inhibited but maybe_gc did not GC.");

            if (data->pending_handler) {
                unbind(thread);
                unbind(thread);
            }
        } else if (read_TLS(GC_PENDING,thread) != NIL) {
            /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
             * GC-PENDING is not NIL then we cannot trap on pseudo
             * atomic due to GC (see if(GC_PENDING) logic in
             * cheneygc.c and gencgc.c), plus there is an outer
             * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
             * here? */
            lose("Trapping to run pending handler while GC in progress.");
        }

        check_blockables_blocked_or_lose(0);

        /* No GC shall be lost. If SUB_GC triggers another GC then
         * that should be handled on the spot. */
        if (read_TLS(GC_PENDING,thread) != NIL)
            lose("GC_PENDING after doing gc.");
#ifdef THREADS_USING_GCSIGNAL
        if (read_TLS(STOP_FOR_GC_PENDING,thread) != NIL)
            lose("STOP_FOR_GC_PENDING after doing gc.");
#endif
        /* Check two things. First, that gc does not clobber a handler
         * that's already pending. Second, that there is no interrupt
         * lossage: if original_pending_handler was NULL then even if
         * an interrupt arrived during GC (POST-GC, really) it was
         * handled. */
        if (original_pending_handler != data->pending_handler)
            lose("pending handler changed in gc: %x -> %x.",
                 original_pending_handler, data->pending_handler);
    }
#ifndef LISP_FEATURE_WIN32
    /* There may be no pending handler, because it was only a gc that
     * had to be executed or because Lisp is a bit too eager to call
     * DO-PENDING-INTERRUPT. */
    if ((read_TLS(INTERRUPTS_ENABLED,thread) != NIL) &&
        (data->pending_handler)) {
        /* No matter how we ended up here, clear both
         * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
         * because we checked above that there is no GC pending. */
        write_TLS(INTERRUPT_PENDING, NIL, thread);
        arch_clear_pseudo_atomic_interrupted(context);
        /* Restore the sigmask in the context. */
        sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
        run_deferred_handler(data, context);
    }
#ifdef LISP_FEATURE_SB_THRUPTION
    if (read_TLS(THRUPTION_PENDING,thread)==T)
        /* Special case for the following situation: There is a
         * thruption pending, but a signal had been deferred. The
         * pitstop at the top of this function could only take care
         * of GC, and skipped the thruption, so we need to try again
         * now that INTERRUPT_PENDING and the sigmask have been
         * reset. */
        while (check_pending_thruptions(context))
            ;
#endif
#endif
#ifdef LISP_FEATURE_GENCGC
    if (get_pseudo_atomic_interrupted(thread))
        lose("pseudo_atomic_interrupted after interrupt_handle_pending\n");
#endif
    /* It is possible that the end of this function was reached
     * without ever actually doing anything, the tests in Lisp for
     * when to call receive-pending-interrupt are not exact. */
    FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
}
void
interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
{
    boolean were_in_lisp;
    union interrupt_handler handler;

    check_blockables_blocked_or_lose(0);

#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    if (sigismember(&deferrable_sigset,signal))
        check_interrupts_enabled_or_lose(context);
#endif

    handler = interrupt_handlers[signal];

    if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
        return;
    }

    were_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());
    if (were_in_lisp)
    {
        fake_foreign_function_call(context);
    }

    FSHOW_SIGNAL((stderr,
                  "/entering interrupt_handle_now(%d, info, context)\n",
                  signal));

    if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {

        /* This can happen if someone tries to ignore or default one
         * of the signals we need for runtime support, and the runtime
         * support decides to pass on it. */
        lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);

    } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
        /* Once we've decided what to do about contexts in a
         * return-elsewhere world (the original context will no longer
         * be available; should we copy it or was nobody using it anyway?)
         * then we should convert this to return-elsewhere */

        /* CMUCL comment said "Allocate the SAPs while the interrupts
         * are still disabled.". I (dan, 2003.08.21) assume this is
         * because we're not in pseudoatomic and allocation shouldn't
         * be interrupted. In which case it's no longer an issue as
         * all our allocation from C now goes through a PA wrapper,
         * but still, doesn't hurt.
         *
         * Yeah, but non-gencgc platforms don't really wrap allocation
         * in PA. MG - 2005-08-29 */

#ifndef LISP_FEATURE_SB_SAFEPOINT
        /* Leave deferrable signals blocked, the handler itself will
         * allow signals again when it sees fit. */
        unblock_gc_signals(0, 0);
#else
        WITH_GC_AT_SAFEPOINTS_ONLY()
#endif
        { // the block is needed for WITH_GC_AT_SAFEPOINTS_ONLY() to work
            DX_ALLOC_SAP(context_sap, context);
            DX_ALLOC_SAP(info_sap, info);

            FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));

            funcall3(handler.lisp,
                     make_fixnum(signal),
                     info_sap,
                     context_sap);
        }
    } else {
        /* This cannot happen in sane circumstances. */

        FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));

#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
        /* Allow signals again. */
        thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
        (*handler.c)(signal, info, context);
#endif
    }

    if (were_in_lisp)
    {
        undo_fake_foreign_function_call(context); /* block signals again */
    }

    FSHOW_SIGNAL((stderr,
                  "/returning from interrupt_handle_now(%d, info, context)\n",
                  signal));
}
/* This is called at the end of a critical section if the indications
 * are that some signal was deferred during the section. Note that as
 * far as C or the kernel is concerned we dealt with the signal
 * already; we're just doing the Lisp-level processing now that we
 * put off then */
static void
run_deferred_handler(struct interrupt_data *data, os_context_t *context)
{
    /* The pending_handler may enable interrupts and then another
     * interrupt may hit, overwrite interrupt_data, so reset the
     * pending handler before calling it. Trust the handler to finish
     * with the siginfo before enabling interrupts. */
    void (*pending_handler) (int, siginfo_t*, os_context_t*) =
        data->pending_handler;

    data->pending_handler=0;
    FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
    (*pending_handler)(data->pending_signal,&(data->pending_info), context);
}
#ifndef LISP_FEATURE_WIN32
boolean
maybe_defer_handler(void *handler, struct interrupt_data *data,
                    int signal, siginfo_t *info, os_context_t *context)
{
    struct thread *thread=arch_os_get_current_thread();

    check_blockables_blocked_or_lose(0);

    if (read_TLS(INTERRUPT_PENDING,thread) != NIL)
        lose("interrupt already pending\n");
    if (thread->interrupt_data->pending_handler)
        lose("there is a pending handler already (PA)\n");
    if (data->gc_blocked_deferrables)
        lose("maybe_defer_handler: gc_blocked_deferrables true\n");

    /* If interrupts are disabled then INTERRUPT_PENDING is set and
     * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
     * atomic section inside a WITHOUT-INTERRUPTS.
     *
     * Also, if in_leaving_without_gcing_race_p then
     * interrupt_handle_pending is going to be called soon, so
     * stashing the signal away is safe.
     */
    if ((read_TLS(INTERRUPTS_ENABLED,thread) == NIL) ||
        in_leaving_without_gcing_race_p(thread)) {
        FSHOW_SIGNAL((stderr,
                      "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
                      (unsigned int)handler,signal,
                      in_leaving_without_gcing_race_p(thread)));
        store_signal_data_for_later(data,handler,signal,info,context);
        write_TLS(INTERRUPT_PENDING, T,thread);
        check_interrupt_context_or_lose(context);
        return 1;
    }
    /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
     * actually use its argument for anything on x86, so this branch
     * may succeed even when context is null (gencgc alloc()) */
    if (arch_pseudo_atomic_atomic(context)) {
        FSHOW_SIGNAL((stderr,
                      "/maybe_defer_handler(%x,%d): deferred(PA)\n",
                      (unsigned int)handler,signal));
        store_signal_data_for_later(data,handler,signal,info,context);
        arch_set_pseudo_atomic_interrupted(context);
        check_interrupt_context_or_lose(context);
        return 1;
    }

    check_interrupt_context_or_lose(context);

    FSHOW_SIGNAL((stderr,
                  "/maybe_defer_handler(%x,%d): not deferred\n",
                  (unsigned int)handler,signal));
    return 0;
}
static void
store_signal_data_for_later (struct interrupt_data *data, void *handler,
                             int signal,
                             siginfo_t *info, os_context_t *context)
{
    if (data->pending_handler)
        lose("tried to overwrite pending interrupt handler %x with %x\n",
             data->pending_handler, handler);
    if (!handler)
        lose("tried to defer null interrupt handler\n");
    data->pending_handler = handler;
    data->pending_signal = signal;
    if(info)
        memcpy(&(data->pending_info), info, sizeof(siginfo_t));

    FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
                  signal));

    if(!context)
        lose("Null context");

    /* the signal mask in the context (from before we were
     * interrupted) is copied to be restored when run_deferred_handler
     * happens. Then the usually-blocked signals are added to the mask
     * in the context so that we are running with blocked signals when
     * the handler returns */
    sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
    sigaddset_deferrable(os_context_sigmask_addr(context));
}
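
/* Illustrative sketch, not part of the runtime: the deferral round-trip
 * these two functions implement. maybe_defer_handler() calls
 * store_signal_data_for_later() and arranges a trap at the end of the
 * critical section; the trap lands in interrupt_handle_pending(), which
 * calls run_deferred_handler() to replay the stashed signal. The
 * immediate replay below is only to show the pairing; in reality the
 * two halves run at different times. */
#if 0
static void deferral_lifecycle_example(int signal, siginfo_t *info,
                                       os_context_t *context)
{
    struct interrupt_data *data =
        arch_os_get_current_thread()->interrupt_data;
    if (maybe_defer_handler(interrupt_handle_now, data,
                            signal, info, context)) {
        /* Deferred: pending_handler/pending_signal/pending_info are set
         * and deferrables are blocked in *context. Later, once the
         * pseudo-atomic section or WITHOUT-INTERRUPTS ends: */
        run_deferred_handler(data, context);
    }
}
#endif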
static void
maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(signal,context,void_context);
    struct thread *thread = arch_os_get_current_thread();
    struct interrupt_data *data = thread->interrupt_data;
    if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
        interrupt_handle_now(signal, info, context);
    RESTORE_ERRNO;
}

static void
low_level_interrupt_handle_now(int signal, siginfo_t *info,
                               os_context_t *context)
{
    /* No FP control fixage needed, caller has done that. */
    check_blockables_blocked_or_lose(0);
    check_interrupts_enabled_or_lose(context);
    (*interrupt_low_level_handlers[signal])(signal, info, context);
}

static void
low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(signal,context,void_context);
    struct thread *thread = arch_os_get_current_thread();
    struct interrupt_data *data = thread->interrupt_data;

    if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
                            signal,info,context))
        low_level_interrupt_handle_now(signal, info, context);
    RESTORE_ERRNO;
}
#endif
#ifdef THREADS_USING_GCSIGNAL

/* This function must not cons, because that may trigger a GC. */
void
sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
{
    struct thread *thread=arch_os_get_current_thread();
    boolean was_in_lisp;

    /* Test for GC_INHIBIT _first_, else we'd trap on every single
     * pseudo atomic until gc is finally allowed. */
    if (read_TLS(GC_INHIBIT,thread) != NIL) {
        FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
        write_TLS(STOP_FOR_GC_PENDING,T,thread);
        return;
    } else if (arch_pseudo_atomic_atomic(context)) {
        FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
        write_TLS(STOP_FOR_GC_PENDING,T,thread);
        arch_set_pseudo_atomic_interrupted(context);
        maybe_save_gc_mask_and_block_deferrables
            (os_context_sigmask_addr(context));
        return;
    }

    FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));

    /* Not PA and GC not inhibited -- we can stop now. */

    was_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());

    if (was_in_lisp) {
        /* need the context stored so it can have registers scavenged */
        fake_foreign_function_call(context);
    }

    /* Not pending anymore. */
    write_TLS(GC_PENDING,NIL,thread);
    write_TLS(STOP_FOR_GC_PENDING,NIL,thread);

    /* Consider this: in a PA section GC is requested: GC_PENDING,
     * pseudo_atomic_interrupted and gc_blocked_deferrables are set,
     * deferrables are blocked then pseudo_atomic_atomic is cleared,
     * but a SIG_STOP_FOR_GC arrives before trapping to
     * interrupt_handle_pending. Here, GC_PENDING is cleared but
     * pseudo_atomic_interrupted is not and we go on running with
     * pseudo_atomic_interrupted but without a pending interrupt or
     * GC. GC_BLOCKED_DEFERRABLES is also left at 1. So let's tidy it
     * up. */
    if (thread->interrupt_data->gc_blocked_deferrables) {
        FSHOW_SIGNAL((stderr,"cleaning up after gc_blocked_deferrables\n"));
        clear_pseudo_atomic_interrupted(thread);
        sigcopyset(os_context_sigmask_addr(context),
                   &thread->interrupt_data->pending_mask);
        thread->interrupt_data->gc_blocked_deferrables = 0;
    }

    if(thread_state(thread)!=STATE_RUNNING) {
        lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
             fixnum_value(thread->state));
    }

    set_thread_state(thread,STATE_STOPPED);
    FSHOW_SIGNAL((stderr,"suspended\n"));

    /* While waiting for gc to finish, occupy ourselves with zeroing
     * the unused portion of the control stack to reduce conservatism.
     * On the platforms with threads and exact gc it is
     * actually a must. */
    scrub_control_stack();

    wait_for_thread_state_change(thread, STATE_STOPPED);
    FSHOW_SIGNAL((stderr,"resumed\n"));

    if(thread_state(thread)!=STATE_RUNNING) {
        lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
             fixnum_value(thread_state(thread)));
    }

    if (was_in_lisp) {
        undo_fake_foreign_function_call(context);
    }
}

#endif
void
interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(signal,context,void_context);
#ifndef LISP_FEATURE_WIN32
    if ((signal == SIGILL) || (signal == SIGBUS)
#if !(defined(LISP_FEATURE_LINUX) || defined(LISP_FEATURE_ANDROID))
        || (signal == SIGEMT)
#endif
        )
        corruption_warning_and_maybe_lose("Signal %d received (PC: %p)", signal,
                                          *os_context_pc_addr(context));
#endif
    interrupt_handle_now(signal, info, context);
    RESTORE_ERRNO;
}
/* manipulate the signal context and stack such that when the handler
 * returns, it will call function instead of whatever it was doing
 * previously
 */

#if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
extern int *context_eflags_addr(os_context_t *context);
#endif

extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
extern void post_signal_tramp(void);
extern void call_into_lisp_tramp(void);
void
arrange_return_to_c_function(os_context_t *context,
                             call_into_lisp_lookalike funptr,
                             lispobj function)
{
#if !(defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_SAFEPOINT))
    check_gc_signals_unblocked_or_lose
        (os_context_sigmask_addr(context));
#endif
#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
    void * fun=native_pointer(function);
    void *code = &(((struct simple_fun *) fun)->code);
#endif

    /* Build a stack frame showing `interrupted' so that the
     * user's backtrace makes (as much) sense (as usual) */

    /* fp state is saved and restored by call_into_lisp */
    /* FIXME: errno is not restored, but since current uses of this
     * function only call Lisp code that signals an error, it's not
     * much of a problem. In other words, running out of the control
     * stack between a syscall and (GET-ERRNO) may clobber errno if
     * something fails during signalling or in the handler. But I
     * can't see what can go wrong as long as there is no CONTINUE
     * like restart on them. */
#ifdef LISP_FEATURE_X86
    /* Suppose the existence of some function that saved all
     * registers, called call_into_lisp, then restored GP registers and
     * returned. It would look something like this:

     push   ebp
     mov    ebp esp
     pushfl
     pushal
     push   $0
     push   $0
     pushl  {address of function to call}
     call   0x8058db0 <call_into_lisp>
     addl   $12,%esp
     popal
     popfl
     leave
     ret

     * What we do here is set up the stack that call_into_lisp would
     * expect to see if it had been called by this code, and frob the
     * signal context so that signal return goes directly to call_into_lisp,
     * and when that function (and the lisp function it invoked) returns,
     * it returns to the second half of this imaginary function which
     * restores all registers and returns to C.
     *
     * For this to work, the latter part of the imaginary function
     * must obviously exist in reality. That would be post_signal_tramp
     */

#ifndef LISP_FEATURE_DARWIN
    u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
#endif
#if defined(LISP_FEATURE_DARWIN)
    u32 *register_save_area = (u32 *)os_allocate(0x40);

    FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function,
                  *os_context_register_addr(context,reg_ESP)));
    FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));

    /* 1. os_validate (malloc/mmap) register_save_block
     * 2. copy register state into register_save_block
     * 3. put a pointer to register_save_block in a register in the context
     * 4. set the context's EIP to point to a trampoline which:
     *    a. builds the fake stack frame from the block
     *    b. frees the block
     *    c. calls the function
     */

    *register_save_area = *os_context_pc_addr(context);
    *(register_save_area + 1) = function;
    *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
    *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
    *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
    *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
    *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
    *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
    *(register_save_area + 8) = *context_eflags_addr(context);

    *os_context_pc_addr(context) =
        (os_context_register_t) funptr;
    *os_context_register_addr(context,reg_ECX) =
        (os_context_register_t) register_save_area;
#else
    /* return address for call_into_lisp: */
    *(sp-15) = (u32)post_signal_tramp;
    *(sp-14) = function;        /* args for call_into_lisp : function */
    *(sp-13) = 0;               /* arg array */
    *(sp-12) = 0;               /* no. args */
    /* this order matches that used in POPAD */
    *(sp-11)=*os_context_register_addr(context,reg_EDI);
    *(sp-10)=*os_context_register_addr(context,reg_ESI);

    *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
    /* POPAD ignores the value of ESP: */
    *(sp-8)=0;
    *(sp-7)=*os_context_register_addr(context,reg_EBX);

    *(sp-6)=*os_context_register_addr(context,reg_EDX);
    *(sp-5)=*os_context_register_addr(context,reg_ECX);
    *(sp-4)=*os_context_register_addr(context,reg_EAX);
    *(sp-3)=*context_eflags_addr(context);
    *(sp-2)=*os_context_register_addr(context,reg_EBP);
    *(sp-1)=*os_context_pc_addr(context);

#endif
#elif defined(LISP_FEATURE_X86_64)
    u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);

    /* return address for call_into_lisp: */
    *(sp-18) = (u64)post_signal_tramp;

    *(sp-17)=*os_context_register_addr(context,reg_R15);
    *(sp-16)=*os_context_register_addr(context,reg_R14);
    *(sp-15)=*os_context_register_addr(context,reg_R13);
    *(sp-14)=*os_context_register_addr(context,reg_R12);
    *(sp-13)=*os_context_register_addr(context,reg_R11);
    *(sp-12)=*os_context_register_addr(context,reg_R10);
    *(sp-11)=*os_context_register_addr(context,reg_R9);
    *(sp-10)=*os_context_register_addr(context,reg_R8);
    *(sp-9)=*os_context_register_addr(context,reg_RDI);
    *(sp-8)=*os_context_register_addr(context,reg_RSI);
    /* skip RBP and RSP */
    *(sp-7)=*os_context_register_addr(context,reg_RBX);
    *(sp-6)=*os_context_register_addr(context,reg_RDX);
    *(sp-5)=*os_context_register_addr(context,reg_RCX);
    *(sp-4)=*os_context_register_addr(context,reg_RAX);
    *(sp-3)=*context_eflags_addr(context);
    *(sp-2)=*os_context_register_addr(context,reg_RBP);
    *(sp-1)=*os_context_pc_addr(context);

    *os_context_register_addr(context,reg_RDI) =
        (os_context_register_t)function; /* function */
    *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
    *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
#else
    struct thread *th=arch_os_get_current_thread();
    build_fake_control_stack_frames(th,context);
#endif
#ifdef LISP_FEATURE_X86

#if !defined(LISP_FEATURE_DARWIN)
    *os_context_pc_addr(context) = (os_context_register_t)funptr;
    *os_context_register_addr(context,reg_ECX) = 0;
    *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
#ifdef __NetBSD__
    *os_context_register_addr(context,reg_UESP) =
        (os_context_register_t)(sp-15);
#else
    *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
#endif /* __NETBSD__ */
#endif /* LISP_FEATURE_DARWIN */

#elif defined(LISP_FEATURE_X86_64)
    *os_context_pc_addr(context) = (os_context_register_t)funptr;
    *os_context_register_addr(context,reg_RCX) = 0;
    *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
    *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
#else
    /* this much of the calling convention is common to all
       non-x86 ports */
    *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
    *os_context_register_addr(context,reg_NARGS) = 0;
#ifdef reg_LIP
    *os_context_register_addr(context,reg_LIP) =
        (os_context_register_t)(unsigned long)code;
#endif
    *os_context_register_addr(context,reg_CFP) =
        (os_context_register_t)(unsigned long)access_control_frame_pointer(th);
#endif
#ifdef ARCH_HAS_NPC_REGISTER
    *os_context_npc_addr(context) =
        4 + *os_context_pc_addr(context);
#endif
#if defined(LISP_FEATURE_SPARC) || defined(LISP_FEATURE_ARM) || defined(LISP_FEATURE_ARM64)
    *os_context_register_addr(context,reg_CODE) =
        (os_context_register_t)((char*)fun + FUN_POINTER_LOWTAG);
#endif
    FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
           (long)function));
}

void
arrange_return_to_lisp_function(os_context_t *context, lispobj function)
{
#if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_X86)
    arrange_return_to_c_function(context,
                                 (call_into_lisp_lookalike)call_into_lisp_tramp,
                                 function);
#else
    arrange_return_to_c_function(context, call_into_lisp, function);
#endif
}
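
/* Usage sketch: a C-level signal handler that wants to signal a
 * Lisp-level error rewrites its context and returns, e.g.
 *
 *     arrange_return_to_lisp_function
 *         (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
 *     return 1;
 *
 * so that the sigreturn resumes execution in the named Lisp function
 * instead of at the faulting PC.  handle_guard_page_triggered() below
 * does exactly this. */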

// These have undefined_alien_function tramp in x-assem.S
#if !(defined(LISP_FEATURE_X86_64) || defined(LISP_FEATURE_ARM) || defined(LISP_FEATURE_ARM64))
/* KLUDGE: Theoretically the approach we use for undefined alien
 * variables should work for functions as well, but on PPC/Darwin
 * we get a bus error at bogus addresses instead, hence this workaround,
 * which has the added benefit of automatically discriminating between
 * functions and variables.
 */
void
undefined_alien_function(void)
{
    funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUN_ERROR));
}
#endif

void lower_thread_control_stack_guard_page(struct thread *th)
{
    protect_control_stack_guard_page(0, th);
    protect_control_stack_return_guard_page(1, th);
    th->control_stack_guard_page_protected = NIL;
    fprintf(stderr, "INFO: Control stack guard page unprotected\n");
}

void reset_thread_control_stack_guard_page(struct thread *th)
{
    memset(CONTROL_STACK_GUARD_PAGE(th), 0, os_vm_page_size);
    protect_control_stack_guard_page(1, th);
    protect_control_stack_return_guard_page(0, th);
    th->control_stack_guard_page_protected = T;
    fprintf(stderr, "INFO: Control stack guard page reprotected\n");
}
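
/* The protocol implemented by the two functions above, in short: the
 * end of the control stack is covered by a GUARD page and a
 * RETURN-GUARD page, and normally only GUARD is protected.  On
 * overflow, lower_thread_control_stack_guard_page() unprotects GUARD
 * (headroom for the error handler) and protects RETURN-GUARD.  When
 * the unwind later touches RETURN-GUARD,
 * reset_thread_control_stack_guard_page() flips both back, re-zeroing
 * the guard page on the way.  handle_guard_page_triggered() below
 * drives these transitions. */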

/* Called from the REPL, too. */
void reset_control_stack_guard_page(void)
{
    struct thread *th=arch_os_get_current_thread();
    if (th->control_stack_guard_page_protected == NIL) {
        reset_thread_control_stack_guard_page(th);
    }
}

boolean
handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
{
    struct thread *th=arch_os_get_current_thread();

    if(addr >= CONTROL_STACK_HARD_GUARD_PAGE(th) &&
       addr < CONTROL_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
        lose("Control stack exhausted, fault: %p, PC: %p",
             addr, *os_context_pc_addr(context));
    }
    else if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
            addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
        /* We hit the end of the control stack: disable guard page
         * protection so the error handler has some headroom, protect the
         * previous page so that we can catch returns from the guard page
         * and restore it. */
        if (th->control_stack_guard_page_protected == NIL)
            lose("control_stack_guard_page_protected NIL");
        lower_thread_control_stack_guard_page(th);
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
        /* For the unfortunate case when the control stack is
         * exhausted in a signal handler. */
        unblock_signals_in_context_and_maybe_warn(context);
#endif
        arrange_return_to_lisp_function
            (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
        return 1;
    }
    else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
            addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
        /* We're returning from the guard page: reprotect it, and
         * unprotect this one.  This works even if we somehow missed
         * the return-guard-page, and hit it on our way to new
         * exhaustion instead. */
        if (th->control_stack_guard_page_protected != NIL)
            lose("control_stack_guard_page_protected not NIL");
        reset_control_stack_guard_page();
        return 1;
    }
    else if(addr >= BINDING_STACK_HARD_GUARD_PAGE(th) &&
            addr < BINDING_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
        lose("Binding stack exhausted");
    }
    else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
            addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
        protect_binding_stack_guard_page(0, NULL);
        protect_binding_stack_return_guard_page(1, NULL);
        fprintf(stderr, "INFO: Binding stack guard page unprotected\n");

        /* For the unfortunate case when the binding stack is
         * exhausted in a signal handler. */
        unblock_signals_in_context_and_maybe_warn(context);
        arrange_return_to_lisp_function
            (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
        return 1;
    }
    else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
            addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
        protect_binding_stack_guard_page(1, NULL);
        protect_binding_stack_return_guard_page(0, NULL);
        fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
        return 1;
    }
    else if(addr >= ALIEN_STACK_HARD_GUARD_PAGE(th) &&
            addr < ALIEN_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
        lose("Alien stack exhausted");
    }
    else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
            addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
        protect_alien_stack_guard_page(0, NULL);
        protect_alien_stack_return_guard_page(1, NULL);
        fprintf(stderr, "INFO: Alien stack guard page unprotected\n");

        /* For the unfortunate case when the alien stack is
         * exhausted in a signal handler. */
        unblock_signals_in_context_and_maybe_warn(context);
        arrange_return_to_lisp_function
            (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
        return 1;
    }
    else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
            addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
        protect_alien_stack_guard_page(1, NULL);
        protect_alien_stack_return_guard_page(0, NULL);
        fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
        return 1;
    }
    else if (addr >= undefined_alien_address &&
             addr < undefined_alien_address + os_vm_page_size) {
        arrange_return_to_lisp_function
            (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
        return 1;
    }
    else return 0;
}
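
/* The boolean result is the whole interface here: 1 tells the caller
 * (the architecture-specific SIGSEGV/SIGBUS handler) that the fault hit
 * one of our guard pages and the context has already been adjusted, so
 * simply returning resumes sensibly; 0 means the fault is somebody
 * else's problem, e.g. a GC write-protection fault or a genuine memory
 * fault, and the caller should fall through to its other handling. */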

/*
 * noise to install handlers
 */

#ifndef LISP_FEATURE_WIN32
/* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
 * they are blocked; in Linux 2.6 the default handler is invoked
 * instead, which usually coredumps.  One might hastily think that
 * adding SA_NODEFER helps, but until ~2.6.13, if SA_NODEFER was
 * specified the whole sa_mask was ignored, instead of merely not
 * adding the signal in question to the mask.  That means that a signal
 * which is not blockable must be unblocked by hand at the beginning of
 * its handler.
 *
 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
 * will be unblocked in the sigmask during the signal handler.  -- RMK
 * X-mas day, 2005
 */
static volatile int sigaction_nodefer_works = -1;

#define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
#define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1

static void
sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
{
    sigset_t current;
    int i;
    get_current_sigmask(&current);
    /* There should be exactly two blocked signals: the two we added
     * to sa_mask when setting up the handler.  NetBSD doesn't block
     * the signal we're handling when SA_NODEFER is set; Linux before
     * 2.6.13 or so also doesn't block the other signal when
     * SA_NODEFER is set. */
    for(i = 1; i < NSIG; i++)
        if (sigismember(&current, i) !=
            (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
            FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
            sigaction_nodefer_works = 0;
        }
    if (sigaction_nodefer_works == -1)
        sigaction_nodefer_works = 1;
}

static void
see_if_sigaction_nodefer_works(void)
{
    struct sigaction sa, old_sa;

    sa.sa_flags = SA_SIGINFO | SA_NODEFER;
    sa.sa_sigaction = sigaction_nodefer_test_handler;
    sigemptyset(&sa.sa_mask);
    sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
    sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
    sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
    /* Make sure no signals are blocked. */
    {
        sigset_t empty;
        sigemptyset(&empty);
        thread_sigmask(SIG_SETMASK, &empty, 0);
    }
    kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
    while (sigaction_nodefer_works == -1);
    sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
}

#undef SA_NODEFER_TEST_BLOCK_SIGNAL
#undef SA_NODEFER_TEST_KILL_SIGNAL
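
/* The probe's verdict is consumed when handlers are installed below:
 * when SA_NODEFER works it is simply added to sa_flags; when it does
 * not, unblock_me_trampoline / low_level_unblock_me_trampoline are
 * used as the outermost handlers for non-blockable signals, undoing
 * the kernel's unwanted blocking by hand at handler entry. */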

#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)

static void *
signal_thread_trampoline(void *pthread_arg)
{
    intptr_t signo = (intptr_t) pthread_arg;
    os_context_t fake_context;
    siginfo_t fake_info;
#ifdef LISP_FEATURE_PPC
    mcontext_t uc_regs;
#endif

    memset(&fake_info, 0, sizeof(fake_info));
    memset(&fake_context, 0, sizeof(fake_context));
#ifdef LISP_FEATURE_PPC
    memset(&uc_regs, 0, sizeof(uc_regs));
    fake_context.uc_mcontext.uc_regs = &uc_regs;
#endif

    *os_context_pc_addr(&fake_context) = (intptr_t) &signal_thread_trampoline;
#ifdef ARCH_HAS_STACK_POINTER /* aka x86(-64) */
    *os_context_sp_addr(&fake_context) = (intptr_t) __builtin_frame_address(0);
#endif

    signal_handler_callback(interrupt_handlers[signo].lisp,
                            signo, &fake_info, &fake_context);
    return 0;
}

static void
sigprof_handler_trampoline(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(signal,context,void_context);
    struct thread *self = arch_os_get_current_thread();

    /* alloc() is not re-entrant and still uses pseudo atomic (even though
     * inline allocation does not).  In this case, give up. */
    if (get_pseudo_atomic_atomic(self))
        goto cleanup;

    struct alloc_region tmp = self->alloc_region;
    self->alloc_region = self->sprof_alloc_region;
    self->sprof_alloc_region = tmp;

    interrupt_handle_now_handler(signal, info, void_context);

    /* And we're back.  We know that the SIGPROF handler never unwinds
     * non-locally, and can simply swap things back: */

    tmp = self->alloc_region;
    self->alloc_region = self->sprof_alloc_region;
    self->sprof_alloc_region = tmp;

cleanup:
    ; /* Dear C compiler, it's OK to have a label here. */
    RESTORE_ERRNO;
}

static void
spawn_signal_thread_handler(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(signal,context,void_context);

    pthread_attr_t attr;
    pthread_t th;

    if (pthread_attr_init(&attr))
        goto lost;
    if (pthread_attr_setstacksize(&attr, thread_control_stack_size))
        goto lost;
    if (pthread_create(&th, &attr, &signal_thread_trampoline, (void*)(intptr_t) signal))
        goto lost;
    if (pthread_attr_destroy(&attr))
        goto lost;

    RESTORE_ERRNO;
    return;

lost:
    lose("spawn_signal_thread_handler");
}
#endif
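
/* Design note on the safepoint variant above: rather than running a
 * Lisp handler in the delicate environment of an arbitrary signal
 * frame, spawn_signal_thread_handler() ships the signal number off to
 * a fresh pthread, and signal_thread_trampoline() hands the handler
 * machinery a zeroed fake context so it still has a context-shaped
 * object to work with.  SIGPROF and signals installed as synchronous
 * keep an in-signal-context path (see install_handler below). */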

static void
unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(signal,context,void_context);
    sigset_t unblock;

    sigemptyset(&unblock);
    sigaddset(&unblock, signal);
    thread_sigmask(SIG_UNBLOCK, &unblock, 0);
    interrupt_handle_now(signal, info, context);
    RESTORE_ERRNO;
}

static void
low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(signal,context,void_context);
    sigset_t unblock;

    sigemptyset(&unblock);
    sigaddset(&unblock, signal);
    thread_sigmask(SIG_UNBLOCK, &unblock, 0);
    (*interrupt_low_level_handlers[signal])(signal, info, context);
    RESTORE_ERRNO;
}

static void
low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(signal,context,void_context);
    (*interrupt_low_level_handlers[signal])(signal, info, context);
    RESTORE_ERRNO;
}

void
undoably_install_low_level_interrupt_handler (int signal,
                                              interrupt_handler_t handler)
{
    struct sigaction sa;

    if (0 > signal || signal >= NSIG) {
        lose("bad signal number %d\n", signal);
    }

    if (ARE_SAME_HANDLER(handler, SIG_DFL))
        sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
    else if (sigismember(&deferrable_sigset,signal))
        sa.sa_sigaction = low_level_maybe_now_maybe_later;
    else if (!sigaction_nodefer_works &&
             !sigismember(&blockable_sigset, signal))
        sa.sa_sigaction = low_level_unblock_me_trampoline;
    else
        sa.sa_sigaction = low_level_handle_now_handler;

#ifdef LISP_FEATURE_SB_THRUPTION
    /* It's in `deferrable_sigset' so that we block & unblock it
     * properly, but we don't actually want to defer it.  And if we
     * put it only into blockable_sigset, we'd have to special-case
     * it around thread creation at least. */
    if (signal == SIGPIPE)
        sa.sa_sigaction = low_level_handle_now_handler;
#endif

    sa.sa_mask = blockable_sigset;
    sa.sa_flags = SA_SIGINFO | SA_RESTART
        | (sigaction_nodefer_works ? SA_NODEFER : 0);
#if defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK)
    if(signal==SIG_MEMORY_FAULT) {
        sa.sa_flags |= SA_ONSTACK;
# ifdef LISP_FEATURE_SB_SAFEPOINT
        sigaddset(&sa.sa_mask, SIGRTMIN);
        sigaddset(&sa.sa_mask, SIGRTMIN+1);
# endif
    }
#endif

    sigaction(signal, &sa, NULL);
    interrupt_low_level_handlers[signal] =
        (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
}
#endif

/* This is called from Lisp. */
uword_t
install_handler(int signal, void handler(int, siginfo_t*, os_context_t*),
                int synchronous)
{
#ifndef LISP_FEATURE_WIN32
    struct sigaction sa;
    sigset_t old;
    union interrupt_handler oldhandler;

    FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));

    block_blockable_signals(&old);

    FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%p\n",
           interrupt_low_level_handlers[signal]));
    if (interrupt_low_level_handlers[signal]==0) {
        if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
            ARE_SAME_HANDLER(handler, SIG_IGN))
            sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
        else if (signal == SIGPROF)
            sa.sa_sigaction = sigprof_handler_trampoline;
        else if (!synchronous)
            sa.sa_sigaction = spawn_signal_thread_handler;
#endif
        else if (sigismember(&deferrable_sigset, signal))
            sa.sa_sigaction = maybe_now_maybe_later;
        else if (!sigaction_nodefer_works &&
                 !sigismember(&blockable_sigset, signal))
            sa.sa_sigaction = unblock_me_trampoline;
        else
            sa.sa_sigaction = interrupt_handle_now_handler;

        sa.sa_mask = blockable_sigset;
        sa.sa_flags = SA_SIGINFO | SA_RESTART |
            (sigaction_nodefer_works ? SA_NODEFER : 0);
        sigaction(signal, &sa, NULL);
    }

    oldhandler = interrupt_handlers[signal];
    interrupt_handlers[signal].c = handler;

    thread_sigmask(SIG_SETMASK, &old, 0);

    FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));

    return (uword_t)oldhandler.lisp;
#else
    /* Probably-wrong Win32 hack */
    return 0;
#endif
}
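
/* A sketch of the resulting dispatch: if a low-level (C) handler is
 * registered for the signal, install_handler leaves the sigaction
 * alone and merely records the new Lisp handler in
 * interrupt_handlers[], so the low-level handler stays outermost.
 * Otherwise the outermost handler is picked by the same
 * deferrable/non-blockable logic used for low-level handlers, and the
 * previous Lisp handler is handed back (as a uword_t) so the caller
 * can restore it later. */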

/* This must not go through lisp as it's allowed anytime, even when on
 * the altstack. */
void
sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
{
    /* Save the interrupt context.  No need to undo it, since lose()
     * shouldn't return. */
    fake_foreign_function_call(context);
    lose("SIGABRT received.\n");
}

void
interrupt_init(void)
{
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    int i;
    SHOW("entering interrupt_init()");
#ifndef LISP_FEATURE_WIN32
    see_if_sigaction_nodefer_works();
#endif
    sigemptyset(&deferrable_sigset);
    sigemptyset(&blockable_sigset);
    sigemptyset(&gc_sigset);
    sigaddset_deferrable(&deferrable_sigset);
    sigaddset_blockable(&blockable_sigset);
    sigaddset_gc(&gc_sigset);
#endif

#ifndef LISP_FEATURE_WIN32
    /* Set up high level handler information. */
    for (i = 0; i < NSIG; i++) {
        interrupt_handlers[i].c =
            /* (The cast here blasts away the distinction between
             * SA_SIGACTION-style three-argument handlers and
             * signal(..)-style one-argument handlers, which is OK
             * because it works to call the 1-argument form where the
             * 3-argument form is expected.) */
            (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
    }
    undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
#endif
    SHOW("returning from interrupt_init()");
}

#ifndef LISP_FEATURE_WIN32
int
siginfo_code(siginfo_t *info)
{
    return info->si_code;
}

void
lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
{
    /* If we lose on corruption, provide LDB with debugging information. */
    fake_foreign_function_call(context);

    /* To allow debugging memory faults in signal handlers and such. */
    corruption_warning_and_maybe_lose("Memory fault at %p (pc=%p, sp=%p)",
                                      addr,
                                      *os_context_pc_addr(context),
#ifdef ARCH_HAS_STACK_POINTER
                                      *os_context_sp_addr(context)
#else
                                      0
#endif
                                      );
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
# if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
#  error memory fault emulation needs validating for this architecture
# endif
    /* We're on the altstack, and don't want to run Lisp code here, so
     * we need to return from this signal handler.  But when we get to
     * Lisp we'd like to have a signal context (with correct values in
     * it) to present to the debugger, along with knowledge of what
     * the faulting address was.  To get a signal context on the main
     * stack, we arrange to return to a trap instruction.  To get the
     * correct program counter in the context, we save it on the stack
     * here and restore it to the context in the trap handler.  To
     * pass the fault address, we save it on the stack here and pick
     * it up in the trap handler.  And the stack pointer manipulation
     * works as long as the on-stack side only pops items in its trap
     * handler. */
    extern void memory_fault_emulation_trap(void);
    undo_fake_foreign_function_call(context);
    void **sp = (void **)*os_context_sp_addr(context);
    *--sp = (void *)*os_context_pc_addr(context);
    *--sp = addr;
# ifdef LISP_FEATURE_X86
    /* KLUDGE: x86-linux sp_addr doesn't affect the CPU on return */
    *((void **)os_context_register_addr(context, reg_ESP)) = sp;
# else
    *((void **)os_context_sp_addr(context)) = sp;
# endif
    *os_context_pc_addr(context) =
        (os_context_register_t)memory_fault_emulation_trap;
    /* We exit here, letting the signal handler return, picking up at
     * memory_fault_emulation_trap (in target-assem.S), which will
     * trap, and the handler calls the function below, where we
     * restore our state to parallel what a non-x86oid would have, and
     * then run the common code for handling the error in Lisp. */
}
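
/* At this point, on the x86oids, the main stack just below the
 * interrupted frame holds two words: the fault address, then the saved
 * PC above it.  The trap fires at memory_fault_emulation_trap, and the
 * trap handler below pops the two words in that order, undoing the
 * push sequence above. */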

void
handle_memory_fault_emulation_trap(os_context_t *context)
{
    void **sp = (void **)*os_context_sp_addr(context);
    void *addr = *sp++;
    *os_context_pc_addr(context) = (os_context_register_t)*sp++;
# ifdef LISP_FEATURE_X86
    /* KLUDGE: x86-linux sp_addr doesn't affect the CPU on return */
    *((void **)os_context_register_addr(context, reg_ESP)) = sp;
# else
    *os_context_sp_addr(context) = (os_context_register_t)sp;
# endif
    fake_foreign_function_call(context);
#endif /* C_STACK_IS_CONTROL_STACK */
    /* On x86oids, we're in handle_memory_fault_emulation_trap().
     * On real computers, we're still in lisp_memory_fault_error(). */
#ifndef LISP_FEATURE_SB_SAFEPOINT
    unblock_gc_signals(0, 0);
#endif
    DX_ALLOC_SAP(context_sap, context);
    DX_ALLOC_SAP(fault_address_sap, addr);
    funcall2(StaticSymbolFunction(MEMORY_FAULT_ERROR),
             context_sap, fault_address_sap);
    undo_fake_foreign_function_call(context);
}
#endif /* !LISP_FEATURE_WIN32 */

static void
unhandled_trap_error(os_context_t *context)
{
    DX_ALLOC_SAP(context_sap, context);
    fake_foreign_function_call(context);
#ifndef LISP_FEATURE_SB_SAFEPOINT
    unblock_gc_signals(0, 0);
#endif

#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
#endif
    funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
    lose("UNHANDLED-TRAP-ERROR fell through");
}

/* Common logic for trapping instructions.  How we actually handle each
 * case is highly architecture-dependent, but the overall shape is
 * this. */
void
handle_trap(os_context_t *context, int trap)
{
    switch(trap) {
#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
    case trap_PendingInterrupt:
        FSHOW((stderr, "/<trap pending interrupt>\n"));
        arch_skip_instruction(context);
        interrupt_handle_pending(context);
        break;
#endif
    case trap_Error:
    case trap_Cerror:
#ifdef trap_InvalidArgCount
    case trap_InvalidArgCount:
#endif
        FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
        interrupt_internal_error(context, trap==trap_Cerror);
        break;
    case trap_Breakpoint:
        arch_handle_breakpoint(context);
        break;
    case trap_FunEndBreakpoint:
        arch_handle_fun_end_breakpoint(context);
        break;
#ifdef trap_AfterBreakpoint
    case trap_AfterBreakpoint:
        arch_handle_after_breakpoint(context);
        break;
#endif
#ifdef trap_SingleStepAround
    case trap_SingleStepAround:
    case trap_SingleStepBefore:
        arch_handle_single_step_trap(context, trap);
        break;
#endif
#ifdef trap_GlobalSafepoint
    case trap_GlobalSafepoint:
        fake_foreign_function_call(context);
        thread_in_lisp_raised(context);
        undo_fake_foreign_function_call(context);
        arch_skip_instruction(context);
        break;
    case trap_CspSafepoint:
        fake_foreign_function_call(context);
        thread_in_safety_transition(context);
        undo_fake_foreign_function_call(context);
        arch_skip_instruction(context);
        break;
#endif
#if defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_GENCGC)
    case trap_Allocation:
        arch_handle_allocation_trap(context);
        arch_skip_instruction(context);
        break;
#endif
#if defined(LISP_FEATURE_C_STACK_IS_CONTROL_STACK) && !defined(LISP_FEATURE_WIN32)
    case trap_MemoryFaultEmulation:
        handle_memory_fault_emulation_trap(context);
        break;
#endif
    case trap_Halt:
        fake_foreign_function_call(context);
        lose("%%PRIMITIVE HALT called; the party is over.\n");
        /* No break: lose() never returns. */
    default:
        unhandled_trap_error(context);