linux-user: Make sigaltstack stacks per-thread
linux-user/signal.c
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "trace.h"
#include "signal-common.h"
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
    /* [SIGIOT] = TARGET_SIGIOT, */
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};

static uint8_t target_to_host_signal_table[_NSIG];

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}
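
/* Guest sigset helpers.  A target_sigset_t is an array of abi_ulong words
 * of TARGET_NSIG_BPW bits each; signal n is bit (n - 1) % TARGET_NSIG_BPW
 * of word (n - 1) / TARGET_NSIG_BPW, matching the kernel's sigset layout
 * (so e.g. SIGHUP, signal 1, is bit 0 of word 0).
 */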
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}
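
/* Block all host signals for this thread and flag signal_pending, so that
 * process_pending_signals() will run (and restore the guest signal mask)
 * before any further guest code executes.  Returns the previous value of
 * signal_pending; a non-zero result means a signal was already pending and
 * the caller should typically restart its syscall:
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 */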
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */
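
/* Return non-zero if sp lies inside the current thread's alternate signal
 * stack.  The unsigned subtraction also rejects sp below ss_sp: in that
 * case the difference wraps around and cannot be less than ss_size.
 */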
int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}
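
/* Pick the stack pointer for a new signal frame: if the handler requested
 * the alternate stack (SA_ONSTACK) and we are not already on it, return the
 * top of the alternate stack (the per-arch frame setup code allocates the
 * frame downwards from there); otherwise keep the current sp.
 */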
abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}
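
/* Copy the current thread's sigaltstack state out to a guest target_stack_t,
 * as used when filling in the uc_stack field of a signal frame's ucontext.
 */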
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
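
/* To illustrate the packing above: a child-exit SIGCHLD arrives here with
 * si_code == CLD_EXITED, which becomes deposit32(CLD_EXITED, 16, 16,
 * QEMU_SI_CHLD); tswap_siginfo() later recovers the two halves with
 * extract32() and sextract32().
 */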

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we support only the case where POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
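
/* Return non-zero if the default action for this target signal would kill
 * the process; signals that are ignored by default, or that are job control
 * stop/continue signals, return 0.
 */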
static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0) {
            host_to_target_signal_table[i] = i;
        }
    }
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef TARGET_GPROF
        if (i == SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use. We need at least
           SIGSEGV and SIGBUS, to detect exceptions. We can not just
           trap all signals because it affects syscall interrupt
           behavior. But do trap all default-fatal signals. */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>. The kernel doesn't allow exit() or _exit() to pass
     * a negative value. To get the proper exit code we need to
     * actually die from an uncaught signal. Here the default signal
     * handler is installed, we send ourselves a signal and we wait
     * for it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
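
/* Note that queue_signal() is only used for synchronous, CPU-generated
 * signals (hence the single sync_signal slot); asynchronous signals from
 * the host are recorded in ts->sigtab[] by host_signal_handler() below.
 */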

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
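
/* With HAVE_SAFE_SYSCALL defined, the arch-specific replacement for the
 * stub above examines the host ucontext: if the signal arrived before the
 * host syscall instruction in the safe_syscall code fragment executed, it
 * winds the PC back to the fragment's entry so the pending signal is
 * noticed and the guest syscall is restarted with -TARGET_ERESTARTSYS.
 */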

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       so we forward those to it */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    /* XXX: test errors */
    if (uoss_addr) {
        __put_user(ts->sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(ts->sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr) {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp)) {
            goto out;
        }

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0) {
            goto out;
        }

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        ts->sigaltstack_used.ss_sp = ss.ss_sp;
        ts->sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss))) {
            goto out;
        }
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG ||
        sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
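        /* We deliberately never replace the host SIGSEGV/SIGBUS handlers:
         * QEMU must keep catching those itself to detect guest faults, so
         * for these two only the recorded guest disposition in
         * sigact_table changes.
         */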
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are either
           job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
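
/* Scan and deliver all pending guest signals for the current thread.
 * Called from the per-target cpu_loop() when signal_pending is set; host
 * signals are blocked while the pending queues are scanned, and unblocked
 * again just before returning to guest code.
 */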
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}