linux-user/signal.c

/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "trace.h"
#include "signal-common.h"

struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};

static uint8_t target_to_host_signal_table[_NSIG];

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}
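
/* Both helpers above use the same word/bit split: signal numbers are
 * 1-based, so signal N maps to bit (N-1) % TARGET_NSIG_BPW of word
 * (N-1) / TARGET_NSIG_BPW. For example, with TARGET_NSIG_BPW == 32,
 * signal 33 lands in set->sig[1], bit 0.
 */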

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}
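
/* Block all host signals for this thread and set ts->signal_pending.
 * The atomic_xchg() returns the previous value of signal_pending, so a
 * non-zero result tells the caller a signal arrived first; callers such
 * as do_sigprocmask() below use that to return -TARGET_ERESTARTSYS so
 * the guest syscall is restarted.
 */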
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
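
/* Typical caller pattern, a sketch of how the sigprocmask syscall
 * emulation in linux-user/syscall.c uses this wrapper (conversion of
 * the guest "how" value and error checking are omitted here):
 *
 *     sigset_t set, oldset;
 *     target_to_host_sigset(&set, &target_set);
 *     ret = do_sigprocmask(how, &set, &oldset);
 *     if (ret) {
 *         return ret;    (-TARGET_ERESTARTSYS: restart the syscall)
 *     }
 */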

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    __put_user(target_sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &uss->ss_size);
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
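
/* The deposit32() above produces the following si_code layout (the
 * QEMU_SI_* type codes are QEMU-internal and never reach the guest):
 *
 *   bits 31..16  si_type, e.g. QEMU_SI_KILL
 *   bits 15..0   the original si_code, e.g. SI_USER
 *
 * tswap_siginfo() strips the top half back off before the value is
 * written out to the guest.
 */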

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we assume that only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default. */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals. */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if the given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef TARGET_GPROF
        if (i == SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use. We need at least
           SIGSEGV and SIGBUS, to detect exceptions. We can not just
           trap all signals because it affects syscall interrupt
           behavior. But do trap all default-fatal signals. */
        if (fatal_signal(i))
            sigaction(host_sig, &act, NULL);
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>. The kernel doesn't allow exit() or _exit() to pass
     * a negative value. To get the proper exit code we need to
     * actually die from an uncaught signal. Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
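
/* Note that queue_signal() only ever fills in ts->sync_signal: it is
 * used for synchronous, CPU-generated signals (e.g. a guest SIGSEGV
 * raised by the emulator), which process_pending_signals() checks
 * before scanning the ordinary per-signal queue in ts->sigtab[].
 */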

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
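
/* On hosts that define HAVE_SAFE_SYSCALL, a real implementation of
 * rewind_if_in_safe_syscall() is provided by the host-specific headers:
 * if the interrupted PC in the ucontext lies inside the safe-syscall
 * assembly fragment, it is wound back to the fragment's start so that
 * the pending-signal check is re-run and the syscall can fail with
 * -TARGET_ERESTARTSYS instead of blocking.
 */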

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       so we forward those signals to it */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the target signal handler is entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if (uoss_addr) {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr) {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG ||
        sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe. */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: some signals are ignored; the others are
           either job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}