/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "trace.h"
#include "signal-common.h"

struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
    /* [SIGIOT] = TARGET_SIGIOT, */
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

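/*
 * Illustrative note on the tables above: conversion is an involution on
 * the swapped pair, e.g. host_to_target_signal(__SIGRTMIN) yields
 * __SIGRTMAX and target_to_host_signal(__SIGRTMAX) maps back again.
 * Out-of-range values pass through unchanged, so callers still have to
 * check 1 <= sig <= TARGET_NSIG before indexing per-signal tables.
 */
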
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

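/*
 * Worked example of the layout: signals are 1-based but bit positions are
 * 0-based (signal 1 lives in bit 0, as in the kernel's sigset). With
 * TARGET_NSIG_BPW == 32, target_sigaddset(set, 33) decrements to 32 and
 * sets bit 32 % 32 == 0 of word 32 / 32 == 1, i.e. bit 0 of set->sig[1].
 */
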
void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int i;

    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int i;

    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;

    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

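/*
 * Typical caller pattern (a sketch; see do_sigprocmask() and
 * do_sigaction() below): retry the whole syscall if a signal was
 * already pending when we blocked:
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 *     ... modify emulated signal state ...
 */
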
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

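/*
 * Example of the semantics above: a guest sigprocmask(SIG_BLOCK, {SIGUSR1})
 * arrives here with the set already converted to host signal numbers, is
 * ORed into ts->signal_mask, and then SIGKILL/SIGSTOP are stripped again,
 * mirroring what the kernel does for an ordinary process.
 */
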
#if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    __put_user(target_sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &uss->ss_size);
}

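/*
 * Note on the convention assumed above: on most targets stacks grow
 * downwards, which is why target_sigsp() hands back ss_sp + ss_size,
 * the high end of the alternate stack, as the starting SP for the frame.
 */
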
/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;

    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

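/*
 * Worked example of the packing above: for a SIGCHLD guessed as
 * QEMU_SI_CHLD with si_code CLD_EXITED (1),
 *
 *     deposit32(1, 16, 16, QEMU_SI_CHLD) == (QEMU_SI_CHLD << 16) | 1
 *
 * and tswap_siginfo() recovers the two halves with
 * extract32(v, 16, 16) -> QEMU_SI_CHLD and sextract32(v, 0, 16) -> 1,
 * the latter sign-extending so negative si_code values survive too.
 */
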
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we only support POSIX RT signals here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0) {
            host_to_target_signal_table[i] = i;
        }
    }
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

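/*
 * Sketch of the table generation above: any host signal without an
 * explicit entry in host_to_target_signal_table[] maps to itself
 * (first loop), and the second loop builds the inverse mapping, so e.g.
 * the __SIGRTMIN/__SIGRTMAX swap is automatically inverted in
 * target_to_host_signal_table[].
 */
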
/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait
     * for it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
     * of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

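/*
 * Usage sketch (mirrors force_sig() above): populate a target_siginfo_t,
 * pick the QEMU_SI_* marker matching the union member you filled in, and
 * call queue_signal(env, info.si_signo, QEMU_SI_KILL, &info). The signal
 * is recorded as the thread's synchronous signal and delivered on the
 * next pass through process_pending_signals().
 */
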
#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if (uoss_addr) {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr) {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp)) {
            goto out;
        }

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0) {
            goto out;
        }

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss))) {
            goto out;
        }
    }

    ret = 0;
out:
    return ret;
}

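/*
 * Example flow: a guest sigaltstack(&new_ss, &old_ss) arrives with both
 * addresses non-zero. The old stack is snapshotted into oss up front, but
 * only copied out to the guest after the new stack has been validated and
 * installed, so on error the guest's old_ss buffer is left untouched.
 */
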
/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG ||
        sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

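/*
 * Example of the host-state sync above: if the guest sets SIGUSR1 to
 * SIG_IGN, the host disposition becomes SIG_IGN as well, so a stray
 * host-side SIGUSR1 can no longer interrupt an in-progress host syscall
 * with EINTR on the guest's behalf.
 */
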
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are job
           control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

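/*
 * Example of the masking logic above: for a handler registered without
 * SA_NODEFER and with sa_mask = {SIGUSR2}, delivering SIGUSR1 leaves
 * both SIGUSR1 and SIGUSR2 blocked in ts->signal_mask until the guest's
 * sigreturn restores the saved target_old_set.
 */
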
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}