/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "gdbstub/user.h"
#include "hw/core/tcg-cpu-ops.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
#include "user/safe-syscall.h"
#include "tcg/tcg.h"

static struct target_sigaction sigact_table[TARGET_NSIG];

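/*
 * Guest sigaction state, indexed by target signal number minus one.
 * Readers and writers below run with guest signals blocked (see
 * block_signals()), which is what makes the table safe to update.
 */
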
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;

/*
 * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
 * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);

static uint8_t host_to_target_signal_table[_NSIG] = {
#define MAKE_SIG_ENTRY(sig)     [sig] = TARGET_##sig,
        MAKE_SIGNAL_LIST
#undef MAKE_SIG_ENTRY
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

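/*
 * Both tables are filled in by signal_table_init(); a zero entry means
 * "not yet assigned".  After initialization, unmapped host signals map
 * to TARGET_NSIG + 1 and unmapped target signals map to _NSIG, so the
 * range checks in the conversion helpers below fail cleanly for them.
 */
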
/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig >= _NSIG) {
        return TARGET_NSIG + 1;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig > TARGET_NSIG) {
        return _NSIG;
    }
    return target_to_host_signal_table[sig];
}

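/*
 * Round-trip note: once signal_table_init() has run, the two tables are
 * inverses of each other, so target_to_host_signal() followed by
 * host_to_target_signal() returns the original guest signal number for
 * every guest signal that could be fitted onto a host signal; unfitted
 * guest signals convert to _NSIG and back out of range.
 */
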
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

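/*
 * Layout used by the two helpers above: guest signal n occupies bit
 * (n - 1) % TARGET_NSIG_BPW of word (n - 1) / TARGET_NSIG_BPW, matching
 * the kernel convention that signal 1 is bit 0 of word 0.
 */
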
void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;

    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

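/*
 * tswapal() byte-swaps each word when guest and host endianness differ,
 * so the target_sigset_t built here is already in guest memory order
 * and can be copied out to guest memory as-is.
 */
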
void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;

    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;

    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

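/*
 * Typical caller pattern (sketch): emulation code that must not race
 * with signal delivery checks the return value and restarts the guest
 * syscall if a signal was already pending:
 *
 *     if (block_signals()) {
 *         return -QEMU_ERESTARTSYS;
 *     }
 *
 * as do_sigprocmask() and do_sigaction() do below.
 */
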
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -QEMU_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

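/*
 * The single unsigned comparison above covers both bounds: if sp is
 * below ss_sp the subtraction wraps to a huge value and the "<" test
 * fails, so no separate lower-bound check is needed.
 */
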
int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

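/*
 * Note: the on-stack case returns the highest address of the alternate
 * stack, ss_sp + ss_size, since the stack grows down on the targets
 * this generic helper serves.
 */
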
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            if (si_code == CLD_EXITED) {
                tinfo->_sifields._sigchld._status = info->si_status;
            } else {
                tinfo->_sifields._sigchld._status
                    = host_to_target_signal(info->si_status & 0x7f)
                        | (info->si_status & ~0x7f);
            }
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

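/*
 * Encoding sketch: deposit32(si_code, 16, 16, si_type) stores the
 * QEMU_SI_* marker in bits 16..31 while bits 0..15 keep the low half
 * of si_code; tswap_siginfo() below undoes this with
 * extract32(..., 16, 16) for the marker and sextract32(..., 0, 16) to
 * recover the (possibly negative) si_code.
 */
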
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we support only POSIX RT signals here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int hsig, tsig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  Glibc uses the lower 2
     * RT signals and (hopefully) nobody uses the upper ones.
     * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
     * To fix this properly we would need to do manual signal delivery
     * multiplexed over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     *
     * Remap the target SIGABRT, so that we can distinguish host abort
     * from guest abort.  When the guest registers a signal handler or
     * calls raise(SIGABRT), the host will raise SIG_RTn.  If the guest
     * arrives at dump_core_and_abort(), we will map back to host SIGABRT
     * so that the parent (native or emulated) sees the correct signal.
     * Finally, also map host to guest SIGABRT so that the emulated
     * parent sees the correct mapping from wait status.
     */

    hsig = SIGRTMIN;
    host_to_target_signal_table[SIGABRT] = 0;
    host_to_target_signal_table[hsig++] = TARGET_SIGABRT;

    for (tsig = TARGET_SIGRTMIN;
         hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
         hsig++, tsig++) {
        host_to_target_signal_table[hsig] = tsig;
    }

    /* Invert the mapping that has already been assigned. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        tsig = host_to_target_signal_table[hsig];
        if (tsig) {
            assert(target_to_host_signal_table[tsig] == 0);
            target_to_host_signal_table[tsig] = hsig;
        }
    }

    host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;

    /* Map everything else out-of-bounds. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        if (host_to_target_signal_table[hsig] == 0) {
            host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
        }
    }
    for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        if (target_to_host_signal_table[tsig] == 0) {
            target_to_host_signal_table[tsig] = _NSIG;
            count++;
        }
    }

    trace_signal_table_init(count);
}

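/*
 * Resulting layout (host-dependent sketch, assuming glibc reserves the
 * two lowest host RT signals): host SIGRTMIN carries TARGET_SIGABRT,
 * host SIGRTMIN + 1 carries TARGET_SIGRTMIN, and so on until SIGRTMAX
 * or TARGET_NSIG runs out; the leftover guest signals are the "count"
 * reported to the trace point above.
 */
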
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act, oact;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;

    /*
     * A parent process may configure ignored signals, but all other
     * signals are default.  For any target signals that have no host
     * mapping, set to ignore.  For all core_dump_signal, install our
     * host signal handler so that we may invoke dump_core_and_abort.
     * This includes SIGSEGV and SIGBUS, which also need our signal
     * handler for paging and exceptions.
     */
    for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        int hsig = target_to_host_signal(tsig);
        abi_ptr thand = TARGET_SIG_IGN;

        if (hsig >= _NSIG) {
            continue;
        }

        /* As we force remap SIGABRT, cannot probe and install in one step. */
        if (tsig == TARGET_SIGABRT) {
            sigaction(SIGABRT, NULL, &oact);
            sigaction(hsig, &act, NULL);
        } else {
            struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
            sigaction(hsig, iact, &oact);
        }

        if (oact.sa_sigaction != (void *)SIG_IGN) {
            thand = TARGET_SIG_DFL;
        }
        sigact_table[tsig - 1]._sa_handler = thand;
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/* abort execution with signal */
static G_NORETURN
void die_with_signal(int host_sig)
{
    struct sigaction act = {
        .sa_handler = SIG_DFL,
    };

    /*
     * The proper exit code for dying from an uncaught signal is -<signal>.
     * The kernel doesn't allow exit() or _exit() to pass a negative value.
     * To get the proper exit code we need to actually die from an uncaught
     * signal.  Here the default signal handler is installed, we send
     * the signal and we wait for it to arrive.
     */
    sigfillset(&act.sa_mask);
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (reusing the mask inside of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    _exit(EXIT_FAILURE);
}

static G_NORETURN
void dump_core_and_abort(CPUArchState *env, int target_sig)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;

    /* On exit, undo the remapping of SIGABRT. */
    if (target_sig == TARGET_SIGABRT) {
        host_sig = SIGABRT;
    } else {
        host_sig = target_to_host_signal(target_sig);
    }
    trace_user_dump_core_and_abort(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped" );
    }

    preexit_cleanup(env, 128 + target_sig);
    die_with_signal(host_sig);
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
}

/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    host_sigcontext *uc = (host_sigcontext *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}

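/*
 * This relies on the safe-syscall contract (see user/safe-syscall.h):
 * while the PC lies between safe_syscall_start and safe_syscall_end the
 * syscall has not yet committed, so moving the PC back to
 * safe_syscall_start re-runs the pending-signal check and makes the
 * interrupted operation restartable without losing the signal.
 */
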
static G_NORETURN
void die_from_signal(siginfo_t *info)
{
    char sigbuf[4], codebuf[12];
    const char *sig, *code = NULL;

    switch (info->si_signo) {
    case SIGSEGV:
        sig = "SEGV";
        switch (info->si_code) {
        case SEGV_MAPERR:
            code = "MAPERR";
            break;
        case SEGV_ACCERR:
            code = "ACCERR";
            break;
        }
        break;
    case SIGBUS:
        sig = "BUS";
        switch (info->si_code) {
        case BUS_ADRALN:
            code = "ADRALN";
            break;
        case BUS_ADRERR:
            code = "ADRERR";
            break;
        }
        break;
    case SIGILL:
        sig = "ILL";
        switch (info->si_code) {
        case ILL_ILLOPC:
            code = "ILLOPC";
            break;
        case ILL_ILLOPN:
            code = "ILLOPN";
            break;
        case ILL_ILLADR:
            code = "ILLADR";
            break;
        case ILL_PRVOPC:
            code = "PRVOPC";
            break;
        case ILL_PRVREG:
            code = "PRVREG";
            break;
        case ILL_COPROC:
            code = "COPROC";
            break;
        }
        break;
    case SIGFPE:
        sig = "FPE";
        switch (info->si_code) {
        case FPE_INTDIV:
            code = "INTDIV";
            break;
        case FPE_INTOVF:
            code = "INTOVF";
            break;
        }
        break;
    case SIGTRAP:
        sig = "TRAP";
        break;
    default:
        snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
        sig = sigbuf;
        break;
    }
    if (code == NULL) {
        snprintf(codebuf, sizeof(codebuf), "%d", info->si_code);
        code = codebuf;
    }

    error_report("QEMU internal SIG%s {code=%s, addr=%p}",
                 sig, code, info->si_addr);
    die_with_signal(info->si_signo);
}

static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
                                 host_sigcontext *uc)
{
    uintptr_t host_addr = (uintptr_t)info->si_addr;
    /*
     * Convert forcefully to guest address space: addresses outside
     * reserved_va are still valid to report via SEGV_MAPERR.
     */
    bool is_valid = h2g_valid(host_addr);
    abi_ptr guest_addr = h2g_nocheck(host_addr);
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
    bool maperr;

    /* If this was a write to a TB protected page, restart. */
    if (is_write
        && is_valid
        && info->si_code == SEGV_ACCERR
        && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
                                       pc, guest_addr)) {
        return;
    }

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (access_type != MMU_INST_FETCH
        && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    maperr = true;
    if (is_valid && info->si_code == SEGV_ACCERR) {
        /*
         * With reserved_va, the whole address space is PROT_NONE,
         * which means that we may get ACCERR when we want MAPERR.
         */
        if (page_get_flags(guest_addr) & PAGE_VALID) {
            maperr = false;
        } else {
            info->si_code = SEGV_MAPERR;
        }
    }

    sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
}

static void host_sigbus_handler(CPUState *cpu, siginfo_t *info,
                                host_sigcontext *uc)
{
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    if (info->si_code == BUS_ADRALN) {
        uintptr_t host_addr = (uintptr_t)info->si_addr;
        abi_ptr guest_addr = h2g_nocheck(host_addr);

        sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
        cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
    }
}

static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    TaskState *ts = cpu->opaque;
    target_siginfo_t tinfo;
    host_sigcontext *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;
    sigset_t *sigmask;

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.  Non-spoofed SIGILL,
     * SIGFPE, SIGTRAP are always host bugs.
     */
    if (info->si_code > 0) {
        switch (host_sig) {
        case SIGSEGV:
            /* Only returns on handle_sigsegv_accerr_write success. */
            host_sigsegv_handler(cpu, info, uc);
            return;
        case SIGBUS:
            host_sigbus_handler(cpu, info, uc);
            sync_sig = true;
            break;
        case SIGILL:
        case SIGFPE:
        case SIGTRAP:
            die_from_signal(info);
        }
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     */
    sigmask = host_signal_mask(uc);
    memset(sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(sigmask, SIGSEGV);
    sigdelset(sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

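/*
 * Summary of the two delivery paths above: synchronous faults raised by
 * guest code (SIGSEGV/SIGBUS) unwind to the faulting instruction and
 * leave the cpu loop immediately, while asynchronous signals are merely
 * recorded in ts->sigtab and picked up by process_pending_signals()
 * once the main loop regains control.
 */
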
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -QEMU_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here, because some programs try to
             * register a handler for every possible rt signal even if they
             * don't need it.
             * An error here would abort them, whereas it is harmless to
             * find the signal unavailable later.
             * This is the case for golang,
             *   See https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            struct sigaction act1;

            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->_sa_handler == TARGET_SIG_IGN) {
                /*
                 * It is important to update the host kernel signal ignore
                 * state to avoid getting unexpected interrupted syscalls.
                 */
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (core_dump_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
                if (k->sa_flags & TARGET_SA_RESTART) {
                    act1.sa_flags |= SA_RESTART;
                }
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler : ignore some signals. The others are job
           control or fatal. */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(cpu_env, sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(cpu_env, sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}

int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
                            target_ulong sigsize)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t *host_set = &ts->sigsuspend_mask;
    target_sigset_t *target_sigset;

    if (sigsize != sizeof(*target_sigset)) {
        /* Like the kernel, we enforce correct size sigsets */
        return -TARGET_EINVAL;
    }

    target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
    if (!target_sigset) {
        return -TARGET_EFAULT;
    }
    target_to_host_sigset(host_set, target_sigset);
    unlock_user(target_sigset, sigset, 0);

    *pset = host_set;
    return 0;
}
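
/*
 * On success *pset points at ts->sigsuspend_mask, ready to be handed to
 * a sigsuspend-style safe syscall; the caller is then expected to set
 * ts->in_sigsuspend so that process_pending_signals() restores the
 * original mask afterwards (see the callers in syscall.c for the full
 * contract).
 */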