/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "gdbstub/user.h"
#include "hw/core/tcg-cpu-ops.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
#include "user/safe-syscall.h"
#include "tcg/tcg.h"

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;

/*
 * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
 * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
static uint8_t host_to_target_signal_table[_NSIG] = {
#define MAKE_SIG_ENTRY(sig)     [sig] = TARGET_##sig,
        MAKE_SIGNAL_LIST
#undef MAKE_SIG_ENTRY
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig >= _NSIG) {
        return TARGET_NSIG + 1;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig > TARGET_NSIG) {
        return _NSIG;
    }
    return target_to_host_signal_table[sig];
}
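
/*
 * Note: target signal numbers are 1-based, so the bit helpers below
 * subtract one before selecting a word and bit in the target sigset.
 */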
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}
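
/*
 * Convert a host sigset to a target sigset bit by bit; host signals
 * that have no valid target mapping are simply dropped from the result.
 */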
void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;

    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;

    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;

    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}
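
/*
 * Block all host signals for this thread. Returns the previous value of
 * signal_pending, i.e. non-zero if a signal was already pending when we
 * were called, which typically makes the caller return -QEMU_ERESTARTSYS.
 */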
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -QEMU_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}
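
/*
 * Validate a guest-supplied stack_t and install it as this thread's
 * alternate signal stack, mirroring the kernel's do_sigaltstack() checks:
 * EPERM while currently running on the stack, EINVAL for bad flags,
 * ENOMEM for an undersized stack.
 */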
abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;

    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            if (si_code == CLD_EXITED) {
                tinfo->_sifields._sigchld._status = info->si_status;
            } else {
                tinfo->_sifields._sigchld._status
                    = host_to_target_signal(info->si_status & 0x7f)
                    | (info->si_status & ~0x7f);
            }
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
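
/*
 * Byte-swap a target_siginfo_t into guest order, using the si_type
 * marker that host_to_target_siginfo_noswap() stored in the top 16 bits
 * of si_code to decide which union members are valid.
 */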
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;

    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we only support POSIX RT signals here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int hsig, tsig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  Glibc uses the lower 2
     * RT signals and (hopefully) nobody uses the upper ones.
     * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
     * To fix this properly we would need to do manual signal delivery
     * multiplexed over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     *
     * Remap the target SIGABRT, so that we can distinguish host abort
     * from guest abort.  When the guest registers a signal handler or
     * calls raise(SIGABRT), the host will raise SIG_RTn.  If the guest
     * arrives at dump_core_and_abort(), we will map back to host SIGABRT
     * so that the parent (native or emulated) sees the correct signal.
     * Finally, also map host to guest SIGABRT so that the emulated
     * parent sees the correct mapping from wait status.
     */

    hsig = SIGRTMIN;
    host_to_target_signal_table[SIGABRT] = 0;
    host_to_target_signal_table[hsig++] = TARGET_SIGABRT;

    for (tsig = TARGET_SIGRTMIN;
         hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
         hsig++, tsig++) {
        host_to_target_signal_table[hsig] = tsig;
    }

    /* Invert the mapping that has already been assigned. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        tsig = host_to_target_signal_table[hsig];
        if (tsig) {
            assert(target_to_host_signal_table[tsig] == 0);
            target_to_host_signal_table[tsig] = hsig;
        }
    }

    host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;

    /* Map everything else out-of-bounds. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        if (host_to_target_signal_table[hsig] == 0) {
            host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
        }
    }
    for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        if (target_to_host_signal_table[tsig] == 0) {
            target_to_host_signal_table[tsig] = _NSIG;
            count++;
        }
    }

    trace_signal_table_init(count);
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act, oact;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;

    /*
     * A parent process may configure ignored signals, but all other
     * signals are default.  For any target signals that have no host
     * mapping, set to ignore.  For all core_dump_signal, install our
     * host signal handler so that we may invoke dump_core_and_abort.
     * This includes SIGSEGV and SIGBUS, which also need our signal
     * handler for paging and exceptions.
     */
    for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        int hsig = target_to_host_signal(tsig);
        abi_ptr thand = TARGET_SIG_IGN;

        if (hsig >= _NSIG) {
            continue;
        }

        /* As we force remap SIGABRT, cannot probe and install in one step. */
        if (tsig == TARGET_SIGABRT) {
            sigaction(SIGABRT, NULL, &oact);
            sigaction(hsig, &act, NULL);
        } else {
            struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
            sigaction(hsig, iact, &oact);
        }

        if (oact.sa_sigaction != (void *)SIG_IGN) {
            thand = TARGET_SIG_DFL;
        }
        sigact_table[tsig - 1]._sa_handler = thand;
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/* abort execution with signal */
static G_NORETURN
void die_with_signal(int host_sig)
{
    struct sigaction act = {
        .sa_handler = SIG_DFL,
    };

    /*
     * The proper exit code for dying from an uncaught signal is -<signal>.
     * The kernel doesn't allow exit() or _exit() to pass a negative value.
     * To get the proper exit code we need to actually die from an uncaught
     * signal.  Here the default signal handler is installed, we send
     * the signal and we wait for it to arrive.
     */
    sigfillset(&act.sa_mask);
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (reusing the mask inside of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    _exit(EXIT_FAILURE);
}

static G_NORETURN
void dump_core_and_abort(CPUArchState *env, int target_sig)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;

    /* On exit, undo the remapping of SIGABRT. */
    if (target_sig == TARGET_SIGABRT) {
        host_sig = SIGABRT;
    } else {
        host_sig = target_to_host_signal(target_sig);
    }
    trace_user_dump_core_and_abort(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped" );
    }

    preexit_cleanup(env, 128 + target_sig);
    die_with_signal(host_sig);
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
}

/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    host_sigcontext *uc = (host_sigcontext *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}

static G_NORETURN
void die_from_signal(siginfo_t *info)
{
    char sigbuf[4], codebuf[12];
    const char *sig, *code = NULL;

    switch (info->si_signo) {
    case SIGSEGV:
        sig = "SEGV";
        switch (info->si_code) {
        case SEGV_MAPERR:
            code = "MAPERR";
            break;
        case SEGV_ACCERR:
            code = "ACCERR";
            break;
        }
        break;
    case SIGBUS:
        sig = "BUS";
        switch (info->si_code) {
        case BUS_ADRALN:
            code = "ADRALN";
            break;
        case BUS_ADRERR:
            code = "ADRERR";
            break;
        }
        break;
    case SIGILL:
        sig = "ILL";
        switch (info->si_code) {
        case ILL_ILLOPC:
            code = "ILLOPC";
            break;
        case ILL_ILLOPN:
            code = "ILLOPN";
            break;
        case ILL_ILLADR:
            code = "ILLADR";
            break;
        case ILL_PRVOPC:
            code = "PRVOPC";
            break;
        case ILL_PRVREG:
            code = "PRVREG";
            break;
        case ILL_COPROC:
            code = "COPROC";
            break;
        }
        break;
    case SIGFPE:
        sig = "FPE";
        switch (info->si_code) {
        case FPE_INTDIV:
            code = "INTDIV";
            break;
        case FPE_INTOVF:
            code = "INTOVF";
            break;
        }
        break;
    case SIGTRAP:
        sig = "TRAP";
        break;
    default:
        snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
        sig = sigbuf;
        break;
    }
    if (code == NULL) {
        snprintf(codebuf, sizeof(codebuf), "%d", info->si_code);
        code = codebuf;
    }

    error_report("QEMU internal SIG%s {code=%s, addr=%p}",
                 sig, code, info->si_addr);
    die_with_signal(info->si_signo);
}

static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
                                 host_sigcontext *uc)
{
    uintptr_t host_addr = (uintptr_t)info->si_addr;
    /*
     * Convert forcefully to guest address space: addresses outside
     * reserved_va are still valid to report via SEGV_MAPERR.
     */
    bool is_valid = h2g_valid(host_addr);
    abi_ptr guest_addr = h2g_nocheck(host_addr);
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
    bool maperr;

    /* If this was a write to a TB protected page, restart. */
    if (is_write
        && is_valid
        && info->si_code == SEGV_ACCERR
        && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
                                       pc, guest_addr)) {
        return;
    }

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (access_type != MMU_INST_FETCH
        && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    maperr = true;
    if (is_valid && info->si_code == SEGV_ACCERR) {
        /*
         * With reserved_va, the whole address space is PROT_NONE,
         * which means that we may get ACCERR when we want MAPERR.
         */
        if (page_get_flags(guest_addr) & PAGE_VALID) {
            maperr = false;
        } else {
            info->si_code = SEGV_MAPERR;
        }
    }

    sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
}
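
/*
 * Handle a host SIGBUS while executing guest code: alignment faults exit
 * via cpu_loop_exit_sigbus(); otherwise the adjusted host PC is returned
 * so that the caller can unwind synchronously.
 */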
static uintptr_t host_sigbus_handler(CPUState *cpu, siginfo_t *info,
                                     host_sigcontext *uc)
{
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    if (info->si_code == BUS_ADRALN) {
        uintptr_t host_addr = (uintptr_t)info->si_addr;
        abi_ptr guest_addr = h2g_nocheck(host_addr);

        sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
        cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
    }
    return pc;
}
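
/*
 * Central host signal handler, installed with SA_SIGINFO for every host
 * signal we use. Synchronous faults (SEGV/BUS) are dispatched to the
 * helpers above; everything else is recorded in the per-thread sigtab
 * for later delivery by process_pending_signals().
 */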
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    TaskState *ts = cpu->opaque;
    target_siginfo_t tinfo;
    host_sigcontext *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;
    void *sigmask;

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.  Non-spoofed SIGILL,
     * SIGFPE, SIGTRAP are always host bugs.
     */
    if (info->si_code > 0) {
        switch (host_sig) {
        case SIGSEGV:
            /* Only returns on handle_sigsegv_accerr_write success. */
            host_sigsegv_handler(cpu, info, uc);
            return;
        case SIGBUS:
            pc = host_sigbus_handler(cpu, info, uc);
            sync_sig = true;
            break;
        case SIGILL:
        case SIGFPE:
        case SIGTRAP:
            die_from_signal(info);
        }
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     */
    sigmask = host_signal_mask(uc);
    memset(sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(sigmask, SIGSEGV);
    sigdelset(sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        unlock_user_struct(uss, uss_addr, 0);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() return target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -QEMU_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * we don't return an error here because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.
             * An error here can abort them whereas there can be no problem
             * to not have the signal available later.
             * This is the case for golang,
             *   See https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            struct sigaction act1;

            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->_sa_handler == TARGET_SIG_IGN) {
                /*
                 * It is important to update the host kernel signal ignore
                 * state to avoid getting unexpected interrupted syscalls.
                 */
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (core_dump_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
                if (k->sa_flags & TARGET_SA_RESTART) {
                    act1.sa_flags |= SA_RESTART;
                }
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
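
/*
 * Deliver one guest signal: perform the default action (stop, ignore,
 * or dump core) or set up the guest signal frame for a registered
 * handler, updating the signal mask as the kernel would.
 */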
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals. The others are job control or fatal. */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(cpu_env, sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(cpu_env, sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
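
/*
 * Drain all pending guest signals. Called whenever signal_pending is
 * set; host signals are blocked while each guest signal is dispatched,
 * so the pending state cannot change underneath us.
 */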
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
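
/*
 * Stage a guest sigsuspend() mask, validating its size as the kernel
 * does. On success, *pset points at ts->sigsuspend_mask, for the caller
 * to hand to the host sigsuspend().
 */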
int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
                            target_ulong sigsize)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t *host_set = &ts->sigsuspend_mask;
    target_sigset_t *target_sigset;

    if (sigsize != sizeof(*target_sigset)) {
        /* Like the kernel, we enforce correct size sigsets */
        return -TARGET_EINVAL;
    }

    target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
    if (!target_sigset) {
        return -TARGET_EFAULT;
    }
    target_to_host_sigset(host_set, target_sigset);
    unlock_user(target_sigset, sigset, 0);

    *pset = host_set;
    return 0;
}