dump-guest-memory: more descriptive lookup_type failure
[qemu/ar7.git] / linux-user / signal.c
blob2ea3e0321f4d042aa2427304681dc66aaffe4d4c
1 /*
2 * Emulation of Linux signals
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
29 static struct target_sigaltstack target_sigaltstack_used = {
30 .ss_sp = 0,
31 .ss_size = 0,
32 .ss_flags = TARGET_SS_DISABLE,
35 static struct target_sigaction sigact_table[TARGET_NSIG];
37 static void host_signal_handler(int host_signum, siginfo_t *info,
38 void *puc);
40 static uint8_t host_to_target_signal_table[_NSIG] = {
41 [SIGHUP] = TARGET_SIGHUP,
42 [SIGINT] = TARGET_SIGINT,
43 [SIGQUIT] = TARGET_SIGQUIT,
44 [SIGILL] = TARGET_SIGILL,
45 [SIGTRAP] = TARGET_SIGTRAP,
46 [SIGABRT] = TARGET_SIGABRT,
47 /* [SIGIOT] = TARGET_SIGIOT,*/
48 [SIGBUS] = TARGET_SIGBUS,
49 [SIGFPE] = TARGET_SIGFPE,
50 [SIGKILL] = TARGET_SIGKILL,
51 [SIGUSR1] = TARGET_SIGUSR1,
52 [SIGSEGV] = TARGET_SIGSEGV,
53 [SIGUSR2] = TARGET_SIGUSR2,
54 [SIGPIPE] = TARGET_SIGPIPE,
55 [SIGALRM] = TARGET_SIGALRM,
56 [SIGTERM] = TARGET_SIGTERM,
57 #ifdef SIGSTKFLT
58 [SIGSTKFLT] = TARGET_SIGSTKFLT,
59 #endif
60 [SIGCHLD] = TARGET_SIGCHLD,
61 [SIGCONT] = TARGET_SIGCONT,
62 [SIGSTOP] = TARGET_SIGSTOP,
63 [SIGTSTP] = TARGET_SIGTSTP,
64 [SIGTTIN] = TARGET_SIGTTIN,
65 [SIGTTOU] = TARGET_SIGTTOU,
66 [SIGURG] = TARGET_SIGURG,
67 [SIGXCPU] = TARGET_SIGXCPU,
68 [SIGXFSZ] = TARGET_SIGXFSZ,
69 [SIGVTALRM] = TARGET_SIGVTALRM,
70 [SIGPROF] = TARGET_SIGPROF,
71 [SIGWINCH] = TARGET_SIGWINCH,
72 [SIGIO] = TARGET_SIGIO,
73 [SIGPWR] = TARGET_SIGPWR,
74 [SIGSYS] = TARGET_SIGSYS,
75 /* next signals stay the same */
76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
78 To fix this properly we need to do manual signal delivery multiplexed
79 over a single host signal. */
80 [__SIGRTMIN] = __SIGRTMAX,
81 [__SIGRTMAX] = __SIGRTMIN,
83 static uint8_t target_to_host_signal_table[_NSIG];
85 static inline int on_sig_stack(unsigned long sp)
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
91 static inline int sas_ss_flags(unsigned long sp)
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
97 int host_to_target_signal(int sig)
99 if (sig < 0 || sig >= _NSIG)
100 return sig;
101 return host_to_target_signal_table[sig];
104 int target_to_host_signal(int sig)
106 if (sig < 0 || sig >= _NSIG)
107 return sig;
108 return target_to_host_signal_table[sig];
111 static inline void target_sigemptyset(target_sigset_t *set)
113 memset(set, 0, sizeof(*set));
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
118 signum--;
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
125 signum--;
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
130 static void host_to_target_sigset_internal(target_sigset_t *d,
131 const sigset_t *s)
133 int i;
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
144 target_sigset_t d1;
145 int i;
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
155 int i;
156 sigemptyset(d);
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
166 target_sigset_t s1;
167 int i;
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
177 target_sigset_t d;
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
185 target_sigset_t d;
186 int i;
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
190 d.sig[i] = 0;
191 target_to_host_sigset(sigset, &d);
194 int block_signals(void)
196 TaskState *ts = (TaskState *)thread_cpu->opaque;
197 sigset_t set;
199 /* It's OK to block everything including SIGSEGV, because we won't
200 * run any further guest code before unblocking signals in
201 * process_pending_signals().
203 sigfillset(&set);
204 sigprocmask(SIG_SETMASK, &set, 0);
206 return atomic_xchg(&ts->signal_pending, 1);
209 /* Wrapper for sigprocmask function
210 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
211 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
212 * a signal was already pending and the syscall must be restarted, or
213 * 0 on success.
214 * If set is NULL, this is guaranteed not to fail.
216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
218 TaskState *ts = (TaskState *)thread_cpu->opaque;
220 if (oldset) {
221 *oldset = ts->signal_mask;
224 if (set) {
225 int i;
227 if (block_signals()) {
228 return -TARGET_ERESTARTSYS;
231 switch (how) {
232 case SIG_BLOCK:
233 sigorset(&ts->signal_mask, &ts->signal_mask, set);
234 break;
235 case SIG_UNBLOCK:
236 for (i = 1; i <= NSIG; ++i) {
237 if (sigismember(set, i)) {
238 sigdelset(&ts->signal_mask, i);
241 break;
242 case SIG_SETMASK:
243 ts->signal_mask = *set;
244 break;
245 default:
246 g_assert_not_reached();
249 /* Silently ignore attempts to change blocking status of KILL or STOP */
250 sigdelset(&ts->signal_mask, SIGKILL);
251 sigdelset(&ts->signal_mask, SIGSTOP);
253 return 0;
256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
257 /* Just set the guest's signal mask to the specified value; the
258 * caller is assumed to have called block_signals() already.
260 static void set_sigmask(const sigset_t *set)
262 TaskState *ts = (TaskState *)thread_cpu->opaque;
264 ts->signal_mask = *set;
266 #endif
268 /* siginfo conversion */
270 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
271 const siginfo_t *info)
273 int sig = host_to_target_signal(info->si_signo);
274 int si_code = info->si_code;
275 int si_type;
276 tinfo->si_signo = sig;
277 tinfo->si_errno = 0;
278 tinfo->si_code = info->si_code;
280 /* This memset serves two purposes:
281 * (1) ensure we don't leak random junk to the guest later
282 * (2) placate false positives from gcc about fields
283 * being used uninitialized if it chooses to inline both this
284 * function and tswap_siginfo() into host_to_target_siginfo().
286 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
288 /* This is awkward, because we have to use a combination of
289 * the si_code and si_signo to figure out which of the union's
290 * members are valid. (Within the host kernel it is always possible
291 * to tell, but the kernel carefully avoids giving userspace the
292 * high 16 bits of si_code, so we don't have the information to
293 * do this the easy way...) We therefore make our best guess,
294 * bearing in mind that a guest can spoof most of the si_codes
295 * via rt_sigqueueinfo() if it likes.
297 * Once we have made our guess, we record it in the top 16 bits of
298 * the si_code, so that tswap_siginfo() later can use it.
299 * tswap_siginfo() will strip these top bits out before writing
300 * si_code to the guest (sign-extending the lower bits).
303 switch (si_code) {
304 case SI_USER:
305 case SI_TKILL:
306 case SI_KERNEL:
307 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
308 * These are the only unspoofable si_code values.
310 tinfo->_sifields._kill._pid = info->si_pid;
311 tinfo->_sifields._kill._uid = info->si_uid;
312 si_type = QEMU_SI_KILL;
313 break;
314 default:
315 /* Everything else is spoofable. Make best guess based on signal */
316 switch (sig) {
317 case TARGET_SIGCHLD:
318 tinfo->_sifields._sigchld._pid = info->si_pid;
319 tinfo->_sifields._sigchld._uid = info->si_uid;
320 tinfo->_sifields._sigchld._status
321 = host_to_target_waitstatus(info->si_status);
322 tinfo->_sifields._sigchld._utime = info->si_utime;
323 tinfo->_sifields._sigchld._stime = info->si_stime;
324 si_type = QEMU_SI_CHLD;
325 break;
326 case TARGET_SIGIO:
327 tinfo->_sifields._sigpoll._band = info->si_band;
328 tinfo->_sifields._sigpoll._fd = info->si_fd;
329 si_type = QEMU_SI_POLL;
330 break;
331 default:
332 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
333 tinfo->_sifields._rt._pid = info->si_pid;
334 tinfo->_sifields._rt._uid = info->si_uid;
335 /* XXX: potential problem if 64 bit */
336 tinfo->_sifields._rt._sigval.sival_ptr
337 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
338 si_type = QEMU_SI_RT;
339 break;
341 break;
344 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
347 static void tswap_siginfo(target_siginfo_t *tinfo,
348 const target_siginfo_t *info)
350 int si_type = extract32(info->si_code, 16, 16);
351 int si_code = sextract32(info->si_code, 0, 16);
353 __put_user(info->si_signo, &tinfo->si_signo);
354 __put_user(info->si_errno, &tinfo->si_errno);
355 __put_user(si_code, &tinfo->si_code);
357 /* We can use our internal marker of which fields in the structure
358 * are valid, rather than duplicating the guesswork of
359 * host_to_target_siginfo_noswap() here.
361 switch (si_type) {
362 case QEMU_SI_KILL:
363 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
364 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
365 break;
366 case QEMU_SI_TIMER:
367 __put_user(info->_sifields._timer._timer1,
368 &tinfo->_sifields._timer._timer1);
369 __put_user(info->_sifields._timer._timer2,
370 &tinfo->_sifields._timer._timer2);
371 break;
372 case QEMU_SI_POLL:
373 __put_user(info->_sifields._sigpoll._band,
374 &tinfo->_sifields._sigpoll._band);
375 __put_user(info->_sifields._sigpoll._fd,
376 &tinfo->_sifields._sigpoll._fd);
377 break;
378 case QEMU_SI_FAULT:
379 __put_user(info->_sifields._sigfault._addr,
380 &tinfo->_sifields._sigfault._addr);
381 break;
382 case QEMU_SI_CHLD:
383 __put_user(info->_sifields._sigchld._pid,
384 &tinfo->_sifields._sigchld._pid);
385 __put_user(info->_sifields._sigchld._uid,
386 &tinfo->_sifields._sigchld._uid);
387 __put_user(info->_sifields._sigchld._status,
388 &tinfo->_sifields._sigchld._status);
389 __put_user(info->_sifields._sigchld._utime,
390 &tinfo->_sifields._sigchld._utime);
391 __put_user(info->_sifields._sigchld._stime,
392 &tinfo->_sifields._sigchld._stime);
393 break;
394 case QEMU_SI_RT:
395 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
396 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
397 __put_user(info->_sifields._rt._sigval.sival_ptr,
398 &tinfo->_sifields._rt._sigval.sival_ptr);
399 break;
400 default:
401 g_assert_not_reached();
405 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
407 target_siginfo_t tgt_tmp;
408 host_to_target_siginfo_noswap(&tgt_tmp, info);
409 tswap_siginfo(tinfo, &tgt_tmp);
412 /* XXX: we support only POSIX RT signals are used. */
413 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
414 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
416 /* This conversion is used only for the rt_sigqueueinfo syscall,
417 * and so we know that the _rt fields are the valid ones.
419 abi_ulong sival_ptr;
421 __get_user(info->si_signo, &tinfo->si_signo);
422 __get_user(info->si_errno, &tinfo->si_errno);
423 __get_user(info->si_code, &tinfo->si_code);
424 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
425 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
426 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
427 info->si_value.sival_ptr = (void *)(long)sival_ptr;
430 static int fatal_signal (int sig)
432 switch (sig) {
433 case TARGET_SIGCHLD:
434 case TARGET_SIGURG:
435 case TARGET_SIGWINCH:
436 /* Ignored by default. */
437 return 0;
438 case TARGET_SIGCONT:
439 case TARGET_SIGSTOP:
440 case TARGET_SIGTSTP:
441 case TARGET_SIGTTIN:
442 case TARGET_SIGTTOU:
443 /* Job control signals. */
444 return 0;
445 default:
446 return 1;
450 /* returns 1 if given signal should dump core if not handled */
451 static int core_dump_signal(int sig)
453 switch (sig) {
454 case TARGET_SIGABRT:
455 case TARGET_SIGFPE:
456 case TARGET_SIGILL:
457 case TARGET_SIGQUIT:
458 case TARGET_SIGSEGV:
459 case TARGET_SIGTRAP:
460 case TARGET_SIGBUS:
461 return (1);
462 default:
463 return (0);
467 void signal_init(void)
469 TaskState *ts = (TaskState *)thread_cpu->opaque;
470 struct sigaction act;
471 struct sigaction oact;
472 int i, j;
473 int host_sig;
475 /* generate signal conversion tables */
476 for(i = 1; i < _NSIG; i++) {
477 if (host_to_target_signal_table[i] == 0)
478 host_to_target_signal_table[i] = i;
480 for(i = 1; i < _NSIG; i++) {
481 j = host_to_target_signal_table[i];
482 target_to_host_signal_table[j] = i;
485 /* Set the signal mask from the host mask. */
486 sigprocmask(0, 0, &ts->signal_mask);
488 /* set all host signal handlers. ALL signals are blocked during
489 the handlers to serialize them. */
490 memset(sigact_table, 0, sizeof(sigact_table));
492 sigfillset(&act.sa_mask);
493 act.sa_flags = SA_SIGINFO;
494 act.sa_sigaction = host_signal_handler;
495 for(i = 1; i <= TARGET_NSIG; i++) {
496 host_sig = target_to_host_signal(i);
497 sigaction(host_sig, NULL, &oact);
498 if (oact.sa_sigaction == (void *)SIG_IGN) {
499 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
500 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
501 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
503 /* If there's already a handler installed then something has
504 gone horribly wrong, so don't even try to handle that case. */
505 /* Install some handlers for our own use. We need at least
506 SIGSEGV and SIGBUS, to detect exceptions. We can not just
507 trap all signals because it affects syscall interrupt
508 behavior. But do trap all default-fatal signals. */
509 if (fatal_signal (i))
510 sigaction(host_sig, &act, NULL);
514 /* Force a synchronously taken signal. The kernel force_sig() function
515 * also forces the signal to "not blocked, not ignored", but for QEMU
516 * that work is done in process_pending_signals().
518 static void force_sig(int sig)
520 CPUState *cpu = thread_cpu;
521 CPUArchState *env = cpu->env_ptr;
522 target_siginfo_t info;
524 info.si_signo = sig;
525 info.si_errno = 0;
526 info.si_code = TARGET_SI_KERNEL;
527 info._sifields._kill._pid = 0;
528 info._sifields._kill._uid = 0;
529 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
532 /* Force a SIGSEGV if we couldn't write to memory trying to set
533 * up the signal frame. oldsig is the signal we were trying to handle
534 * at the point of failure.
536 #if !defined(TARGET_RISCV)
537 static void force_sigsegv(int oldsig)
539 if (oldsig == SIGSEGV) {
540 /* Make sure we don't try to deliver the signal again; this will
541 * end up with handle_pending_signal() calling dump_core_and_abort().
543 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
545 force_sig(TARGET_SIGSEGV);
548 #endif
550 /* abort execution with signal */
551 static void QEMU_NORETURN dump_core_and_abort(int target_sig)
553 CPUState *cpu = thread_cpu;
554 CPUArchState *env = cpu->env_ptr;
555 TaskState *ts = (TaskState *)cpu->opaque;
556 int host_sig, core_dumped = 0;
557 struct sigaction act;
559 host_sig = target_to_host_signal(target_sig);
560 trace_user_force_sig(env, target_sig, host_sig);
561 gdb_signalled(env, target_sig);
563 /* dump core if supported by target binary format */
564 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
565 stop_all_tasks();
566 core_dumped =
567 ((*ts->bprm->core_dump)(target_sig, env) == 0);
569 if (core_dumped) {
570 /* we already dumped the core of target process, we don't want
571 * a coredump of qemu itself */
572 struct rlimit nodump;
573 getrlimit(RLIMIT_CORE, &nodump);
574 nodump.rlim_cur=0;
575 setrlimit(RLIMIT_CORE, &nodump);
576 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
577 target_sig, strsignal(host_sig), "core dumped" );
580 /* The proper exit code for dying from an uncaught signal is
581 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
582 * a negative value. To get the proper exit code we need to
583 * actually die from an uncaught signal. Here the default signal
584 * handler is installed, we send ourself a signal and we wait for
585 * it to arrive. */
586 sigfillset(&act.sa_mask);
587 act.sa_handler = SIG_DFL;
588 act.sa_flags = 0;
589 sigaction(host_sig, &act, NULL);
591 /* For some reason raise(host_sig) doesn't send the signal when
592 * statically linked on x86-64. */
593 kill(getpid(), host_sig);
595 /* Make sure the signal isn't masked (just reuse the mask inside
596 of act) */
597 sigdelset(&act.sa_mask, host_sig);
598 sigsuspend(&act.sa_mask);
600 /* unreachable */
601 abort();
604 /* queue a signal so that it will be send to the virtual CPU as soon
605 as possible */
606 int queue_signal(CPUArchState *env, int sig, int si_type,
607 target_siginfo_t *info)
609 CPUState *cpu = ENV_GET_CPU(env);
610 TaskState *ts = cpu->opaque;
612 trace_user_queue_signal(env, sig);
614 info->si_code = deposit32(info->si_code, 16, 16, si_type);
616 ts->sync_signal.info = *info;
617 ts->sync_signal.pending = sig;
618 /* signal that a new signal is pending */
619 atomic_set(&ts->signal_pending, 1);
620 return 1; /* indicates that the signal was queued */
#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
630 static void host_signal_handler(int host_signum, siginfo_t *info,
631 void *puc)
633 CPUArchState *env = thread_cpu->env_ptr;
634 CPUState *cpu = ENV_GET_CPU(env);
635 TaskState *ts = cpu->opaque;
637 int sig;
638 target_siginfo_t tinfo;
639 ucontext_t *uc = puc;
640 struct emulated_sigtable *k;
642 /* the CPU emulator uses some host signals to detect exceptions,
643 we forward to it some signals */
644 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
645 && info->si_code > 0) {
646 if (cpu_signal_handler(host_signum, info, puc))
647 return;
650 /* get target signal number */
651 sig = host_to_target_signal(host_signum);
652 if (sig < 1 || sig > TARGET_NSIG)
653 return;
654 trace_user_host_signal(env, host_signum, sig);
656 rewind_if_in_safe_syscall(puc);
658 host_to_target_siginfo_noswap(&tinfo, info);
659 k = &ts->sigtab[sig - 1];
660 k->info = tinfo;
661 k->pending = sig;
662 ts->signal_pending = 1;
664 /* Block host signals until target signal handler entered. We
665 * can't block SIGSEGV or SIGBUS while we're executing guest
666 * code in case the guest code provokes one in the window between
667 * now and it getting out to the main loop. Signals will be
668 * unblocked again in process_pending_signals().
670 * WARNING: we cannot use sigfillset() here because the uc_sigmask
671 * field is a kernel sigset_t, which is much smaller than the
672 * libc sigset_t which sigfillset() operates on. Using sigfillset()
673 * would write 0xff bytes off the end of the structure and trash
674 * data on the struct.
675 * We can't use sizeof(uc->uc_sigmask) either, because the libc
676 * headers define the struct field with the wrong (too large) type.
678 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
679 sigdelset(&uc->uc_sigmask, SIGSEGV);
680 sigdelset(&uc->uc_sigmask, SIGBUS);
682 /* interrupt the virtual CPU as soon as possible */
683 cpu_exit(thread_cpu);
686 /* do_sigaltstack() returns target values and errnos. */
687 /* compare linux/kernel/signal.c:do_sigaltstack() */
688 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
690 int ret;
691 struct target_sigaltstack oss;
693 /* XXX: test errors */
694 if(uoss_addr)
696 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
697 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
698 __put_user(sas_ss_flags(sp), &oss.ss_flags);
701 if(uss_addr)
703 struct target_sigaltstack *uss;
704 struct target_sigaltstack ss;
705 size_t minstacksize = TARGET_MINSIGSTKSZ;
707 #if defined(TARGET_PPC64)
708 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
709 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
710 if (get_ppc64_abi(image) > 1) {
711 minstacksize = 4096;
713 #endif
715 ret = -TARGET_EFAULT;
716 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
717 goto out;
719 __get_user(ss.ss_sp, &uss->ss_sp);
720 __get_user(ss.ss_size, &uss->ss_size);
721 __get_user(ss.ss_flags, &uss->ss_flags);
722 unlock_user_struct(uss, uss_addr, 0);
724 ret = -TARGET_EPERM;
725 if (on_sig_stack(sp))
726 goto out;
728 ret = -TARGET_EINVAL;
729 if (ss.ss_flags != TARGET_SS_DISABLE
730 && ss.ss_flags != TARGET_SS_ONSTACK
731 && ss.ss_flags != 0)
732 goto out;
734 if (ss.ss_flags == TARGET_SS_DISABLE) {
735 ss.ss_size = 0;
736 ss.ss_sp = 0;
737 } else {
738 ret = -TARGET_ENOMEM;
739 if (ss.ss_size < minstacksize) {
740 goto out;
744 target_sigaltstack_used.ss_sp = ss.ss_sp;
745 target_sigaltstack_used.ss_size = ss.ss_size;
748 if (uoss_addr) {
749 ret = -TARGET_EFAULT;
750 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
751 goto out;
754 ret = 0;
755 out:
756 return ret;
759 /* do_sigaction() return target values and host errnos */
760 int do_sigaction(int sig, const struct target_sigaction *act,
761 struct target_sigaction *oact)
763 struct target_sigaction *k;
764 struct sigaction act1;
765 int host_sig;
766 int ret = 0;
768 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
769 return -TARGET_EINVAL;
772 if (block_signals()) {
773 return -TARGET_ERESTARTSYS;
776 k = &sigact_table[sig - 1];
777 if (oact) {
778 __put_user(k->_sa_handler, &oact->_sa_handler);
779 __put_user(k->sa_flags, &oact->sa_flags);
780 #ifdef TARGET_ARCH_HAS_SA_RESTORER
781 __put_user(k->sa_restorer, &oact->sa_restorer);
782 #endif
783 /* Not swapped. */
784 oact->sa_mask = k->sa_mask;
786 if (act) {
787 /* FIXME: This is not threadsafe. */
788 __get_user(k->_sa_handler, &act->_sa_handler);
789 __get_user(k->sa_flags, &act->sa_flags);
790 #ifdef TARGET_ARCH_HAS_SA_RESTORER
791 __get_user(k->sa_restorer, &act->sa_restorer);
792 #endif
793 /* To be swapped in target_to_host_sigset. */
794 k->sa_mask = act->sa_mask;
796 /* we update the host linux signal state */
797 host_sig = target_to_host_signal(sig);
798 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
799 sigfillset(&act1.sa_mask);
800 act1.sa_flags = SA_SIGINFO;
801 if (k->sa_flags & TARGET_SA_RESTART)
802 act1.sa_flags |= SA_RESTART;
803 /* NOTE: it is important to update the host kernel signal
804 ignore state to avoid getting unexpected interrupted
805 syscalls */
806 if (k->_sa_handler == TARGET_SIG_IGN) {
807 act1.sa_sigaction = (void *)SIG_IGN;
808 } else if (k->_sa_handler == TARGET_SIG_DFL) {
809 if (fatal_signal (sig))
810 act1.sa_sigaction = host_signal_handler;
811 else
812 act1.sa_sigaction = (void *)SIG_DFL;
813 } else {
814 act1.sa_sigaction = host_signal_handler;
816 ret = sigaction(host_sig, &act1, NULL);
819 return ret;
822 #if defined(TARGET_I386)
823 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
825 struct target_fpreg {
826 uint16_t significand[4];
827 uint16_t exponent;
830 struct target_fpxreg {
831 uint16_t significand[4];
832 uint16_t exponent;
833 uint16_t padding[3];
836 struct target_xmmreg {
837 uint32_t element[4];
840 struct target_fpstate_32 {
841 /* Regular FPU environment */
842 uint32_t cw;
843 uint32_t sw;
844 uint32_t tag;
845 uint32_t ipoff;
846 uint32_t cssel;
847 uint32_t dataoff;
848 uint32_t datasel;
849 struct target_fpreg st[8];
850 uint16_t status;
851 uint16_t magic; /* 0xffff = regular FPU data only */
853 /* FXSR FPU environment */
854 uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */
855 uint32_t mxcsr;
856 uint32_t reserved;
857 struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
858 struct target_xmmreg xmm[8];
859 uint32_t padding[56];
862 struct target_fpstate_64 {
863 /* FXSAVE format */
864 uint16_t cw;
865 uint16_t sw;
866 uint16_t twd;
867 uint16_t fop;
868 uint64_t rip;
869 uint64_t rdp;
870 uint32_t mxcsr;
871 uint32_t mxcsr_mask;
872 uint32_t st_space[32];
873 uint32_t xmm_space[64];
874 uint32_t reserved[24];
877 #ifndef TARGET_X86_64
878 # define target_fpstate target_fpstate_32
879 #else
880 # define target_fpstate target_fpstate_64
881 #endif
883 struct target_sigcontext_32 {
884 uint16_t gs, __gsh;
885 uint16_t fs, __fsh;
886 uint16_t es, __esh;
887 uint16_t ds, __dsh;
888 uint32_t edi;
889 uint32_t esi;
890 uint32_t ebp;
891 uint32_t esp;
892 uint32_t ebx;
893 uint32_t edx;
894 uint32_t ecx;
895 uint32_t eax;
896 uint32_t trapno;
897 uint32_t err;
898 uint32_t eip;
899 uint16_t cs, __csh;
900 uint32_t eflags;
901 uint32_t esp_at_signal;
902 uint16_t ss, __ssh;
903 uint32_t fpstate; /* pointer */
904 uint32_t oldmask;
905 uint32_t cr2;
908 struct target_sigcontext_64 {
909 uint64_t r8;
910 uint64_t r9;
911 uint64_t r10;
912 uint64_t r11;
913 uint64_t r12;
914 uint64_t r13;
915 uint64_t r14;
916 uint64_t r15;
918 uint64_t rdi;
919 uint64_t rsi;
920 uint64_t rbp;
921 uint64_t rbx;
922 uint64_t rdx;
923 uint64_t rax;
924 uint64_t rcx;
925 uint64_t rsp;
926 uint64_t rip;
928 uint64_t eflags;
930 uint16_t cs;
931 uint16_t gs;
932 uint16_t fs;
933 uint16_t ss;
935 uint64_t err;
936 uint64_t trapno;
937 uint64_t oldmask;
938 uint64_t cr2;
940 uint64_t fpstate; /* pointer */
941 uint64_t padding[8];
944 #ifndef TARGET_X86_64
945 # define target_sigcontext target_sigcontext_32
946 #else
947 # define target_sigcontext target_sigcontext_64
948 #endif
950 /* see Linux/include/uapi/asm-generic/ucontext.h */
951 struct target_ucontext {
952 abi_ulong tuc_flags;
953 abi_ulong tuc_link;
954 target_stack_t tuc_stack;
955 struct target_sigcontext tuc_mcontext;
956 target_sigset_t tuc_sigmask; /* mask last for extensibility */
959 #ifndef TARGET_X86_64
960 struct sigframe {
961 abi_ulong pretcode;
962 int sig;
963 struct target_sigcontext sc;
964 struct target_fpstate fpstate;
965 abi_ulong extramask[TARGET_NSIG_WORDS-1];
966 char retcode[8];
969 struct rt_sigframe {
970 abi_ulong pretcode;
971 int sig;
972 abi_ulong pinfo;
973 abi_ulong puc;
974 struct target_siginfo info;
975 struct target_ucontext uc;
976 struct target_fpstate fpstate;
977 char retcode[8];
980 #else
982 struct rt_sigframe {
983 abi_ulong pretcode;
984 struct target_ucontext uc;
985 struct target_siginfo info;
986 struct target_fpstate fpstate;
989 #endif
992 * Set up a signal frame.
995 /* XXX: save x87 state */
996 static void setup_sigcontext(struct target_sigcontext *sc,
997 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
998 abi_ulong fpstate_addr)
1000 CPUState *cs = CPU(x86_env_get_cpu(env));
1001 #ifndef TARGET_X86_64
1002 uint16_t magic;
1004 /* already locked in setup_frame() */
1005 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
1006 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
1007 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
1008 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
1009 __put_user(env->regs[R_EDI], &sc->edi);
1010 __put_user(env->regs[R_ESI], &sc->esi);
1011 __put_user(env->regs[R_EBP], &sc->ebp);
1012 __put_user(env->regs[R_ESP], &sc->esp);
1013 __put_user(env->regs[R_EBX], &sc->ebx);
1014 __put_user(env->regs[R_EDX], &sc->edx);
1015 __put_user(env->regs[R_ECX], &sc->ecx);
1016 __put_user(env->regs[R_EAX], &sc->eax);
1017 __put_user(cs->exception_index, &sc->trapno);
1018 __put_user(env->error_code, &sc->err);
1019 __put_user(env->eip, &sc->eip);
1020 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
1021 __put_user(env->eflags, &sc->eflags);
1022 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
1023 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
1025 cpu_x86_fsave(env, fpstate_addr, 1);
1026 fpstate->status = fpstate->sw;
1027 magic = 0xffff;
1028 __put_user(magic, &fpstate->magic);
1029 __put_user(fpstate_addr, &sc->fpstate);
1031 /* non-iBCS2 extensions.. */
1032 __put_user(mask, &sc->oldmask);
1033 __put_user(env->cr[2], &sc->cr2);
1034 #else
1035 __put_user(env->regs[R_EDI], &sc->rdi);
1036 __put_user(env->regs[R_ESI], &sc->rsi);
1037 __put_user(env->regs[R_EBP], &sc->rbp);
1038 __put_user(env->regs[R_ESP], &sc->rsp);
1039 __put_user(env->regs[R_EBX], &sc->rbx);
1040 __put_user(env->regs[R_EDX], &sc->rdx);
1041 __put_user(env->regs[R_ECX], &sc->rcx);
1042 __put_user(env->regs[R_EAX], &sc->rax);
1044 __put_user(env->regs[8], &sc->r8);
1045 __put_user(env->regs[9], &sc->r9);
1046 __put_user(env->regs[10], &sc->r10);
1047 __put_user(env->regs[11], &sc->r11);
1048 __put_user(env->regs[12], &sc->r12);
1049 __put_user(env->regs[13], &sc->r13);
1050 __put_user(env->regs[14], &sc->r14);
1051 __put_user(env->regs[15], &sc->r15);
1053 __put_user(cs->exception_index, &sc->trapno);
1054 __put_user(env->error_code, &sc->err);
1055 __put_user(env->eip, &sc->rip);
1057 __put_user(env->eflags, &sc->eflags);
1058 __put_user(env->segs[R_CS].selector, &sc->cs);
1059 __put_user((uint16_t)0, &sc->gs);
1060 __put_user((uint16_t)0, &sc->fs);
1061 __put_user(env->segs[R_SS].selector, &sc->ss);
1063 __put_user(mask, &sc->oldmask);
1064 __put_user(env->cr[2], &sc->cr2);
1066 /* fpstate_addr must be 16 byte aligned for fxsave */
1067 assert(!(fpstate_addr & 0xf));
1069 cpu_x86_fxsave(env, fpstate_addr);
1070 __put_user(fpstate_addr, &sc->fpstate);
1071 #endif
1075 * Determine which stack to use..
1078 static inline abi_ulong
1079 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
1081 unsigned long esp;
1083 /* Default to using normal stack */
1084 esp = env->regs[R_ESP];
1085 #ifdef TARGET_X86_64
1086 esp -= 128; /* this is the redzone */
1087 #endif
1089 /* This is the X/Open sanctioned signal stack switching. */
1090 if (ka->sa_flags & TARGET_SA_ONSTACK) {
1091 if (sas_ss_flags(esp) == 0) {
1092 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1094 } else {
1095 #ifndef TARGET_X86_64
1096 /* This is the legacy signal stack switching. */
1097 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
1098 !(ka->sa_flags & TARGET_SA_RESTORER) &&
1099 ka->sa_restorer) {
1100 esp = (unsigned long) ka->sa_restorer;
1102 #endif
1105 #ifndef TARGET_X86_64
1106 return (esp - frame_size) & -8ul;
1107 #else
1108 return ((esp - frame_size) & (~15ul)) - 8;
1109 #endif
1112 #ifndef TARGET_X86_64
1113 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
/*
 * Build a non-RT (old-style) i386 signal frame on the guest stack and
 * redirect the CPU to the handler.  On an unwritable frame the task is
 * killed with SIGSEGV.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    /* Save CPU state and first mask word; fpstate lives inside the frame. */
    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    /* Remaining signal-mask words go into extramask[]. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    /* Flat user segments; clear the trap flag as the kernel does. */
    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
1170 #endif
1172 /* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
/*
 * Build an RT signal frame (with siginfo and ucontext) on the guest stack
 * and redirect the CPU to the handler.  Used for both i386 and x86-64;
 * the two ABIs differ in frame fields, trampoline and argument registers.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
#ifndef TARGET_X86_64
    abi_ulong addr;
#endif
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    /* These fields are only in rt_sigframe on 32 bit */
#ifndef TARGET_X86_64
    __put_user(sig, &frame->sig);
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
#endif
    if (ka->sa_flags & TARGET_SA_SIGINFO) {
        tswap_siginfo(&frame->info, info);
    }

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    /* Full signal mask is stored in the ucontext. */
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
#ifndef TARGET_X86_64
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }
#else
    /* XXX: Would be slightly better to return -EFAULT here if test fails
       assert(ka->sa_flags & TARGET_SA_RESTORER); */
    __put_user(ka->sa_restorer, &frame->pretcode);
#endif

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

#ifndef TARGET_X86_64
    /* i386: handler args passed in EAX/EDX/ECX. */
    env->regs[R_EAX] = sig;
    env->regs[R_EDX] = (unsigned long)&frame->info;
    env->regs[R_ECX] = (unsigned long)&frame->uc;
#else
    /* x86-64: SysV calling convention, args in EDI/ESI/EDX. */
    env->regs[R_EAX] = 0;
    env->regs[R_EDI] = sig;
    env->regs[R_ESI] = (unsigned long)&frame->info;
    env->regs[R_EDX] = (unsigned long)&frame->uc;
#endif

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
/*
 * Restore guest CPU state from a sigcontext written by setup_sigcontext.
 * Returns 0 on success, 1 if the saved FP state is unreadable.
 */
static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

#ifndef TARGET_X86_64
    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);

    env->eip = tswapl(sc->eip);
#else
    env->regs[8] = tswapl(sc->r8);
    env->regs[9] = tswapl(sc->r9);
    env->regs[10] = tswapl(sc->r10);
    env->regs[11] = tswapl(sc->r11);
    env->regs[12] = tswapl(sc->r12);
    env->regs[13] = tswapl(sc->r13);
    env->regs[14] = tswapl(sc->r14);
    env->regs[15] = tswapl(sc->r15);

    env->regs[R_EDI] = tswapl(sc->rdi);
    env->regs[R_ESI] = tswapl(sc->rsi);
    env->regs[R_EBP] = tswapl(sc->rbp);
    env->regs[R_EBX] = tswapl(sc->rbx);
    env->regs[R_EDX] = tswapl(sc->rdx);
    env->regs[R_EAX] = tswapl(sc->rax);
    env->regs[R_ECX] = tswapl(sc->rcx);
    env->regs[R_ESP] = tswapl(sc->rsp);

    env->eip = tswapl(sc->rip);
#endif

    /* Force RPL 3 (user mode) on the reloaded CS/SS selectors. */
    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    /* Only let the saved context change the bits in the 0x40DD5 mask;
     * all other eflags bits are preserved from the current state. */
    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    //		regs->orig_eax = -1;		/* disable syscall checks */

    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
#ifndef TARGET_X86_64
        cpu_x86_frstor(env, fpstate_addr, 1);
#else
        cpu_x86_fxrstor(env, fpstate_addr);
#endif
    }

    return err;
badframe:
    return 1;
}
1336 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1337 #ifndef TARGET_X86_64
1338 long do_sigreturn(CPUX86State *env)
1340 struct sigframe *frame;
1341 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1342 target_sigset_t target_set;
1343 sigset_t set;
1344 int i;
1346 trace_user_do_sigreturn(env, frame_addr);
1347 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1348 goto badframe;
1349 /* set blocked signals */
1350 __get_user(target_set.sig[0], &frame->sc.oldmask);
1351 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1352 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1355 target_to_host_sigset_internal(&set, &target_set);
1356 set_sigmask(&set);
1358 /* restore registers */
1359 if (restore_sigcontext(env, &frame->sc))
1360 goto badframe;
1361 unlock_user_struct(frame, frame_addr, 0);
1362 return -TARGET_QEMU_ESIGRETURN;
1364 badframe:
1365 unlock_user_struct(frame, frame_addr, 0);
1366 force_sig(TARGET_SIGSEGV);
1367 return -TARGET_QEMU_ESIGRETURN;
1369 #endif
1371 long do_rt_sigreturn(CPUX86State *env)
1373 abi_ulong frame_addr;
1374 struct rt_sigframe *frame;
1375 sigset_t set;
1377 frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
1378 trace_user_do_rt_sigreturn(env, frame_addr);
1379 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1380 goto badframe;
1381 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1382 set_sigmask(&set);
1384 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1385 goto badframe;
1388 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1389 get_sp_from_cpustate(env)) == -EFAULT) {
1390 goto badframe;
1393 unlock_user_struct(frame, frame_addr, 0);
1394 return -TARGET_QEMU_ESIGRETURN;
1396 badframe:
1397 unlock_user_struct(frame, frame_addr, 0);
1398 force_sig(TARGET_SIGSEGV);
1399 return -TARGET_QEMU_ESIGRETURN;
1402 #elif defined(TARGET_AARCH64)
/* AArch64 guest sigcontext: fault address, the 31 general registers,
 * SP, PC, PSTATE, then a 4K reserved area holding a list of variable
 * length records (FPSIMD, EXTRA, SVE, end marker). */
struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

#define TARGET_EXTRA_MAGIC  0x45585401

/* Describes a region outside the standard reserved space into which
 * further records overflow. */
struct target_extra_context {
    struct target_aarch64_ctx head;
    uint64_t datap; /* 16-byte aligned pointer to extra space cast to __u64 */
    uint32_t size; /* size in bytes of the extra space */
    uint32_t reserved[3];
};

#define TARGET_SVE_MAGIC    0x53564501

struct target_sve_context {
    struct target_aarch64_ctx head;
    uint16_t vl;
    uint16_t reserved[3];
    /* The actual SVE data immediately follows.  It is layed out
     * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
     * the original struct pointer.
     */
};

#define TARGET_SVE_VQ_BYTES  16

#define TARGET_SVE_SIG_ZREG_SIZE(VQ)  ((VQ) * TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_PREG_SIZE(VQ)  ((VQ) * (TARGET_SVE_VQ_BYTES / 8))

/* Offsets of the Z/P/FFR register images within an SVE record, all
 * measured from the start of the target_sve_context structure. */
#define TARGET_SVE_SIG_REGS_OFFSET \
    QEMU_ALIGN_UP(sizeof(struct target_sve_context), TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_ZREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_REGS_OFFSET + TARGET_SVE_SIG_ZREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_PREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_ZREG_OFFSET(VQ, 32) + TARGET_SVE_SIG_PREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_FFR_OFFSET(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 16))
#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
};

/* Frame record placed after the rt_sigframe; holds the saved FP/LR pair
 * for unwinding plus the sigreturn trampoline instructions. */
struct target_rt_frame_record {
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};
/*
 * Fill in the fixed (non-record) part of the AArch64 signal frame:
 * ucontext flags/link, sigaltstack description, general registers,
 * SP/PC/PSTATE, fault address and the full signal mask.
 */
static void target_setup_general_frame(struct target_rt_sigframe *sf,
                                       CPUARMState *env, target_sigset_t *set)
{
    int i;

    __put_user(0, &sf->uc.tuc_flags);
    __put_user(0, &sf->uc.tuc_link);

    __put_user(target_sigaltstack_used.ss_sp, &sf->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]), &sf->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &sf->uc.tuc_stack.ss_size);

    /* X0-X30, then SP (xregs[31]), PC and PSTATE. */
    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }
}
/*
 * Write the FPSIMD record: magic/size header, FPSR/FPCR, and the 32
 * 128-bit V registers stored as pairs of 64-bit halves.
 */
static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
                                       CPUARMState *env)
{
    int i;

    __put_user(TARGET_FPSIMD_MAGIC, &fpsimd->head.magic);
    __put_user(sizeof(struct target_fpsimd_context), &fpsimd->head.size);
    __put_user(vfp_get_fpsr(env), &fpsimd->fpsr);
    __put_user(vfp_get_fpcr(env), &fpsimd->fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        /* Swap the halves on big-endian targets so the in-memory
         * layout of each 128-bit vreg matches the ABI. */
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __put_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __put_user(q[0], &fpsimd->vregs[i * 2]);
        __put_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}
/*
 * Write an EXTRA record pointing at 'datap' (guest address of the
 * overflow space) of 'extra_size' bytes.
 */
static void target_setup_extra_record(struct target_extra_context *extra,
                                      uint64_t datap, uint32_t extra_size)
{
    __put_user(TARGET_EXTRA_MAGIC, &extra->head.magic);
    __put_user(sizeof(struct target_extra_context), &extra->head.size);
    __put_user(datap, &extra->datap);
    __put_user(extra_size, &extra->size);
}
/* Write the terminating record: magic and size both zero. */
static void target_setup_end_record(struct target_aarch64_ctx *end)
{
    __put_user(0, &end->magic);
    __put_user(0, &end->size);
}
/*
 * Write the SVE record: header, vector length in bytes, then the Z and
 * P register images at their ABI-defined offsets from the record start.
 */
static void target_setup_sve_record(struct target_sve_context *sve,
                                    CPUARMState *env, int vq, int size)
{
    int i, j;

    __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
    __put_user(size, &sve->head.size);
    __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian store
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __put_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
    /* 16 predicate registers plus FFR at index 16; each P register holds
     * vq 16-bit chunks packed four-per-64-bit-word in pregs[].p[]. */
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint64_t r = env->vfp.pregs[i].p[j >> 2];
            __put_user_e(r >> ((j & 3) * 16), p + j, le);
        }
    }
}
1585 static void target_restore_general_frame(CPUARMState *env,
1586 struct target_rt_sigframe *sf)
1588 sigset_t set;
1589 uint64_t pstate;
1590 int i;
1592 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1593 set_sigmask(&set);
1595 for (i = 0; i < 31; i++) {
1596 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1599 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1600 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1601 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1602 pstate_write(env, pstate);
/*
 * Restore FPSR/FPCR and the 32 V registers from an FPSIMD record,
 * undoing the endian-dependent half ordering used when it was written.
 */
static void target_restore_fpsimd_record(CPUARMState *env,
                                         struct target_fpsimd_context *fpsimd)
{
    uint32_t fpsr, fpcr;
    int i;

    __get_user(fpsr, &fpsimd->fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &fpsimd->fpcr);
    vfp_set_fpcr(env, fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __get_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __get_user(q[0], &fpsimd->vregs[i * 2]);
        __get_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}
/*
 * Restore the Z and P registers from an SVE record of vector length vq
 * quadwords.  Inverse of target_setup_sve_record.
 */
static void target_restore_sve_record(CPUARMState *env,
                                      struct target_sve_context *sve, int vq)
{
    int i, j;

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian load
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __get_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
    /* Reassemble each predicate register (and FFR at i == 16) from its
     * vq 16-bit chunks: the first chunk of a 64-bit word overwrites it,
     * later chunks are OR-ed into position. */
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint16_t r;
            __get_user_e(r, p + j, le);
            if (j & 3) {
                env->vfp.pregs[i].p[j >> 2] |= (uint64_t)r << ((j & 3) * 16);
            } else {
                env->vfp.pregs[i].p[j >> 2] = r;
            }
        }
    }
}
/*
 * Walk the record list in the frame's reserved space and restore each
 * recognized record (FPSIMD required, SVE and EXTRA optional).
 * Returns false (0) on success, true (1) on a malformed frame.
 */
static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    struct target_aarch64_ctx *ctx, *extra = NULL;
    struct target_fpsimd_context *fpsimd = NULL;
    struct target_sve_context *sve = NULL;
    uint64_t extra_datap = 0;
    bool used_extra = false;
    bool err = false;
    int vq = 0, sve_size = 0;

    target_restore_general_frame(env, sf);

    ctx = (struct target_aarch64_ctx *)sf->uc.tuc_mcontext.__reserved;
    while (ctx) {
        uint32_t magic, size, extra_size;

        __get_user(magic, &ctx->magic);
        __get_user(size, &ctx->size);
        switch (magic) {
        case 0:
            /* End marker: size must be 0.  If an EXTRA record was seen,
             * continue scanning there; otherwise we are done. */
            if (size != 0) {
                err = true;
                goto exit;
            }
            if (used_extra) {
                ctx = NULL;
            } else {
                ctx = extra;
                used_extra = true;
            }
            continue;

        case TARGET_FPSIMD_MAGIC:
            /* Reject duplicates and wrong-sized records. */
            if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
                err = true;
                goto exit;
            }
            fpsimd = (struct target_fpsimd_context *)ctx;
            break;

        case TARGET_SVE_MAGIC:
            if (arm_feature(env, ARM_FEATURE_SVE)) {
                vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
                if (!sve && size == sve_size) {
                    sve = (struct target_sve_context *)ctx;
                    break;
                }
            }
            /* SVE record without SVE support, duplicate, or bad size. */
            err = true;
            goto exit;

        case TARGET_EXTRA_MAGIC:
            if (extra || size != sizeof(struct target_extra_context)) {
                err = true;
                goto exit;
            }
            /* Map the guest's extra space; scanned after the end marker. */
            __get_user(extra_datap,
                       &((struct target_extra_context *)ctx)->datap);
            __get_user(extra_size,
                       &((struct target_extra_context *)ctx)->size);
            extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
            break;

        default:
            /* Unknown record -- we certainly didn't generate it.
             * Did we in fact get out of sync?
             */
            err = true;
            goto exit;
        }
        /* Advance to the next record using the header's size field. */
        ctx = (void *)ctx + size;
    }

    /* Require FPSIMD always.  */
    if (fpsimd) {
        target_restore_fpsimd_record(env, fpsimd);
    } else {
        err = true;
    }

    /* SVE data, if present, overwrites FPSIMD data.  */
    if (sve) {
        target_restore_sve_record(env, sve, vq);
    }

 exit:
    unlock_user(extra, extra_datap, 0);
    return err;
}
1749 static abi_ulong get_sigframe(struct target_sigaction *ka,
1750 CPUARMState *env, int size)
1752 abi_ulong sp;
1754 sp = env->xregs[31];
1757 * This is the X/Open sanctioned signal stack switching.
1759 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1760 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1763 sp = (sp - size) & ~15;
1765 return sp;
/* Accumulated layout of an AArch64 signal frame, built up by
 * alloc_sigframe_space().  All offsets are from the frame start. */
typedef struct {
    int total_size;    /* bytes allocated so far (whole frame) */
    int extra_base;    /* offset of the overflow space; 0 if none */
    int extra_size;    /* bytes allocated in the overflow space */
    int std_end_ofs;   /* offset of the end marker in the standard space */
    int extra_ofs;     /* offset of the EXTRA record; 0 if none */
    int extra_end_ofs; /* offset of the end marker in the overflow space */
} target_sigframe_layout;
/*
 * Reserve this_size bytes for a record in the frame layout and return
 * its offset.  Records that no longer fit in the standard reserved
 * space trigger allocation of an EXTRA record plus overflow space.
 */
static int alloc_sigframe_space(int this_size, target_sigframe_layout *l)
{
    /* Make sure there will always be space for the end marker.  */
    const int std_size = sizeof(struct target_rt_sigframe)
                         - sizeof(struct target_aarch64_ctx);
    int this_loc = l->total_size;

    if (l->extra_base) {
        /* Once we have begun an extra space, all allocations go there.  */
        l->extra_size += this_size;
    } else if (this_size + this_loc > std_size) {
        /* This allocation does not fit in the standard space.  */
        /* Allocate the extra record.  */
        l->extra_ofs = this_loc;
        l->total_size += sizeof(struct target_extra_context);

        /* Allocate the standard end record.  */
        l->std_end_ofs = l->total_size;
        l->total_size += sizeof(struct target_aarch64_ctx);

        /* Allocate the requested record.  */
        l->extra_base = this_loc = l->total_size;
        l->extra_size = this_size;
    }
    l->total_size += this_size;
    return this_loc;
}
/*
 * Build the complete AArch64 signal frame: compute the record layout,
 * write the general frame, FPSIMD/SVE/end records and the frame record
 * with the sigreturn trampoline, then point the CPU at the handler.
 * 'info' may be NULL for old-style (non-siginfo) delivery.
 */
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    target_sigframe_layout layout = {
        /* Begin with the size pointing to the reserved space.  */
        .total_size = offsetof(struct target_rt_sigframe,
                               uc.tuc_mcontext.__reserved),
    };
    int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
    struct target_rt_sigframe *frame;
    struct target_rt_frame_record *fr;
    abi_ulong frame_addr, return_addr;

    /* FPSIMD record is always in the standard space.  */
    fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context),
                                      &layout);

    /* SVE state needs saving only if it exists.  */
    if (arm_feature(env, ARM_FEATURE_SVE)) {
        vq = (env->vfp.zcr_el[1] & 0xf) + 1;
        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
        sve_ofs = alloc_sigframe_space(sve_size, &layout);
    }

    if (layout.extra_ofs) {
        /* Reserve space for the extra end marker.  The standard end marker
         * will have been allocated when we allocated the extra record.
         */
        layout.extra_end_ofs
            = alloc_sigframe_space(sizeof(struct target_aarch64_ctx), &layout);
    } else {
        /* Reserve space for the standard end marker.
         * Do not use alloc_sigframe_space because we cheat
         * std_size therein to reserve space for this.
         */
        layout.std_end_ofs = layout.total_size;
        layout.total_size += sizeof(struct target_aarch64_ctx);
    }

    /* Reserve space for the return code.  On a real system this would
     * be within the VDSO.  So, despite the name this is not a "real"
     * record within the frame.
     */
    fr_ofs = layout.total_size;
    layout.total_size += sizeof(struct target_rt_frame_record);

    frame_addr = get_sigframe(ka, env, layout.total_size);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    target_setup_general_frame(frame, env, set);
    target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
    target_setup_end_record((void *)frame + layout.std_end_ofs);
    if (layout.extra_ofs) {
        target_setup_extra_record((void *)frame + layout.extra_ofs,
                                  frame_addr + layout.extra_base,
                                  layout.extra_size);
        target_setup_end_record((void *)frame + layout.extra_end_ofs);
    }
    if (sve_ofs) {
        target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
    }

    /* Set up the stack frame for unwinding.  */
    fr = (void *)frame + fr_ofs;
    __put_user(env->xregs[29], &fr->fp);
    __put_user(env->xregs[30], &fr->lr);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /*
         * mov x8,#__NR_rt_sigreturn; svc #0
         * Since these are instructions they need to be put as little-endian
         * regardless of target default or current CPU endianness.
         */
        __put_user_e(0xd2801168, &fr->tramp[0], le);
        __put_user_e(0xd4000001, &fr->tramp[1], le);
        return_addr = frame_addr + fr_ofs
            + offsetof(struct target_rt_frame_record, tramp);
    }
    /* Handler entry: X0 = signal, SP at the frame, FP at the frame
     * record, LR at the restorer/trampoline, PC at the handler. */
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = frame_addr + fr_ofs;
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        /* SA_SIGINFO handlers also get siginfo (X1) and ucontext (X2). */
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

 give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(usig);
}
/* AArch64 RT delivery: siginfo is passed through to target_setup_frame. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}
1916 static void setup_frame(int sig, struct target_sigaction *ka,
1917 target_sigset_t *set, CPUARMState *env)
1919 target_setup_frame(sig, ka, 0, set, env);
1922 long do_rt_sigreturn(CPUARMState *env)
1924 struct target_rt_sigframe *frame = NULL;
1925 abi_ulong frame_addr = env->xregs[31];
1927 trace_user_do_rt_sigreturn(env, frame_addr);
1928 if (frame_addr & 15) {
1929 goto badframe;
1932 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1933 goto badframe;
1936 if (target_restore_sigframe(env, frame)) {
1937 goto badframe;
1940 if (do_sigaltstack(frame_addr +
1941 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1942 0, get_sp_from_cpustate(env)) == -EFAULT) {
1943 goto badframe;
1946 unlock_user_struct(frame, frame_addr, 0);
1947 return -TARGET_QEMU_ESIGRETURN;
1949 badframe:
1950 unlock_user_struct(frame, frame_addr, 0);
1951 force_sig(TARGET_SIGSEGV);
1952 return -TARGET_QEMU_ESIGRETURN;
/* AArch64 has no separate old-style sigreturn; alias to rt_sigreturn. */
long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}
1960 #elif defined(TARGET_ARM)
/* 32-bit ARM guest sigcontext, mirroring the kernel's layout: trap
 * info, saved mask word, the 16 core registers by name, CPSR and the
 * faulting address. */
struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t  tuc_sigmask;	/* mask last for extensibility */
};

/* v2 ucontext adds the 128-word coprocessor register space used for
 * the VFP and iWMMXt signal frames. */
struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t  tuc_sigmask;	/* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

/* Trampoline candidates indexed by (thumb + 2 * siginfo); see
 * setup_return() for the selection logic. */
static const abi_ulong retcodes[4] = {
	SWI_SYS_SIGRETURN,	SWI_THUMB_SIGRETURN,
	SWI_SYS_RT_SIGRETURN,	SWI_THUMB_RT_SIGRETURN
};
/* Always accepts the register state; kept for parity with the kernel's
 * valid_user_regs() check. */
static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}
/*
 * Save the ARM core registers, CPSR, trap info placeholders and the
 * first signal-mask word into the guest sigcontext.
 */
static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    /* Trap details are not tracked here; store zeros as placeholders. */
    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}
2123 static inline abi_ulong
2124 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
2126 unsigned long sp = regs->regs[13];
2129 * This is the X/Open sanctioned signal stack switching.
2131 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
2132 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2135 * ATPCS B01 mandates 8-byte alignment
2137 return (sp - framesize) & ~7;
/*
 * Point the CPU at the signal handler: set up R0/SP/LR/PC and CPSR,
 * honouring the Thumb bit of the handler address, and write the
 * sigreturn trampoline into *rc when no sa_restorer was given.
 */
static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    /* Bit 0 of the handler address selects Thumb state. */
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    /* Enter the handler with a clean IT state and the right ISA bit. */
    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        /* Select from retcodes[]: bit 0 = Thumb, bit 1 = rt (SIGINFO). */
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        /* Return address keeps the Thumb bit for a Thumb trampoline. */
        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    /* Clear the ISA selection bits from the PC itself. */
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}
/*
 * Write the VFP coprocessor record into the v2 regspace and return a
 * pointer just past it (for the next record / terminator word).
 */
static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    /* The 32 double registers, FPSCR and the exception registers. */
    for (i = 0; i < 32; i++) {
        __put_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}
2194 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
2195 CPUARMState *env)
2197 int i;
2198 struct target_iwmmxt_sigframe *iwmmxtframe;
2199 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2200 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
2201 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
2202 for (i = 0; i < 16; i++) {
2203 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2205 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2206 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
2207 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2208 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2209 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2210 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2211 return (abi_ulong*)(iwmmxtframe+1);
/*
 * Fill a v2 ucontext: sigaltstack description, core sigcontext, the
 * coprocessor records (VFP, iWMMXt) with a terminating zero word, and
 * the full signal mask.
 */
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}
/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
/* Build a v1 (pre-2.6.18 kernel) non-RT signal frame on the guest
 * stack: sigcontext, extra sigmask words, and return trampoline. */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    /* Mask word 0 went into the sigcontext; words 1..n go here. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}
/* Build a v2 (2.6.18+) non-RT signal frame: a full ucontext plus a
 * return trampoline, written onto the guest stack. */
static void setup_frame_v2(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    setup_sigframe_v2(&frame->uc, set, regs);

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v2, retcode));

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}
2298 static void setup_frame(int usig, struct target_sigaction *ka,
2299 target_sigset_t *set, CPUARMState *regs)
2301 if (get_osversion() >= 0x020612) {
2302 setup_frame_v2(usig, ka, set, regs);
2303 } else {
2304 setup_frame_v1(usig, ka, set, regs);
/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
/* Build a v1 RT signal frame: siginfo + ucontext (with sigaltstack
 * info, sigcontext and full mask) + trampoline; r1/r2 are loaded so a
 * SA_SIGINFO handler receives pointers to siginfo and ucontext. */
static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    struct target_sigaltstack stack;
    int i;
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
    __put_user(info_addr, &frame->pinfo);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
    __put_user(uc_addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Clear all the bits of the ucontext we don't use. */
    memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v1, retcode));

    /* Handler arguments 2 and 3: guest addresses of siginfo/ucontext. */
    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}
/* Build a v2 RT signal frame: siginfo + v2 ucontext + trampoline;
 * r1/r2 carry the siginfo/ucontext guest addresses to the handler. */
static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
    tswap_siginfo(&frame->info, info);

    setup_sigframe_v2(&frame->uc, set, env);

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v2, retcode));

    /* Handler arguments 2 and 3: guest addresses of siginfo/ucontext. */
    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}
2387 static void setup_rt_frame(int usig, struct target_sigaction *ka,
2388 target_siginfo_t *info,
2389 target_sigset_t *set, CPUARMState *env)
2391 if (get_osversion() >= 0x020612) {
2392 setup_rt_frame_v2(usig, ka, info, set, env);
2393 } else {
2394 setup_rt_frame_v1(usig, ka, info, set, env);
/* Copy guest CPU state back out of a target_sigcontext on sigreturn.
 * Returns non-zero if the restored register state is not valid for
 * user mode (as judged by valid_user_regs()). */
static int
restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
{
    int err = 0;
    uint32_t cpsr;

    __get_user(env->regs[0], &sc->arm_r0);
    __get_user(env->regs[1], &sc->arm_r1);
    __get_user(env->regs[2], &sc->arm_r2);
    __get_user(env->regs[3], &sc->arm_r3);
    __get_user(env->regs[4], &sc->arm_r4);
    __get_user(env->regs[5], &sc->arm_r5);
    __get_user(env->regs[6], &sc->arm_r6);
    __get_user(env->regs[7], &sc->arm_r7);
    __get_user(env->regs[8], &sc->arm_r8);
    __get_user(env->regs[9], &sc->arm_r9);
    __get_user(env->regs[10], &sc->arm_r10);
    __get_user(env->regs[11], &sc->arm_fp);
    __get_user(env->regs[12], &sc->arm_ip);
    __get_user(env->regs[13], &sc->arm_sp);
    __get_user(env->regs[14], &sc->arm_lr);
    __get_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    /* Only the user-writable CPSR bits are taken from the frame. */
    __get_user(cpsr, &sc->arm_cpsr);
    cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
#endif

    err |= !valid_user_regs(env);

    return err;
}
/* Handle the v1 (non-RT, pre-2.6.18 layout) sigreturn syscall:
 * validate and unpick the frame pushed by setup_frame_v1(), restore
 * the signal mask and CPU state.  Raises SIGSEGV on a bad frame. */
static long do_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v1 *frame = NULL;
    target_sigset_t set;
    sigset_t host_set;
    int i;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    __get_user(set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&host_set, &set);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &frame->sc)) {
        goto badframe;
    }

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
/* Restore VFP state from a coprocessor record in the v2 sigframe.
 * Returns a pointer just past the record, or NULL (0) when the magic
 * word or record size does not match — the caller treats that as a
 * bad frame. */
static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    uint32_t fpscr, fpexc;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;

    __get_user(magic, &vfpframe->magic);
    __get_user(sz, &vfpframe->size);
    if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
        return 0;
    }
    for (i = 0; i < 32; i++) {
        __get_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
    }
    __get_user(fpscr, &vfpframe->ufp.fpscr);
    vfp_set_fpscr(env, fpscr);
    __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
    /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
     * and the exception flag is cleared
     */
    fpexc |= (1 << 30);
    fpexc &= ~((1 << 31) | (1 << 28));
    env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
    __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe + 1);
}
2508 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2509 abi_ulong *regspace)
2511 int i;
2512 abi_ulong magic, sz;
2513 struct target_iwmmxt_sigframe *iwmmxtframe;
2514 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2516 __get_user(magic, &iwmmxtframe->magic);
2517 __get_user(sz, &iwmmxtframe->size);
2518 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2519 return 0;
2521 for (i = 0; i < 16; i++) {
2522 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2524 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2525 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
2526 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2527 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2528 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2529 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2530 return (abi_ulong*)(iwmmxtframe + 1);
/* Common v2 sigreturn tail: restore signal mask, integer sigcontext,
 * any coprocessor records, and the sigaltstack settings from the
 * ucontext at guest address context_addr (already locked as *uc).
 * Returns 0 on success, 1 on any bad-frame condition. */
static int do_sigframe_return_v2(CPUARMState *env,
                                 target_ulong context_addr,
                                 struct target_ucontext_v2 *uc)
{
    sigset_t host_set;
    abi_ulong *regspace;

    target_to_host_sigset(&host_set, &uc->tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &uc->tuc_mcontext))
        return 1;

    /* Restore coprocessor signal frame */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = restore_sigframe_v2_vfp(env, regspace);
        if (!regspace) {
            return 1;
        }
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = restore_sigframe_v2_iwmmxt(env, regspace);
        if (!regspace) {
            return 1;
        }
    }

    if (do_sigaltstack(context_addr
                       + offsetof(struct target_ucontext_v2, tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        return 1;
    }

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif

    return 0;
}
/* Handle the v2 (non-RT) sigreturn syscall: validate the frame built
 * by setup_frame_v2() and restore state via do_sigframe_return_v2().
 * Raises SIGSEGV on a bad frame. */
static long do_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (do_sigframe_return_v2(env,
                              frame_addr
                              + offsetof(struct sigframe_v2, uc),
                              &frame->uc)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
2612 long do_sigreturn(CPUARMState *env)
2614 if (get_osversion() >= 0x020612) {
2615 return do_sigreturn_v2(env);
2616 } else {
2617 return do_sigreturn_v1(env);
/* Handle the v1 rt_sigreturn syscall: restore mask, sigcontext and
 * sigaltstack settings from the frame built by setup_rt_frame_v1().
 * Raises SIGSEGV on a bad frame. */
static long do_rt_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v1 *frame = NULL;
    sigset_t host_set;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
/* Handle the v2 rt_sigreturn syscall via the shared v2 frame-return
 * helper.  Raises SIGSEGV on a bad frame. */
static long do_rt_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (do_sigframe_return_v2(env,
                              frame_addr
                              + offsetof(struct rt_sigframe_v2, uc),
                              &frame->uc)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
2702 long do_rt_sigreturn(CPUARMState *env)
2704 if (get_osversion() >= 0x020612) {
2705 return do_rt_sigreturn_v2(env);
2706 } else {
2707 return do_rt_sigreturn_v1(env);
2711 #elif defined(TARGET_SPARC)
#define __SUNOS_MAXWIN 31

/* This is what SunOS does, so shall I. */
struct target_sigcontext {
    abi_ulong sigc_onstack;      /* state to restore */

    abi_ulong sigc_mask;         /* sigmask to restore */
    abi_ulong sigc_sp;           /* stack pointer */
    abi_ulong sigc_pc;           /* program counter */
    abi_ulong sigc_npc;          /* next program counter */
    abi_ulong sigc_psr;          /* for condition codes etc */
    abi_ulong sigc_g1;           /* User uses these two registers */
    abi_ulong sigc_o0;           /* within the trampoline code. */

    /* Now comes information regarding the users window set
     * at the time of the signal.
     */
    abi_ulong sigc_oswins;       /* outstanding windows */

    /* stack ptrs for each regwin buf */
    char *sigc_spbuf[__SUNOS_MAXWIN];

    /* Windows to restore after signal */
    struct {
        abi_ulong locals[8];
        abi_ulong ins[8];
    } sigc_wbuf[__SUNOS_MAXWIN];
};
/* A Sparc stack frame */
struct sparc_stackf {
    abi_ulong locals[8];
    abi_ulong ins[8];
    /* It's simpler to treat fp and callers_pc as elements of ins[]
     * since we never need to access them ourselves.
     */
    char *structptr;
    abi_ulong xargs[6];
    abi_ulong xxargs[1];
};

/* Register dump as written into the signal frame's info area. */
typedef struct {
    struct {
        abi_ulong psr;
        abi_ulong pc;
        abi_ulong npc;
        abi_ulong y;
        abi_ulong u_regs[16]; /* globals and ins */
    } si_regs;
    int si_mask;
} __siginfo_t;

/* FPU dump area; currently only reserved, FPU save/restore is a FIXME
 * in setup_frame()/do_sigreturn() below. */
typedef struct {
    abi_ulong si_float_regs[32];
    unsigned long si_fsr;
    unsigned long si_fpqdepth;
    struct {
        unsigned long *insn_addr;
        unsigned long insn;
    } si_fpqueue [16];
} qemu_siginfo_fpu_t;

/* Non-RT signal frame pushed on the guest stack by setup_frame(). */
struct target_signal_frame {
    struct sparc_stackf ss;
    __siginfo_t info;
    abi_ulong fpu_save;
    abi_ulong insns[2] __attribute__ ((aligned (8)));
    abi_ulong extramask[TARGET_NSIG_WORDS - 1];
    abi_ulong extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
/* RT frame layout; unused here since setup_rt_frame() is a stub. */
struct target_rt_signal_frame {
    struct sparc_stackf ss;
    siginfo_t info;
    abi_ulong regs[20];
    sigset_t mask;
    abi_ulong fpu_save;
    unsigned int insns[2];
    stack_t stack;
    unsigned int extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};

/* Indices into env->regwptr[] (outs at 16+, ins at 0, locals at 8 —
 * NOTE(review): inferred from the UREG_Ix / UREG_Ox uses below). */
#define UREG_O0        16
#define UREG_O6        22
#define UREG_I0        0
#define UREG_I1        1
#define UREG_I2        2
#define UREG_I3        3
#define UREG_I4        4
#define UREG_I5        5
#define UREG_I6        6
#define UREG_I7        7
#define UREG_L0        8
#define UREG_FP        UREG_I6
#define UREG_SP        UREG_O6
/* Compute the guest address for a new signal frame of 'framesize'
 * bytes: below the current frame pointer, or on the alternate signal
 * stack when SA_ONSTACK is set, we are not already on it, and the
 * altstack top is 8-byte aligned. */
static inline abi_ulong get_sigframe(struct target_sigaction *sa,
                                     CPUSPARCState *env,
                                     unsigned long framesize)
{
    abi_ulong sp;

    sp = env->regwptr[UREG_FP];

    /* This is the X/Open sanctioned signal stack switching.  */
    if (sa->sa_flags & TARGET_SA_ONSTACK) {
        if (!on_sig_stack(sp)
                && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
            sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    }
    return sp - framesize;
}
/* Dump psr/pc/npc/y, the 8 globals and the 8 in-registers plus the
 * first signal-mask word into the frame's __siginfo_t.  Always
 * returns 0 (err is never modified after initialisation). */
static int
setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
{
    int err = 0, i;

    __put_user(env->psr, &si->si_regs.psr);
    __put_user(env->pc, &si->si_regs.pc);
    __put_user(env->npc, &si->si_regs.npc);
    __put_user(env->y, &si->si_regs.y);
    for (i=0; i < 8; i++) {
        __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
    }
    for (i=0; i < 8; i++) {
        __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
    }
    __put_user(mask, &si->si_mask);
    return err;
}
/* Dead code kept for reference: SunOS-style sigcontext writer,
 * compiled out with #if 0. */
#if 0
static int
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUSPARCState *env, unsigned long mask)
{
    int err = 0;

    __put_user(mask, &sc->sigc_mask);
    __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
    __put_user(env->pc, &sc->sigc_pc);
    __put_user(env->npc, &sc->sigc_npc);
    __put_user(env->psr, &sc->sigc_psr);
    __put_user(env->gregs[1], &sc->sigc_g1);
    __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);

    return err;
}
#endif
/* Signal frame size rounded up to an 8-byte boundary. */
#define NF_ALIGNEDSZ  (((sizeof(struct target_signal_frame) + 7) & (~7)))
/* Build a SPARC non-RT signal frame on the guest stack: register dump,
 * signal mask, saved locals/ins, then point pc at the handler and set
 * up the return trampoline (sa_restorer or an inlined
 * sigreturn-syscall stub written into the frame). */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    int sigframe_size, err, i;

    /* 1. Make sure everything is clean */
    //synchronize_user_stack();

    sigframe_size = NF_ALIGNEDSZ;
    sf_addr = get_sigframe(ka, env, sigframe_size);
    trace_user_setup_frame(env, sf_addr);

    sf = lock_user(VERIFY_WRITE, sf_addr,
                   sizeof(struct target_signal_frame), 0);
    if (!sf) {
        goto sigsegv;
    }
#if 0
    if (invalid_frame_pointer(sf, sigframe_size))
        goto sigill_and_return;
#endif
    /* 2. Save the current process state */
    err = setup___siginfo(&sf->info, env, set->sig[0]);
    __put_user(0, &sf->extra_size);

    /* FPU save/restore is not implemented. */
    //save_fpu_state(regs, &sf->fpu_state);
    //__put_user(&sf->fpu_state, &sf->fpu_save);

    __put_user(set->sig[0], &sf->info.si_mask);
    for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
        __put_user(set->sig[i + 1], &sf->extramask[i]);
    }

    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
    }
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
    }
    if (err)
        goto sigsegv;

    /* 3. signal handler back-trampoline and parameters */
    env->regwptr[UREG_FP] = sf_addr;
    env->regwptr[UREG_I0] = sig;
    env->regwptr[UREG_I1] = sf_addr +
            offsetof(struct target_signal_frame, info);
    env->regwptr[UREG_I2] = sf_addr +
            offsetof(struct target_signal_frame, info);

    /* 4. signal handler */
    env->pc = ka->_sa_handler;
    env->npc = (env->pc + 4);
    /* 5. return to kernel instructions */
    if (ka->sa_restorer) {
        env->regwptr[UREG_I7] = ka->sa_restorer;
    } else {
        uint32_t val32;

        env->regwptr[UREG_I7] = sf_addr +
                offsetof(struct target_signal_frame, insns) - 2 * 4;

        /* mov __NR_sigreturn, %g1 */
        val32 = 0x821020d8;
        __put_user(val32, &sf->insns[0]);

        /* t 0x10 */
        val32 = 0x91d02010;
        __put_user(val32, &sf->insns[1]);
        if (err)
            goto sigsegv;

        /* Flush instruction space. */
        // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        // tb_flush(env);
    }
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    return;
#if 0
sigill_and_return:
    force_sig(TARGET_SIGILL);
#endif
sigsegv:
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    force_sigsegv(sig);
}
/* RT signal frames are not implemented for SPARC; the guest's
 * SA_SIGINFO handlers will not be delivered correctly. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUSPARCState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}
/* Handle the SPARC sigreturn syscall: validate the frame at the
 * frame pointer, restore psr condition codes, pc/npc/y, globals, ins
 * and the signal mask.  Raises SIGSEGV on a bad frame.  FPU restore
 * is a FIXME (see below). */
long do_sigreturn(CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    uint32_t up_psr, pc, npc;
    target_sigset_t set;
    sigset_t host_set;
    int err=0, i;

    sf_addr = env->regwptr[UREG_FP];
    trace_user_do_sigreturn(env, sf_addr);
    if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
        goto segv_and_exit;
    }

    /* 1. Make sure we are not getting garbage from the user */

    if (sf_addr & 3)
        goto segv_and_exit;

    __get_user(pc, &sf->info.si_regs.pc);
    __get_user(npc, &sf->info.si_regs.npc);

    if ((pc | npc) & 3) {
        goto segv_and_exit;
    }

    /* 2. Restore the state */
    __get_user(up_psr, &sf->info.si_regs.psr);

    /* User can only change condition codes and FPU enabling in %psr. */
    env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
            | (env->psr & ~(PSR_ICC /* | PSR_EF */));

    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &sf->info.si_regs.y);
    for (i=0; i < 8; i++) {
        __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
    }
    for (i=0; i < 8; i++) {
        __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
    }

    /* FIXME: implement FPU save/restore:
     *   __get_user(fpu_save, &sf->fpu_save);
     *   if (fpu_save)
     *       err |= restore_fpu_state(env, fpu_save);
     */

    /* This is pretty much atomic, no amount locking would prevent
     * the races which exist anyways.
     */
    __get_user(set.sig[0], &sf->info.si_mask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(set.sig[i], &sf->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&host_set, &set);
    set_sigmask(&host_set);

    if (err) {
        goto segv_and_exit;
    }
    unlock_user_struct(sf, sf_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

segv_and_exit:
    unlock_user_struct(sf, sf_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
/* rt_sigreturn is not implemented for SPARC (matches the stubbed
 * setup_rt_frame above); report ENOSYS to the guest. */
long do_rt_sigreturn(CPUSPARCState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
/* Indices into the mcontext general-register array. */
#define SPARC_MC_TSTATE 0
#define SPARC_MC_PC 1
#define SPARC_MC_NPC 2
#define SPARC_MC_Y 3
#define SPARC_MC_G1 4
#define SPARC_MC_G2 5
#define SPARC_MC_G3 6
#define SPARC_MC_G4 7
#define SPARC_MC_G5 8
#define SPARC_MC_G6 9
#define SPARC_MC_G7 10
#define SPARC_MC_O0 11
#define SPARC_MC_O1 12
#define SPARC_MC_O2 13
#define SPARC_MC_O3 14
#define SPARC_MC_O4 15
#define SPARC_MC_O5 16
#define SPARC_MC_O6 17
#define SPARC_MC_O7 18
#define SPARC_MC_NGREG 19

typedef abi_ulong target_mc_greg_t;
typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG];

/* One entry of the FPU deferred-trap queue. */
struct target_mc_fq {
    abi_ulong *mcfq_addr;
    uint32_t mcfq_insn;
};

struct target_mc_fpu {
    union {
        uint32_t sregs[32];
        uint64_t dregs[32];
        //uint128_t qregs[16];
    } mcfpu_fregs;
    abi_ulong mcfpu_fsr;
    abi_ulong mcfpu_fprs;
    abi_ulong mcfpu_gsr;
    struct target_mc_fq *mcfpu_fq;
    unsigned char mcfpu_qcnt;
    unsigned char mcfpu_qentsz;
    unsigned char mcfpu_enab;
};
typedef struct target_mc_fpu target_mc_fpu_t;

typedef struct {
    target_mc_gregset_t mc_gregs;
    target_mc_greg_t mc_fp;
    target_mc_greg_t mc_i7;
    target_mc_fpu_t mc_fpregs;
} target_mcontext_t;

struct target_ucontext {
    struct target_ucontext *tuc_link;
    abi_ulong tuc_flags;
    target_sigset_t tuc_sigmask;
    target_mcontext_t tuc_mcontext;
};

/* A V9 register window */
struct target_reg_window {
    abi_ulong locals[8];
    abi_ulong ins[8];
};

/* V9 stack frames are addressed with a 2047-byte bias off %sp/%fp. */
#define TARGET_STACK_BIAS 2047
/* {set, get}context() needed for 64-bit SparcLinux userland. */
/* Implement the guest's setcontext trap: read a ucontext from the
 * guest address in %i0, validate pc/npc alignment, optionally restore
 * the signal mask (when %i1 is non-zero), then restore the general
 * registers, window fp/i7 (spilled to the register window on the
 * stack) and the FPU state.  Raises SIGSEGV on any fault. */
void sparc64_set_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    abi_ulong pc, npc, tstate;
    abi_ulong fp, i7, w_addr;
    unsigned int i;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
        goto do_sigsegv;
    }
    grp = &ucp->tuc_mcontext.mc_gregs;
    __get_user(pc, &((*grp)[SPARC_MC_PC]));
    __get_user(npc, &((*grp)[SPARC_MC_NPC]));
    if ((pc | npc) & 3) {
        goto do_sigsegv;
    }
    if (env->regwptr[UREG_I1]) {
        target_sigset_t target_set;
        sigset_t set;

        if (TARGET_NSIG_WORDS == 1) {
            __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
        } else {
            abi_ulong *src, *dst;
            src = ucp->tuc_sigmask.sig;
            dst = target_set.sig;
            for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
                __get_user(*dst, src);
            }
        }
        target_to_host_sigset_internal(&set, &target_set);
        set_sigmask(&set);
    }
    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &((*grp)[SPARC_MC_Y]));
    /* tstate packs asi (bits 24-31), ccr (bits 32+) and cwp (low 5). */
    __get_user(tstate, &((*grp)[SPARC_MC_TSTATE]));
    env->asi = (tstate >> 24) & 0xff;
    cpu_put_ccr(env, tstate >> 32);
    cpu_put_cwp64(env, tstate & 0x1f);
    __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1]));
    __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2]));
    __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3]));
    __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4]));
    __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5]));
    __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6]));
    __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7]));
    __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0]));
    __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1]));
    __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2]));
    __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3]));
    __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4]));
    __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5]));
    __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6]));
    __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7]));

    __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
    __get_user(i7, &(ucp->tuc_mcontext.mc_i7));

    /* Spill fp/i7 into the register window saved on the guest stack. */
    w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
    if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    /* FIXME this does not match how the kernel handles the FPU in
     * its sparc64_set_context implementation. In particular the FPU
     * is only restored if fenab is non-zero in:
     *   __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
     */
    __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
    {
        uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, src++) {
            if (i & 1) {
                __get_user(env->fpr[i/2].l.lower, src);
            } else {
                __get_user(env->fpr[i/2].l.upper, src);
            }
        }
    }
    __get_user(env->fsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
    __get_user(env->gsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
    unlock_user_struct(ucp, ucp_addr, 0);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 0);
    force_sig(TARGET_SIGSEGV);
}
/* Implement the guest's getcontext trap: write the current signal
 * mask, general registers, window fp/i7 (read back from the stacked
 * register window) and FPU state into the ucontext at the guest
 * address in %i0.  pc/npc are advanced past the trap instruction
 * first.  Raises SIGSEGV on any fault. */
void sparc64_get_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    target_mcontext_t *mcp;
    abi_ulong fp, i7, w_addr;
    int err;
    unsigned int i;
    target_sigset_t target_set;
    sigset_t set;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
        goto do_sigsegv;
    }

    mcp = &ucp->tuc_mcontext;
    grp = &mcp->mc_gregs;

    /* Skip over the trap instruction, first. */
    env->pc = env->npc;
    env->npc += 4;

    /* If we're only reading the signal mask then do_sigprocmask()
     * is guaranteed not to fail, which is important because we don't
     * have any way to signal a failure or restart this operation since
     * this is not a normal syscall.
     */
    err = do_sigprocmask(0, NULL, &set);
    assert(err == 0);
    host_to_target_sigset_internal(&target_set, &set);
    if (TARGET_NSIG_WORDS == 1) {
        __put_user(target_set.sig[0],
                   (abi_ulong *)&ucp->tuc_sigmask);
    } else {
        abi_ulong *src, *dst;
        src = target_set.sig;
        dst = ucp->tuc_sigmask.sig;
        for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
            __put_user(*src, dst);
        }
        /* NOTE(review): err is still 0 here (asserted above), so this
         * check is dead code; kept as-is. */
        if (err)
            goto do_sigsegv;
    }

    /* XXX: tstate must be saved properly */
    //    __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
    __put_user(env->pc, &((*grp)[SPARC_MC_PC]));
    __put_user(env->npc, &((*grp)[SPARC_MC_NPC]));
    __put_user(env->y, &((*grp)[SPARC_MC_Y]));
    __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1]));
    __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2]));
    __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3]));
    __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4]));
    __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5]));
    __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6]));
    __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7]));
    __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0]));
    __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1]));
    __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2]));
    __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3]));
    __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4]));
    __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5]));
    __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6]));
    __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7]));

    /* Read fp/i7 back from the register window saved on the stack. */
    w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
    fp = i7 = 0;
    if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    __put_user(fp, &(mcp->mc_fp));
    __put_user(i7, &(mcp->mc_i7));

    {
        uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, dst++) {
            if (i & 1) {
                __put_user(env->fpr[i/2].l.lower, dst);
            } else {
                __put_user(env->fpr[i/2].l.upper, dst);
            }
        }
    }
    __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
    __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
    __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));

    if (err)
        goto do_sigsegv;
    unlock_user_struct(ucp, ucp_addr, 1);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
3312 #endif
3313 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
# if defined(TARGET_ABI_MIPSO32)
/* o32 sigcontext; field comments note historical field reuse. */
struct target_sigcontext {
    uint32_t   sc_regmask;     /* Unused */
    uint32_t   sc_status;
    uint64_t   sc_pc;
    uint64_t   sc_regs[32];
    uint64_t   sc_fpregs[32];
    uint32_t   sc_ownedfp;     /* Unused */
    uint32_t   sc_fpc_csr;
    uint32_t   sc_fpc_eir;     /* Unused */
    uint32_t   sc_used_math;
    uint32_t   sc_dsp;         /* dsp status, was sc_ssflags */
    uint32_t   pad0;
    uint64_t   sc_mdhi;
    uint64_t   sc_mdlo;
    target_ulong   sc_hi1;     /* Was sc_cause */
    target_ulong   sc_lo1;     /* Was sc_badvaddr */
    target_ulong   sc_hi2;     /* Was sc_sigset[4] */
    target_ulong   sc_lo2;
    target_ulong   sc_hi3;
    target_ulong   sc_lo3;
};
# else /* N32 || N64 */
struct target_sigcontext {
    uint64_t sc_regs[32];
    uint64_t sc_fpregs[32];
    uint64_t sc_mdhi;
    uint64_t sc_hi1;
    uint64_t sc_hi2;
    uint64_t sc_hi3;
    uint64_t sc_mdlo;
    uint64_t sc_lo1;
    uint64_t sc_lo2;
    uint64_t sc_lo3;
    uint64_t sc_pc;
    uint32_t sc_fpc_csr;
    uint32_t sc_used_math;
    uint32_t sc_dsp;
    uint32_t sc_reserved;
};
# endif /* O32 */

/* Non-RT signal frame pushed on the guest stack. */
struct sigframe {
    uint32_t sf_ass[4];        /* argument save space for o32 */
    uint32_t sf_code[2];       /* signal trampoline */
    struct target_sigcontext sf_sc;
    target_sigset_t sf_mask;
};

struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;
    target_stack_t tuc_stack;
    target_ulong pad0;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

/* RT signal frame: siginfo followed by a full ucontext. */
struct target_rt_sigframe {
    uint32_t rs_ass[4];        /* argument save space for o32 */
    uint32_t rs_code[2];       /* signal trampoline */
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};
/* Install trampoline to jump back from signal handler */
static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
{
    /*
     * Emit the two-instruction return stub the handler comes back
     * through:
     *
     *   li      v0, __NR__foo_sigreturn
     *   syscall
     */
    __put_user(0x24020000 + syscall, tramp + 0);
    __put_user(0x0000000c, tramp + 1);
    /* Nothing above can fail; keep the int return for callers. */
    return 0;
}
3397 static inline void setup_sigcontext(CPUMIPSState *regs,
3398 struct target_sigcontext *sc)
3400 int i;
3402 __put_user(exception_resume_pc(regs), &sc->sc_pc);
3403 regs->hflags &= ~MIPS_HFLAG_BMASK;
3405 __put_user(0, &sc->sc_regs[0]);
3406 for (i = 1; i < 32; ++i) {
3407 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3410 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3411 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3413 /* Rather than checking for dsp existence, always copy. The storage
3414 would just be garbage otherwise. */
3415 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
3416 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
3417 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
3418 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
3419 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
3420 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
3422 uint32_t dsp = cpu_rddsp(0x3ff, regs);
3423 __put_user(dsp, &sc->sc_dsp);
3426 __put_user(1, &sc->sc_used_math);
3428 for (i = 0; i < 32; ++i) {
3429 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3433 static inline void
3434 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
3436 int i;
3438 __get_user(regs->CP0_EPC, &sc->sc_pc);
3440 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3441 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3443 for (i = 1; i < 32; ++i) {
3444 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3447 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
3448 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
3449 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
3450 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
3451 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
3452 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
3454 uint32_t dsp;
3455 __get_user(dsp, &sc->sc_dsp);
3456 cpu_wrdsp(dsp, 0x3ff, regs);
3459 for (i = 0; i < 32; ++i) {
3460 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3465 * Determine which stack to use..
3467 static inline abi_ulong
3468 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
3470 unsigned long sp;
3472 /* Default to using normal stack */
3473 sp = regs->active_tc.gpr[29];
3476 * FPU emulator may have its own trampoline active just
3477 * above the user stack, 16-bytes before the next lowest
3478 * 16 byte boundary. Try to avoid trashing it.
3480 sp -= 32;
3482 /* This is the X/Open sanctioned signal stack switching. */
3483 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
3484 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3487 return (sp - frame_size) & ~7;
3490 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
3492 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
3493 env->hflags &= ~MIPS_HFLAG_M16;
3494 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
3495 env->active_tc.PC &= ~(target_ulong) 1;
3499 # if defined(TARGET_ABI_MIPSO32)
3500 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
3501 static void setup_frame(int sig, struct target_sigaction * ka,
3502 target_sigset_t *set, CPUMIPSState *regs)
3504 struct sigframe *frame;
3505 abi_ulong frame_addr;
3506 int i;
3508 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
3509 trace_user_setup_frame(regs, frame_addr);
3510 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3511 goto give_sigsegv;
3514 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
3516 setup_sigcontext(regs, &frame->sf_sc);
3518 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3519 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
3523 * Arguments to signal handler:
3525 * a0 = signal number
3526 * a1 = 0 (should be cause)
3527 * a2 = pointer to struct sigcontext
3529 * $25 and PC point to the signal handler, $29 points to the
3530 * struct sigframe.
3532 regs->active_tc.gpr[ 4] = sig;
3533 regs->active_tc.gpr[ 5] = 0;
3534 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3535 regs->active_tc.gpr[29] = frame_addr;
3536 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3537 /* The original kernel code sets CP0_EPC to the handler
3538 * since it returns to userland using eret
3539 * we cannot do this here, and we must set PC directly */
3540 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3541 mips_set_hflags_isa_mode_from_pc(regs);
3542 unlock_user_struct(frame, frame_addr, 1);
3543 return;
3545 give_sigsegv:
3546 force_sigsegv(sig);
3549 long do_sigreturn(CPUMIPSState *regs)
3551 struct sigframe *frame;
3552 abi_ulong frame_addr;
3553 sigset_t blocked;
3554 target_sigset_t target_set;
3555 int i;
3557 frame_addr = regs->active_tc.gpr[29];
3558 trace_user_do_sigreturn(regs, frame_addr);
3559 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3560 goto badframe;
3562 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3563 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3566 target_to_host_sigset_internal(&blocked, &target_set);
3567 set_sigmask(&blocked);
3569 restore_sigcontext(regs, &frame->sf_sc);
3571 #if 0
3573 * Don't let your children do this ...
3575 __asm__ __volatile__(
3576 "move\t$29, %0\n\t"
3577 "j\tsyscall_exit"
3578 :/* no outputs */
3579 :"r" (&regs));
3580 /* Unreached */
3581 #endif
3583 regs->active_tc.PC = regs->CP0_EPC;
3584 mips_set_hflags_isa_mode_from_pc(regs);
3585 /* I am not sure this is right, but it seems to work
3586 * maybe a problem with nested signals ? */
3587 regs->CP0_EPC = 0;
3588 return -TARGET_QEMU_ESIGRETURN;
3590 badframe:
3591 force_sig(TARGET_SIGSEGV);
3592 return -TARGET_QEMU_ESIGRETURN;
3594 # endif /* O32 */
3596 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3597 target_siginfo_t *info,
3598 target_sigset_t *set, CPUMIPSState *env)
3600 struct target_rt_sigframe *frame;
3601 abi_ulong frame_addr;
3602 int i;
3604 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3605 trace_user_setup_rt_frame(env, frame_addr);
3606 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3607 goto give_sigsegv;
3610 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3612 tswap_siginfo(&frame->rs_info, info);
3614 __put_user(0, &frame->rs_uc.tuc_flags);
3615 __put_user(0, &frame->rs_uc.tuc_link);
3616 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3617 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3618 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3619 &frame->rs_uc.tuc_stack.ss_flags);
3621 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3623 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3624 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3628 * Arguments to signal handler:
3630 * a0 = signal number
3631 * a1 = pointer to siginfo_t
3632 * a2 = pointer to ucontext_t
3634 * $25 and PC point to the signal handler, $29 points to the
3635 * struct sigframe.
3637 env->active_tc.gpr[ 4] = sig;
3638 env->active_tc.gpr[ 5] = frame_addr
3639 + offsetof(struct target_rt_sigframe, rs_info);
3640 env->active_tc.gpr[ 6] = frame_addr
3641 + offsetof(struct target_rt_sigframe, rs_uc);
3642 env->active_tc.gpr[29] = frame_addr;
3643 env->active_tc.gpr[31] = frame_addr
3644 + offsetof(struct target_rt_sigframe, rs_code);
3645 /* The original kernel code sets CP0_EPC to the handler
3646 * since it returns to userland using eret
3647 * we cannot do this here, and we must set PC directly */
3648 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3649 mips_set_hflags_isa_mode_from_pc(env);
3650 unlock_user_struct(frame, frame_addr, 1);
3651 return;
3653 give_sigsegv:
3654 unlock_user_struct(frame, frame_addr, 1);
3655 force_sigsegv(sig);
3658 long do_rt_sigreturn(CPUMIPSState *env)
3660 struct target_rt_sigframe *frame;
3661 abi_ulong frame_addr;
3662 sigset_t blocked;
3664 frame_addr = env->active_tc.gpr[29];
3665 trace_user_do_rt_sigreturn(env, frame_addr);
3666 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3667 goto badframe;
3670 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3671 set_sigmask(&blocked);
3673 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3675 if (do_sigaltstack(frame_addr +
3676 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3677 0, get_sp_from_cpustate(env)) == -EFAULT)
3678 goto badframe;
3680 env->active_tc.PC = env->CP0_EPC;
3681 mips_set_hflags_isa_mode_from_pc(env);
3682 /* I am not sure this is right, but it seems to work
3683 * maybe a problem with nested signals ? */
3684 env->CP0_EPC = 0;
3685 return -TARGET_QEMU_ESIGRETURN;
3687 badframe:
3688 force_sig(TARGET_SIGSEGV);
3689 return -TARGET_QEMU_ESIGRETURN;
3692 #elif defined(TARGET_SH4)
3695 * code and data structures from linux kernel:
3696 * include/asm-sh/sigcontext.h
3697 * arch/sh/kernel/signal.c
3700 struct target_sigcontext {
3701 target_ulong oldmask;
3703 /* CPU registers */
3704 target_ulong sc_gregs[16];
3705 target_ulong sc_pc;
3706 target_ulong sc_pr;
3707 target_ulong sc_sr;
3708 target_ulong sc_gbr;
3709 target_ulong sc_mach;
3710 target_ulong sc_macl;
3712 /* FPU registers */
3713 target_ulong sc_fpregs[16];
3714 target_ulong sc_xfpregs[16];
3715 unsigned int sc_fpscr;
3716 unsigned int sc_fpul;
3717 unsigned int sc_ownedfp;
3720 struct target_sigframe
3722 struct target_sigcontext sc;
3723 target_ulong extramask[TARGET_NSIG_WORDS-1];
3724 uint16_t retcode[3];
3728 struct target_ucontext {
3729 target_ulong tuc_flags;
3730 struct target_ucontext *tuc_link;
3731 target_stack_t tuc_stack;
3732 struct target_sigcontext tuc_mcontext;
3733 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3736 struct target_rt_sigframe
3738 struct target_siginfo info;
3739 struct target_ucontext uc;
3740 uint16_t retcode[3];
3744 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3745 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3747 static abi_ulong get_sigframe(struct target_sigaction *ka,
3748 unsigned long sp, size_t frame_size)
3750 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3751 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3754 return (sp - frame_size) & -8ul;
3757 /* Notice when we're in the middle of a gUSA region and reset.
3758 Note that this will only occur for !parallel_cpus, as we will
3759 translate such sequences differently in a parallel context. */
3760 static void unwind_gusa(CPUSH4State *regs)
3762 /* If the stack pointer is sufficiently negative, and we haven't
3763 completed the sequence, then reset to the entry to the region. */
3764 /* ??? The SH4 kernel checks for and address above 0xC0000000.
3765 However, the page mappings in qemu linux-user aren't as restricted
3766 and we wind up with the normal stack mapped above 0xF0000000.
3767 That said, there is no reason why the kernel should be allowing
3768 a gUSA region that spans 1GB. Use a tighter check here, for what
3769 can actually be enabled by the immediate move. */
3770 if (regs->gregs[15] >= -128u && regs->pc < regs->gregs[0]) {
3771 /* Reset the PC to before the gUSA region, as computed from
3772 R0 = region end, SP = -(region size), plus one more for the
3773 insn that actually initializes SP to the region size. */
3774 regs->pc = regs->gregs[0] + regs->gregs[15] - 2;
3776 /* Reset the SP to the saved version in R1. */
3777 regs->gregs[15] = regs->gregs[1];
3781 static void setup_sigcontext(struct target_sigcontext *sc,
3782 CPUSH4State *regs, unsigned long mask)
3784 int i;
3786 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3787 COPY(gregs[0]); COPY(gregs[1]);
3788 COPY(gregs[2]); COPY(gregs[3]);
3789 COPY(gregs[4]); COPY(gregs[5]);
3790 COPY(gregs[6]); COPY(gregs[7]);
3791 COPY(gregs[8]); COPY(gregs[9]);
3792 COPY(gregs[10]); COPY(gregs[11]);
3793 COPY(gregs[12]); COPY(gregs[13]);
3794 COPY(gregs[14]); COPY(gregs[15]);
3795 COPY(gbr); COPY(mach);
3796 COPY(macl); COPY(pr);
3797 COPY(sr); COPY(pc);
3798 #undef COPY
3800 for (i=0; i<16; i++) {
3801 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3803 __put_user(regs->fpscr, &sc->sc_fpscr);
3804 __put_user(regs->fpul, &sc->sc_fpul);
3806 /* non-iBCS2 extensions.. */
3807 __put_user(mask, &sc->oldmask);
3810 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3812 int i;
3814 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3815 COPY(gregs[0]); COPY(gregs[1]);
3816 COPY(gregs[2]); COPY(gregs[3]);
3817 COPY(gregs[4]); COPY(gregs[5]);
3818 COPY(gregs[6]); COPY(gregs[7]);
3819 COPY(gregs[8]); COPY(gregs[9]);
3820 COPY(gregs[10]); COPY(gregs[11]);
3821 COPY(gregs[12]); COPY(gregs[13]);
3822 COPY(gregs[14]); COPY(gregs[15]);
3823 COPY(gbr); COPY(mach);
3824 COPY(macl); COPY(pr);
3825 COPY(sr); COPY(pc);
3826 #undef COPY
3828 for (i=0; i<16; i++) {
3829 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3831 __get_user(regs->fpscr, &sc->sc_fpscr);
3832 __get_user(regs->fpul, &sc->sc_fpul);
3834 regs->tra = -1; /* disable syscall checks */
3835 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3838 static void setup_frame(int sig, struct target_sigaction *ka,
3839 target_sigset_t *set, CPUSH4State *regs)
3841 struct target_sigframe *frame;
3842 abi_ulong frame_addr;
3843 int i;
3845 unwind_gusa(regs);
3847 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3848 trace_user_setup_frame(regs, frame_addr);
3849 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3850 goto give_sigsegv;
3853 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3855 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3856 __put_user(set->sig[i + 1], &frame->extramask[i]);
3859 /* Set up to return from userspace. If provided, use a stub
3860 already in userspace. */
3861 if (ka->sa_flags & TARGET_SA_RESTORER) {
3862 regs->pr = (unsigned long) ka->sa_restorer;
3863 } else {
3864 /* Generate return code (system call to sigreturn) */
3865 abi_ulong retcode_addr = frame_addr +
3866 offsetof(struct target_sigframe, retcode);
3867 __put_user(MOVW(2), &frame->retcode[0]);
3868 __put_user(TRAP_NOARG, &frame->retcode[1]);
3869 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3870 regs->pr = (unsigned long) retcode_addr;
3873 /* Set up registers for signal handler */
3874 regs->gregs[15] = frame_addr;
3875 regs->gregs[4] = sig; /* Arg for signal handler */
3876 regs->gregs[5] = 0;
3877 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
3878 regs->pc = (unsigned long) ka->_sa_handler;
3879 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3881 unlock_user_struct(frame, frame_addr, 1);
3882 return;
3884 give_sigsegv:
3885 unlock_user_struct(frame, frame_addr, 1);
3886 force_sigsegv(sig);
3889 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3890 target_siginfo_t *info,
3891 target_sigset_t *set, CPUSH4State *regs)
3893 struct target_rt_sigframe *frame;
3894 abi_ulong frame_addr;
3895 int i;
3897 unwind_gusa(regs);
3899 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3900 trace_user_setup_rt_frame(regs, frame_addr);
3901 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3902 goto give_sigsegv;
3905 tswap_siginfo(&frame->info, info);
3907 /* Create the ucontext. */
3908 __put_user(0, &frame->uc.tuc_flags);
3909 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3910 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3911 &frame->uc.tuc_stack.ss_sp);
3912 __put_user(sas_ss_flags(regs->gregs[15]),
3913 &frame->uc.tuc_stack.ss_flags);
3914 __put_user(target_sigaltstack_used.ss_size,
3915 &frame->uc.tuc_stack.ss_size);
3916 setup_sigcontext(&frame->uc.tuc_mcontext,
3917 regs, set->sig[0]);
3918 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3919 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3922 /* Set up to return from userspace. If provided, use a stub
3923 already in userspace. */
3924 if (ka->sa_flags & TARGET_SA_RESTORER) {
3925 regs->pr = (unsigned long) ka->sa_restorer;
3926 } else {
3927 /* Generate return code (system call to sigreturn) */
3928 abi_ulong retcode_addr = frame_addr +
3929 offsetof(struct target_rt_sigframe, retcode);
3930 __put_user(MOVW(2), &frame->retcode[0]);
3931 __put_user(TRAP_NOARG, &frame->retcode[1]);
3932 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3933 regs->pr = (unsigned long) retcode_addr;
3936 /* Set up registers for signal handler */
3937 regs->gregs[15] = frame_addr;
3938 regs->gregs[4] = sig; /* Arg for signal handler */
3939 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3940 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3941 regs->pc = (unsigned long) ka->_sa_handler;
3942 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3944 unlock_user_struct(frame, frame_addr, 1);
3945 return;
3947 give_sigsegv:
3948 unlock_user_struct(frame, frame_addr, 1);
3949 force_sigsegv(sig);
3952 long do_sigreturn(CPUSH4State *regs)
3954 struct target_sigframe *frame;
3955 abi_ulong frame_addr;
3956 sigset_t blocked;
3957 target_sigset_t target_set;
3958 int i;
3959 int err = 0;
3961 frame_addr = regs->gregs[15];
3962 trace_user_do_sigreturn(regs, frame_addr);
3963 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3964 goto badframe;
3967 __get_user(target_set.sig[0], &frame->sc.oldmask);
3968 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3969 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3972 if (err)
3973 goto badframe;
3975 target_to_host_sigset_internal(&blocked, &target_set);
3976 set_sigmask(&blocked);
3978 restore_sigcontext(regs, &frame->sc);
3980 unlock_user_struct(frame, frame_addr, 0);
3981 return -TARGET_QEMU_ESIGRETURN;
3983 badframe:
3984 unlock_user_struct(frame, frame_addr, 0);
3985 force_sig(TARGET_SIGSEGV);
3986 return -TARGET_QEMU_ESIGRETURN;
3989 long do_rt_sigreturn(CPUSH4State *regs)
3991 struct target_rt_sigframe *frame;
3992 abi_ulong frame_addr;
3993 sigset_t blocked;
3995 frame_addr = regs->gregs[15];
3996 trace_user_do_rt_sigreturn(regs, frame_addr);
3997 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3998 goto badframe;
4001 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
4002 set_sigmask(&blocked);
4004 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
4006 if (do_sigaltstack(frame_addr +
4007 offsetof(struct target_rt_sigframe, uc.tuc_stack),
4008 0, get_sp_from_cpustate(regs)) == -EFAULT) {
4009 goto badframe;
4012 unlock_user_struct(frame, frame_addr, 0);
4013 return -TARGET_QEMU_ESIGRETURN;
4015 badframe:
4016 unlock_user_struct(frame, frame_addr, 0);
4017 force_sig(TARGET_SIGSEGV);
4018 return -TARGET_QEMU_ESIGRETURN;
4020 #elif defined(TARGET_MICROBLAZE)
4022 struct target_sigcontext {
4023 struct target_pt_regs regs; /* needs to be first */
4024 uint32_t oldmask;
4027 struct target_stack_t {
4028 abi_ulong ss_sp;
4029 int ss_flags;
4030 unsigned int ss_size;
4033 struct target_ucontext {
4034 abi_ulong tuc_flags;
4035 abi_ulong tuc_link;
4036 struct target_stack_t tuc_stack;
4037 struct target_sigcontext tuc_mcontext;
4038 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
4041 /* Signal frames. */
4042 struct target_signal_frame {
4043 struct target_ucontext uc;
4044 uint32_t extramask[TARGET_NSIG_WORDS - 1];
4045 uint32_t tramp[2];
4048 struct rt_signal_frame {
4049 siginfo_t info;
4050 ucontext_t uc;
4051 uint32_t tramp[2];
4054 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
4056 __put_user(env->regs[0], &sc->regs.r0);
4057 __put_user(env->regs[1], &sc->regs.r1);
4058 __put_user(env->regs[2], &sc->regs.r2);
4059 __put_user(env->regs[3], &sc->regs.r3);
4060 __put_user(env->regs[4], &sc->regs.r4);
4061 __put_user(env->regs[5], &sc->regs.r5);
4062 __put_user(env->regs[6], &sc->regs.r6);
4063 __put_user(env->regs[7], &sc->regs.r7);
4064 __put_user(env->regs[8], &sc->regs.r8);
4065 __put_user(env->regs[9], &sc->regs.r9);
4066 __put_user(env->regs[10], &sc->regs.r10);
4067 __put_user(env->regs[11], &sc->regs.r11);
4068 __put_user(env->regs[12], &sc->regs.r12);
4069 __put_user(env->regs[13], &sc->regs.r13);
4070 __put_user(env->regs[14], &sc->regs.r14);
4071 __put_user(env->regs[15], &sc->regs.r15);
4072 __put_user(env->regs[16], &sc->regs.r16);
4073 __put_user(env->regs[17], &sc->regs.r17);
4074 __put_user(env->regs[18], &sc->regs.r18);
4075 __put_user(env->regs[19], &sc->regs.r19);
4076 __put_user(env->regs[20], &sc->regs.r20);
4077 __put_user(env->regs[21], &sc->regs.r21);
4078 __put_user(env->regs[22], &sc->regs.r22);
4079 __put_user(env->regs[23], &sc->regs.r23);
4080 __put_user(env->regs[24], &sc->regs.r24);
4081 __put_user(env->regs[25], &sc->regs.r25);
4082 __put_user(env->regs[26], &sc->regs.r26);
4083 __put_user(env->regs[27], &sc->regs.r27);
4084 __put_user(env->regs[28], &sc->regs.r28);
4085 __put_user(env->regs[29], &sc->regs.r29);
4086 __put_user(env->regs[30], &sc->regs.r30);
4087 __put_user(env->regs[31], &sc->regs.r31);
4088 __put_user(env->sregs[SR_PC], &sc->regs.pc);
4091 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
4093 __get_user(env->regs[0], &sc->regs.r0);
4094 __get_user(env->regs[1], &sc->regs.r1);
4095 __get_user(env->regs[2], &sc->regs.r2);
4096 __get_user(env->regs[3], &sc->regs.r3);
4097 __get_user(env->regs[4], &sc->regs.r4);
4098 __get_user(env->regs[5], &sc->regs.r5);
4099 __get_user(env->regs[6], &sc->regs.r6);
4100 __get_user(env->regs[7], &sc->regs.r7);
4101 __get_user(env->regs[8], &sc->regs.r8);
4102 __get_user(env->regs[9], &sc->regs.r9);
4103 __get_user(env->regs[10], &sc->regs.r10);
4104 __get_user(env->regs[11], &sc->regs.r11);
4105 __get_user(env->regs[12], &sc->regs.r12);
4106 __get_user(env->regs[13], &sc->regs.r13);
4107 __get_user(env->regs[14], &sc->regs.r14);
4108 __get_user(env->regs[15], &sc->regs.r15);
4109 __get_user(env->regs[16], &sc->regs.r16);
4110 __get_user(env->regs[17], &sc->regs.r17);
4111 __get_user(env->regs[18], &sc->regs.r18);
4112 __get_user(env->regs[19], &sc->regs.r19);
4113 __get_user(env->regs[20], &sc->regs.r20);
4114 __get_user(env->regs[21], &sc->regs.r21);
4115 __get_user(env->regs[22], &sc->regs.r22);
4116 __get_user(env->regs[23], &sc->regs.r23);
4117 __get_user(env->regs[24], &sc->regs.r24);
4118 __get_user(env->regs[25], &sc->regs.r25);
4119 __get_user(env->regs[26], &sc->regs.r26);
4120 __get_user(env->regs[27], &sc->regs.r27);
4121 __get_user(env->regs[28], &sc->regs.r28);
4122 __get_user(env->regs[29], &sc->regs.r29);
4123 __get_user(env->regs[30], &sc->regs.r30);
4124 __get_user(env->regs[31], &sc->regs.r31);
4125 __get_user(env->sregs[SR_PC], &sc->regs.pc);
4128 static abi_ulong get_sigframe(struct target_sigaction *ka,
4129 CPUMBState *env, int frame_size)
4131 abi_ulong sp = env->regs[1];
4133 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
4134 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4137 return ((sp - frame_size) & -8UL);
4140 static void setup_frame(int sig, struct target_sigaction *ka,
4141 target_sigset_t *set, CPUMBState *env)
4143 struct target_signal_frame *frame;
4144 abi_ulong frame_addr;
4145 int i;
4147 frame_addr = get_sigframe(ka, env, sizeof *frame);
4148 trace_user_setup_frame(env, frame_addr);
4149 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
4150 goto badframe;
4152 /* Save the mask. */
4153 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
4155 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4156 __put_user(set->sig[i], &frame->extramask[i - 1]);
4159 setup_sigcontext(&frame->uc.tuc_mcontext, env);
4161 /* Set up to return from userspace. If provided, use a stub
4162 already in userspace. */
4163 /* minus 8 is offset to cater for "rtsd r15,8" offset */
4164 if (ka->sa_flags & TARGET_SA_RESTORER) {
4165 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
4166 } else {
4167 uint32_t t;
4168 /* Note, these encodings are _big endian_! */
4169 /* addi r12, r0, __NR_sigreturn */
4170 t = 0x31800000UL | TARGET_NR_sigreturn;
4171 __put_user(t, frame->tramp + 0);
4172 /* brki r14, 0x8 */
4173 t = 0xb9cc0008UL;
4174 __put_user(t, frame->tramp + 1);
4176 /* Return from sighandler will jump to the tramp.
4177 Negative 8 offset because return is rtsd r15, 8 */
4178 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
4179 - 8;
4182 /* Set up registers for signal handler */
4183 env->regs[1] = frame_addr;
4184 /* Signal handler args: */
4185 env->regs[5] = sig; /* Arg 0: signum */
4186 env->regs[6] = 0;
4187 /* arg 1: sigcontext */
4188 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
4190 /* Offset of 4 to handle microblaze rtid r14, 0 */
4191 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
4193 unlock_user_struct(frame, frame_addr, 1);
4194 return;
4195 badframe:
4196 force_sigsegv(sig);
4199 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4200 target_siginfo_t *info,
4201 target_sigset_t *set, CPUMBState *env)
4203 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
4206 long do_sigreturn(CPUMBState *env)
4208 struct target_signal_frame *frame;
4209 abi_ulong frame_addr;
4210 target_sigset_t target_set;
4211 sigset_t set;
4212 int i;
4214 frame_addr = env->regs[R_SP];
4215 trace_user_do_sigreturn(env, frame_addr);
4216 /* Make sure the guest isn't playing games. */
4217 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
4218 goto badframe;
4220 /* Restore blocked signals */
4221 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
4222 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4223 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
4225 target_to_host_sigset_internal(&set, &target_set);
4226 set_sigmask(&set);
4228 restore_sigcontext(&frame->uc.tuc_mcontext, env);
4229 /* We got here through a sigreturn syscall, our path back is via an
4230 rtb insn so setup r14 for that. */
4231 env->regs[14] = env->sregs[SR_PC];
4233 unlock_user_struct(frame, frame_addr, 0);
4234 return -TARGET_QEMU_ESIGRETURN;
4235 badframe:
4236 force_sig(TARGET_SIGSEGV);
4237 return -TARGET_QEMU_ESIGRETURN;
4240 long do_rt_sigreturn(CPUMBState *env)
4242 trace_user_do_rt_sigreturn(env, 0);
4243 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
4244 return -TARGET_ENOSYS;
4247 #elif defined(TARGET_CRIS)
4249 struct target_sigcontext {
4250 struct target_pt_regs regs; /* needs to be first */
4251 uint32_t oldmask;
4252 uint32_t usp; /* usp before stacking this gunk on it */
4255 /* Signal frames. */
4256 struct target_signal_frame {
4257 struct target_sigcontext sc;
4258 uint32_t extramask[TARGET_NSIG_WORDS - 1];
4259 uint16_t retcode[4]; /* Trampoline code. */
4262 struct rt_signal_frame {
4263 siginfo_t *pinfo;
4264 void *puc;
4265 siginfo_t info;
4266 ucontext_t uc;
4267 uint16_t retcode[4]; /* Trampoline code. */
4270 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
4272 __put_user(env->regs[0], &sc->regs.r0);
4273 __put_user(env->regs[1], &sc->regs.r1);
4274 __put_user(env->regs[2], &sc->regs.r2);
4275 __put_user(env->regs[3], &sc->regs.r3);
4276 __put_user(env->regs[4], &sc->regs.r4);
4277 __put_user(env->regs[5], &sc->regs.r5);
4278 __put_user(env->regs[6], &sc->regs.r6);
4279 __put_user(env->regs[7], &sc->regs.r7);
4280 __put_user(env->regs[8], &sc->regs.r8);
4281 __put_user(env->regs[9], &sc->regs.r9);
4282 __put_user(env->regs[10], &sc->regs.r10);
4283 __put_user(env->regs[11], &sc->regs.r11);
4284 __put_user(env->regs[12], &sc->regs.r12);
4285 __put_user(env->regs[13], &sc->regs.r13);
4286 __put_user(env->regs[14], &sc->usp);
4287 __put_user(env->regs[15], &sc->regs.acr);
4288 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
4289 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
4290 __put_user(env->pc, &sc->regs.erp);
4293 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
4295 __get_user(env->regs[0], &sc->regs.r0);
4296 __get_user(env->regs[1], &sc->regs.r1);
4297 __get_user(env->regs[2], &sc->regs.r2);
4298 __get_user(env->regs[3], &sc->regs.r3);
4299 __get_user(env->regs[4], &sc->regs.r4);
4300 __get_user(env->regs[5], &sc->regs.r5);
4301 __get_user(env->regs[6], &sc->regs.r6);
4302 __get_user(env->regs[7], &sc->regs.r7);
4303 __get_user(env->regs[8], &sc->regs.r8);
4304 __get_user(env->regs[9], &sc->regs.r9);
4305 __get_user(env->regs[10], &sc->regs.r10);
4306 __get_user(env->regs[11], &sc->regs.r11);
4307 __get_user(env->regs[12], &sc->regs.r12);
4308 __get_user(env->regs[13], &sc->regs.r13);
4309 __get_user(env->regs[14], &sc->usp);
4310 __get_user(env->regs[15], &sc->regs.acr);
4311 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
4312 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
4313 __get_user(env->pc, &sc->regs.erp);
4316 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
4318 abi_ulong sp;
4319 /* Align the stack downwards to 4. */
4320 sp = (env->regs[R_SP] & ~3);
4321 return sp - framesize;
4324 static void setup_frame(int sig, struct target_sigaction *ka,
4325 target_sigset_t *set, CPUCRISState *env)
4327 struct target_signal_frame *frame;
4328 abi_ulong frame_addr;
4329 int i;
4331 frame_addr = get_sigframe(env, sizeof *frame);
4332 trace_user_setup_frame(env, frame_addr);
4333 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
4334 goto badframe;
4337 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
4338 * use this trampoline anymore but it sets it up for GDB.
4339 * In QEMU, using the trampoline simplifies things a bit so we use it.
4341 * This is movu.w __NR_sigreturn, r9; break 13;
4343 __put_user(0x9c5f, frame->retcode+0);
4344 __put_user(TARGET_NR_sigreturn,
4345 frame->retcode + 1);
4346 __put_user(0xe93d, frame->retcode + 2);
4348 /* Save the mask. */
4349 __put_user(set->sig[0], &frame->sc.oldmask);
4351 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4352 __put_user(set->sig[i], &frame->extramask[i - 1]);
4355 setup_sigcontext(&frame->sc, env);
4357 /* Move the stack and setup the arguments for the handler. */
4358 env->regs[R_SP] = frame_addr;
4359 env->regs[10] = sig;
4360 env->pc = (unsigned long) ka->_sa_handler;
4361 /* Link SRP so the guest returns through the trampoline. */
4362 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
4364 unlock_user_struct(frame, frame_addr, 1);
4365 return;
4366 badframe:
4367 force_sigsegv(sig);
/* RT signal frames are not implemented for CRIS; log and do nothing. */
4370 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4371 target_siginfo_t *info,
4372 target_sigset_t *set, CPUCRISState *env)
4374 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
/* CRIS sigreturn: unwind the signal frame found at the guest SP,
 * restore the saved signal mask and CPU state. Returns
 * -TARGET_QEMU_ESIGRETURN so the main loop does not clobber r10. */
4377 long do_sigreturn(CPUCRISState *env)
4379 struct target_signal_frame *frame;
4380 abi_ulong frame_addr;
4381 target_sigset_t target_set;
4382 sigset_t set;
4383 int i;
4385 frame_addr = env->regs[R_SP];
4386 trace_user_do_sigreturn(env, frame_addr);
4387 /* Make sure the guest isn't playing games. */
4388 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
4389 goto badframe;
4392 /* Restore blocked signals */
4393 __get_user(target_set.sig[0], &frame->sc.oldmask);
4394 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4395 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
4397 target_to_host_sigset_internal(&set, &target_set);
4398 set_sigmask(&set);
4400 restore_sigcontext(&frame->sc, env);
4401 unlock_user_struct(frame, frame_addr, 0);
4402 return -TARGET_QEMU_ESIGRETURN;
4403 badframe:
4404 force_sig(TARGET_SIGSEGV);
4405 return -TARGET_QEMU_ESIGRETURN;
/* RT sigreturn is not implemented for CRIS; report ENOSYS. */
4408 long do_rt_sigreturn(CPUCRISState *env)
4410 trace_user_do_rt_sigreturn(env, 0);
4411 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
4412 return -TARGET_ENOSYS;
4415 #elif defined(TARGET_NIOS2)
/* NIOS2 signal-frame layout: versioned sigcontext (32 GP regs),
 * ucontext wrapping it, and the rt frame pushed on the guest stack
 * (siginfo followed by ucontext). */
4417 #define MCONTEXT_VERSION 2
4419 struct target_sigcontext {
4420 int version;
4421 unsigned long gregs[32];
4424 struct target_ucontext {
4425 abi_ulong tuc_flags;
4426 abi_ulong tuc_link;
4427 target_stack_t tuc_stack;
4428 struct target_sigcontext tuc_mcontext;
4429 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4432 struct target_rt_sigframe {
4433 struct target_siginfo info;
4434 struct target_ucontext uc;
/* Pick the stack pointer for the handler: switch to the alternate
 * signal stack when SA_ONSTACK is set and we are not already on it,
 * otherwise keep the current sp. */
4437 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka)
4439 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) {
4440 #ifdef CONFIG_STACK_GROWSUP
4441 return target_sigaltstack_used.ss_sp;
4442 #else
4443 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4444 #endif
4446 return sp;
/* Fill the guest ucontext's mcontext from CPU state: r1..r23 into
 * gregs[0..22], then RA/FP/GP/EA/SP at the fixed kernel slots.
 * Note gregs[26] is deliberately skipped (no user-settable bits).
 * Always returns 0. */
4449 static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
4451 unsigned long *gregs = uc->tuc_mcontext.gregs;
4453 __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
4454 __put_user(env->regs[1], &gregs[0]);
4455 __put_user(env->regs[2], &gregs[1]);
4456 __put_user(env->regs[3], &gregs[2]);
4457 __put_user(env->regs[4], &gregs[3]);
4458 __put_user(env->regs[5], &gregs[4]);
4459 __put_user(env->regs[6], &gregs[5]);
4460 __put_user(env->regs[7], &gregs[6]);
4461 __put_user(env->regs[8], &gregs[7]);
4462 __put_user(env->regs[9], &gregs[8]);
4463 __put_user(env->regs[10], &gregs[9]);
4464 __put_user(env->regs[11], &gregs[10]);
4465 __put_user(env->regs[12], &gregs[11]);
4466 __put_user(env->regs[13], &gregs[12]);
4467 __put_user(env->regs[14], &gregs[13]);
4468 __put_user(env->regs[15], &gregs[14]);
4469 __put_user(env->regs[16], &gregs[15]);
4470 __put_user(env->regs[17], &gregs[16]);
4471 __put_user(env->regs[18], &gregs[17]);
4472 __put_user(env->regs[19], &gregs[18]);
4473 __put_user(env->regs[20], &gregs[19]);
4474 __put_user(env->regs[21], &gregs[20]);
4475 __put_user(env->regs[22], &gregs[21]);
4476 __put_user(env->regs[23], &gregs[22]);
4477 __put_user(env->regs[R_RA], &gregs[23]);
4478 __put_user(env->regs[R_FP], &gregs[24]);
4479 __put_user(env->regs[R_GP], &gregs[25]);
4480 __put_user(env->regs[R_EA], &gregs[27]);
4481 __put_user(env->regs[R_SP], &gregs[28]);
4483 return 0;
/* Restore CPU state from a guest ucontext, mirror of
 * rt_setup_ucontext(). Returns 1 on a bad frame (version mismatch or
 * sigaltstack fault), 0 on success with the guest's r2 (syscall
 * return value) stored through *pr2. */
4486 static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
4487 int *pr2)
4489 int temp;
4490 abi_ulong off, frame_addr = env->regs[R_SP];
4491 unsigned long *gregs = uc->tuc_mcontext.gregs;
4492 int err;
4494 /* Always make any pending restarted system calls return -EINTR */
4495 /* current->restart_block.fn = do_no_restart_syscall; */
/* Refuse frames written with a different mcontext layout. */
4497 __get_user(temp, &uc->tuc_mcontext.version);
4498 if (temp != MCONTEXT_VERSION) {
4499 return 1;
4502 /* restore passed registers */
4503 __get_user(env->regs[1], &gregs[0]);
4504 __get_user(env->regs[2], &gregs[1]);
4505 __get_user(env->regs[3], &gregs[2]);
4506 __get_user(env->regs[4], &gregs[3]);
4507 __get_user(env->regs[5], &gregs[4]);
4508 __get_user(env->regs[6], &gregs[5]);
4509 __get_user(env->regs[7], &gregs[6]);
4510 __get_user(env->regs[8], &gregs[7]);
4511 __get_user(env->regs[9], &gregs[8]);
4512 __get_user(env->regs[10], &gregs[9]);
4513 __get_user(env->regs[11], &gregs[10]);
4514 __get_user(env->regs[12], &gregs[11]);
4515 __get_user(env->regs[13], &gregs[12]);
4516 __get_user(env->regs[14], &gregs[13]);
4517 __get_user(env->regs[15], &gregs[14]);
4518 __get_user(env->regs[16], &gregs[15]);
4519 __get_user(env->regs[17], &gregs[16]);
4520 __get_user(env->regs[18], &gregs[17]);
4521 __get_user(env->regs[19], &gregs[18]);
4522 __get_user(env->regs[20], &gregs[19]);
4523 __get_user(env->regs[21], &gregs[20]);
4524 __get_user(env->regs[22], &gregs[21]);
4525 __get_user(env->regs[23], &gregs[22]);
4526 /* gregs[23] is handled below */
4527 /* Verify, should this be settable */
4528 __get_user(env->regs[R_FP], &gregs[24]);
4529 /* Verify, should this be settable */
4530 __get_user(env->regs[R_GP], &gregs[25]);
4531 /* Not really necessary no user settable bits */
4532 __get_user(temp, &gregs[26]);
4533 __get_user(env->regs[R_EA], &gregs[27]);
4535 __get_user(env->regs[R_RA], &gregs[23]);
4536 __get_user(env->regs[R_SP], &gregs[28]);
/* Restore the sigaltstack state saved in the frame's ucontext. */
4538 off = offsetof(struct target_rt_sigframe, uc.tuc_stack);
4539 err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env));
4540 if (err == -EFAULT) {
4541 return 1;
4544 *pr2 = env->regs[2];
4545 return 0;
/* Choose the location of a new NIOS2 signal frame: current SP (or the
 * alternate stack via sigsp()), minus the frame size, aligned down to
 * 8 bytes.
 * NOTE(review): the guest address is returned cast to a host void* and
 * later dereferenced directly by setup_rt_frame — this looks like it
 * assumes guest_base == 0; verify against lock_user usage elsewhere. */
4548 static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
4549 size_t frame_size)
4551 unsigned long usp;
4553 /* Default to using normal stack. */
4554 usp = env->regs[R_SP];
4556 /* This is the X/Open sanctioned signal stack switching. */
4557 usp = sigsp(usp, ka);
4559 /* Verify, is it 32 or 64 bit aligned */
4560 return (void *)((usp - frame_size) & -8UL);
/* Build a NIOS2 rt signal frame and set up registers so the guest
 * enters the handler with (sig, &info, &uc) and returns through the
 * kuser-page sigreturn trampoline at 0x1044.
 * NOTE(review): 'frame' comes straight from get_sigframe() without a
 * lock_user_struct() mapping, and &frame->info / &frame->uc are stored
 * into guest registers as if host == guest addresses — confirm this is
 * intentional for this port. */
4563 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4564 target_siginfo_t *info,
4565 target_sigset_t *set,
4566 CPUNios2State *env)
4568 struct target_rt_sigframe *frame;
4569 int i, err = 0;
4571 frame = get_sigframe(ka, env, sizeof(*frame));
4573 if (ka->sa_flags & SA_SIGINFO) {
4574 tswap_siginfo(&frame->info, info);
4577 /* Create the ucontext. */
4578 __put_user(0, &frame->uc.tuc_flags);
4579 __put_user(0, &frame->uc.tuc_link);
4580 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4581 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags);
4582 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4583 err |= rt_setup_ucontext(&frame->uc, env);
4584 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4585 __put_user((abi_ulong)set->sig[i],
4586 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4589 if (err) {
4590 goto give_sigsegv;
4593 /* Set up to return from userspace; jump to fixed address sigreturn
4594 trampoline on kuser page. */
4595 env->regs[R_RA] = (unsigned long) (0x1044);
4597 /* Set up registers for signal handler */
4598 env->regs[R_SP] = (unsigned long) frame;
4599 env->regs[4] = (unsigned long) sig;
4600 env->regs[5] = (unsigned long) &frame->info;
4601 env->regs[6] = (unsigned long) &frame->uc;
4602 env->regs[R_EA] = (unsigned long) ka->_sa_handler;
4603 return;
4605 give_sigsegv:
/* If delivering SIGSEGV itself failed, fall back to the default
 * handler so the forced signal can terminate the process. */
4606 if (sig == TARGET_SIGSEGV) {
4607 ka->_sa_handler = TARGET_SIG_DFL;
4609 force_sigsegv(sig);
4610 return;
/* Non-RT sigreturn is not implemented for NIOS2; report ENOSYS. */
4613 long do_sigreturn(CPUNios2State *env)
4615 trace_user_do_sigreturn(env, 0);
4616 fprintf(stderr, "do_sigreturn: not implemented\n");
4617 return -TARGET_ENOSYS;
/* NIOS2 rt_sigreturn: unwind the rt signal frame at the guest SP,
 * restore the saved signal mask and CPU state, and resume with the
 * r2 value that was live when the signal was taken. A bad frame
 * forces SIGSEGV. */
4620 long do_rt_sigreturn(CPUNios2State *env)
4622 /* Verify, can we follow the stack back */
4623 abi_ulong frame_addr = env->regs[R_SP];
4624 struct target_rt_sigframe *frame;
4625 sigset_t set;
4626 int rval;
4628 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4629 goto badframe;
4632 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
/* Use set_sigmask() like the other sigreturn paths in this file
 * (CRIS/S390X do_sigreturn): do_sigprocmask() implements the guest
 * syscall and can fail with an error code that was being silently
 * dropped here; sigreturn must update QEMU's view of the guest
 * signal mask unconditionally. */
4633 set_sigmask(&set);
4635 if (rt_restore_ucontext(env, &frame->uc, &rval)) {
4636 goto badframe;
4639 unlock_user_struct(frame, frame_addr, 0);
4640 return rval;
4642 badframe:
4643 unlock_user_struct(frame, frame_addr, 0);
4644 force_sig(TARGET_SIGSEGV);
4645 return 0;
4647 /* TARGET_NIOS2 */
4649 #elif defined(TARGET_OPENRISC)
/* OPENRISC signal-frame layout: sigcontext (pt_regs + old mask + user
 * SP), ucontext wrapping it, and the rt frame with pointers to its own
 * siginfo/ucontext plus a 16-byte return trampoline. */
4651 struct target_sigcontext {
4652 struct target_pt_regs regs;
4653 abi_ulong oldmask;
4654 abi_ulong usp;
4657 struct target_ucontext {
4658 abi_ulong tuc_flags;
4659 abi_ulong tuc_link;
4660 target_stack_t tuc_stack;
4661 struct target_sigcontext tuc_mcontext;
4662 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4665 struct target_rt_sigframe {
4666 abi_ulong pinfo;
4667 uint64_t puc;
4668 struct target_siginfo info;
4669 struct target_sigcontext sc;
4670 struct target_ucontext uc;
4671 unsigned char retcode[16]; /* trampoline code */
/* Dead reference code: kernel-side restore_sigcontext kept under
 * '#if 0' for documentation purposes only — never compiled. */
4674 /* This is the asm-generic/ucontext.h version */
4675 #if 0
4676 static int restore_sigcontext(CPUOpenRISCState *regs,
4677 struct target_sigcontext *sc)
4679 unsigned int err = 0;
4680 unsigned long old_usp;
4682 /* Alwys make any pending restarted system call return -EINTR */
4683 current_thread_info()->restart_block.fn = do_no_restart_syscall;
4685 /* restore the regs from &sc->regs (same as sc, since regs is first)
4686 * (sc is already checked for VERIFY_READ since the sigframe was
4687 * checked in sys_sigreturn previously)
4690 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
4691 goto badframe;
4694 /* make sure the U-flag is set so user-mode cannot fool us */
4696 regs->sr &= ~SR_SM;
4698 /* restore the old USP as it was before we stacked the sc etc.
4699 * (we cannot just pop the sigcontext since we aligned the sp and
4700 * stuff after pushing it)
4703 __get_user(old_usp, &sc->usp);
4704 phx_signal("old_usp 0x%lx", old_usp);
4706 __PHX__ REALLY /* ??? */
4707 wrusp(old_usp);
4708 regs->gpr[1] = old_usp;
4710 /* TODO: the other ports use regs->orig_XX to disable syscall checks
4711 * after this completes, but we don't use that mechanism. maybe we can
4712 * use it now ?
4715 return err;
4717 badframe:
4718 return 1;
4720 #endif
4722 /* Set up a signal frame. */
/* Record the blocked-signal mask and the pre-signal user SP (gpr1) in
 * the guest sigcontext. The register copy itself is still commented
 * out — only oldmask/usp are written here. */
4724 static void setup_sigcontext(struct target_sigcontext *sc,
4725 CPUOpenRISCState *regs,
4726 unsigned long mask)
4728 unsigned long usp = cpu_get_gpr(regs, 1);
4730 /* copy the regs. they are first in sc so we can use sc directly */
4732 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
4734 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
4735 the signal handler. The frametype will be restored to its previous
4736 value in restore_sigcontext. */
4737 /*regs->frametype = CRIS_FRAME_NORMAL;*/
4739 /* then some other stuff */
4740 __put_user(mask, &sc->oldmask);
4741 __put_user(usp, &sc->usp);
/* Align a stack address down to a 4-byte boundary. */
4744 static inline unsigned long align_sigframe(unsigned long sp)
4746 return sp & ~3UL;
/* Choose the guest address for a new OPENRISC signal frame: switch to
 * the alternate stack if requested and not already on it, reserve and
 * align the frame, and return -1 (a guaranteed-faulting address) if
 * the frame would overflow the alternate stack. */
4749 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
4750 CPUOpenRISCState *regs,
4751 size_t frame_size)
4753 unsigned long sp = cpu_get_gpr(regs, 1);
4754 int onsigstack = on_sig_stack(sp);
4756 /* redzone */
4757 /* This is the X/Open sanctioned signal stack switching. */
4758 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
4759 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4762 sp = align_sigframe(sp - frame_size);
4765 * If we are on the alternate signal stack and would overflow it, don't.
4766 * Return an always-bogus address instead so we will die with SIGSEGV.
4769 if (onsigstack && !likely(on_sig_stack(sp))) {
4770 return -1L;
4773 return sp;
/* Build an OPENRISC rt signal frame: siginfo/ucontext pointers, the
 * sigaltstack snapshot, the sigcontext and an on-stack sigreturn
 * trampoline; then point pc at the handler with (sig, &info, &uc).
 * NOTE(review): 'err' is initialized but never set after this point,
 * so the give_sigsegv check on it is dead; also return_ip and the
 * gpr3..5 arguments are taken from host addresses of 'frame' fields
 * rather than frame_addr offsets — confirm guest_base handling. */
4776 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4777 target_siginfo_t *info,
4778 target_sigset_t *set, CPUOpenRISCState *env)
4780 int err = 0;
4781 abi_ulong frame_addr;
4782 unsigned long return_ip;
4783 struct target_rt_sigframe *frame;
4784 abi_ulong info_addr, uc_addr;
4786 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4787 trace_user_setup_rt_frame(env, frame_addr);
4788 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4789 goto give_sigsegv;
4792 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4793 __put_user(info_addr, &frame->pinfo);
4794 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4795 __put_user(uc_addr, &frame->puc);
4797 if (ka->sa_flags & SA_SIGINFO) {
4798 tswap_siginfo(&frame->info, info);
4801 /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/
4802 __put_user(0, &frame->uc.tuc_flags);
4803 __put_user(0, &frame->uc.tuc_link);
4804 __put_user(target_sigaltstack_used.ss_sp,
4805 &frame->uc.tuc_stack.ss_sp);
4806 __put_user(sas_ss_flags(cpu_get_gpr(env, 1)),
4807 &frame->uc.tuc_stack.ss_flags);
4808 __put_user(target_sigaltstack_used.ss_size,
4809 &frame->uc.tuc_stack.ss_size);
4810 setup_sigcontext(&frame->sc, env, set->sig[0]);
4812 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4814 /* trampoline - the desired return ip is the retcode itself */
4815 return_ip = (unsigned long)&frame->retcode;
4816 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
4817 __put_user(0xa960, (short *)(frame->retcode + 0));
4818 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4819 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4820 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
4822 if (err) {
4823 goto give_sigsegv;
4826 /* TODO what is the current->exec_domain stuff and invmap ? */
4828 /* Set up registers for signal handler */
4829 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4830 cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */
4831 cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */
4832 cpu_set_gpr(env, 4, (unsigned long)&frame->info); /* arg 2: (siginfo_t*) */
4833 cpu_set_gpr(env, 5, (unsigned long)&frame->uc); /* arg 3: ucontext */
4835 /* actually move the usp to reflect the stacked frame */
4836 cpu_set_gpr(env, 1, (unsigned long)frame);
4838 return;
4840 give_sigsegv:
4841 unlock_user_struct(frame, frame_addr, 1);
4842 force_sigsegv(sig);
/* Non-RT sigreturn is not implemented for OPENRISC; report ENOSYS. */
4845 long do_sigreturn(CPUOpenRISCState *env)
4847 trace_user_do_sigreturn(env, 0);
4848 fprintf(stderr, "do_sigreturn: not implemented\n");
4849 return -TARGET_ENOSYS;
/* RT sigreturn is not implemented for OPENRISC; report ENOSYS. */
4852 long do_rt_sigreturn(CPUOpenRISCState *env)
4854 trace_user_do_rt_sigreturn(env, 0);
4855 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4856 return -TARGET_ENOSYS;
4858 /* TARGET_OPENRISC */
4860 #elif defined(TARGET_S390X)
/* S390X signal-frame constants and layouts, mirroring the kernel's
 * arch/s390 definitions: register-set containers (PSW + 16 GPRs +
 * 16 ACRs + FP state), the classic 'sigframe' with an in-frame
 * sregs copy and 2-byte svc trampoline, and the rt_sigframe with a
 * full ucontext. FIXME markers note missing 31-bit-mode variants. */
4862 #define __NUM_GPRS 16
4863 #define __NUM_FPRS 16
4864 #define __NUM_ACRS 16
4866 #define S390_SYSCALL_SIZE 2
4867 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4869 #define _SIGCONTEXT_NSIG 64
4870 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4871 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4872 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4873 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4874 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
4876 typedef struct {
4877 target_psw_t psw;
4878 target_ulong gprs[__NUM_GPRS];
4879 unsigned int acrs[__NUM_ACRS];
4880 } target_s390_regs_common;
4882 typedef struct {
4883 unsigned int fpc;
4884 double fprs[__NUM_FPRS];
4885 } target_s390_fp_regs;
4887 typedef struct {
4888 target_s390_regs_common regs;
4889 target_s390_fp_regs fpregs;
4890 } target_sigregs;
4892 struct target_sigcontext {
4893 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4894 target_sigregs *sregs;
4897 typedef struct {
4898 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4899 struct target_sigcontext sc;
4900 target_sigregs sregs;
4901 int signo;
4902 uint8_t retcode[S390_SYSCALL_SIZE];
4903 } sigframe;
4905 struct target_ucontext {
4906 target_ulong tuc_flags;
4907 struct target_ucontext *tuc_link;
4908 target_stack_t tuc_stack;
4909 target_sigregs tuc_mcontext;
4910 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4913 typedef struct {
4914 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4915 uint8_t retcode[S390_SYSCALL_SIZE];
4916 struct target_siginfo info;
4917 struct target_ucontext uc;
4918 } rt_sigframe;
/* Choose the guest address of a new S390X signal frame below r15 (or
 * the alternate stack with SA_ONSTACK), 8-byte aligned. The legacy
 * sa_restorer branch is disabled (the !user_mode condition is
 * hard-coded to 0 in user emulation). */
4920 static inline abi_ulong
4921 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4923 abi_ulong sp;
4925 /* Default to using normal stack */
4926 sp = env->regs[15];
4928 /* This is the X/Open sanctioned signal stack switching. */
4929 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4930 if (!sas_ss_flags(sp)) {
4931 sp = target_sigaltstack_used.ss_sp +
4932 target_sigaltstack_used.ss_size;
4936 /* This is the legacy signal stack switching. */
4937 else if (/* FIXME !user_mode(regs) */ 0 &&
4938 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4939 ka->sa_restorer) {
4940 sp = (abi_ulong) ka->sa_restorer;
4943 return (sp - frame_size) & -8ul;
/* Copy PSW, the 16 GPRs, 16 access registers and 16 FP registers from
 * CPU state into the guest sigregs area. */
4946 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4948 int i;
4949 //save_access_regs(current->thread.acrs); FIXME
4951 /* Copy a 'clean' PSW mask to the user to avoid leaking
4952 information about whether PER is currently on. */
4953 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4954 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4955 for (i = 0; i < 16; i++) {
4956 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4958 for (i = 0; i < 16; i++) {
4959 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4962 * We have to store the fp registers to current->thread.fp_regs
4963 * to merge them with the emulated registers.
4965 //save_fp_regs(&current->thread.fp_regs); FIXME
4966 for (i = 0; i < 16; i++) {
4967 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
/* Build a classic (non-RT) S390X signal frame: old mask + sregs in the
 * frame, return address in r14 (sa_restorer or the in-frame svc
 * trampoline), backchain, and handler entry via the PSW with
 * r2 = signal number and r3 = &frame->sc. */
4971 static void setup_frame(int sig, struct target_sigaction *ka,
4972 target_sigset_t *set, CPUS390XState *env)
4974 sigframe *frame;
4975 abi_ulong frame_addr;
4977 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4978 trace_user_setup_frame(env, frame_addr);
4979 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4980 goto give_sigsegv;
4983 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4985 save_sigregs(env, &frame->sregs);
/* sc.sregs is a guest pointer to the sregs copy inside this frame. */
4987 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4988 (abi_ulong *)&frame->sc.sregs);
4990 /* Set up to return from userspace. If provided, use a stub
4991 already in userspace. */
4992 if (ka->sa_flags & TARGET_SA_RESTORER) {
4993 env->regs[14] = (unsigned long)
4994 ka->sa_restorer | PSW_ADDR_AMODE;
4995 } else {
4996 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4997 | PSW_ADDR_AMODE;
4998 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4999 (uint16_t *)(frame->retcode));
5002 /* Set up backchain. */
5003 __put_user(env->regs[15], (abi_ulong *) frame);
5005 /* Set up registers for signal handler */
5006 env->regs[15] = frame_addr;
5007 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
5009 env->regs[2] = sig; //map_signal(sig);
5010 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
5012 /* We forgot to include these in the sigcontext.
5013 To avoid breaking binary compatibility, they are passed as args. */
5014 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
5015 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
5017 /* Place signal number on stack to allow backtrace from handler. */
5018 __put_user(env->regs[2], &frame->signo);
5019 unlock_user_struct(frame, frame_addr, 1);
5020 return;
5022 give_sigsegv:
5023 force_sigsegv(sig);
/* Build an S390X rt signal frame (siginfo + full ucontext), then enter
 * the handler with r2 = sig, r3 = &info, r4 = &uc and r14 set to the
 * return trampoline.
 * NOTE(review): the no-restorer branch stores the host address
 * 'frame->retcode' into r14 instead of frame_addr + offsetof(...) as
 * the non-RT setup_frame does — verify this against guest_base. */
5026 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5027 target_siginfo_t *info,
5028 target_sigset_t *set, CPUS390XState *env)
5030 int i;
5031 rt_sigframe *frame;
5032 abi_ulong frame_addr;
5034 frame_addr = get_sigframe(ka, env, sizeof *frame);
5035 trace_user_setup_rt_frame(env, frame_addr);
5036 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5037 goto give_sigsegv;
5040 tswap_siginfo(&frame->info, info);
5042 /* Create the ucontext. */
5043 __put_user(0, &frame->uc.tuc_flags);
5044 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
5045 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5046 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
5047 &frame->uc.tuc_stack.ss_flags);
5048 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5049 save_sigregs(env, &frame->uc.tuc_mcontext);
5050 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
5051 __put_user((abi_ulong)set->sig[i],
5052 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
5055 /* Set up to return from userspace. If provided, use a stub
5056 already in userspace. */
5057 if (ka->sa_flags & TARGET_SA_RESTORER) {
5058 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
5059 } else {
5060 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
5061 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
5062 (uint16_t *)(frame->retcode));
5065 /* Set up backchain. */
5066 __put_user(env->regs[15], (abi_ulong *) frame);
5068 /* Set up registers for signal handler */
5069 env->regs[15] = frame_addr;
5070 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
5072 env->regs[2] = sig; //map_signal(sig);
5073 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
5074 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
5075 return;
5077 give_sigsegv:
5078 force_sigsegv(sig);
/* Reload GPRs, PSW, access registers and FP registers from a guest
 * sigregs area into CPU state. Returns 0 ('err' is never set). */
5081 static int
5082 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
5084 int err = 0;
5085 int i;
5087 for (i = 0; i < 16; i++) {
5088 __get_user(env->regs[i], &sc->regs.gprs[i]);
5091 __get_user(env->psw.mask, &sc->regs.psw.mask);
5092 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
5093 (unsigned long long)env->psw.addr);
5094 __get_user(env->psw.addr, &sc->regs.psw.addr);
5095 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
5097 for (i = 0; i < 16; i++) {
5098 __get_user(env->aregs[i], &sc->regs.acrs[i]);
5100 for (i = 0; i < 16; i++) {
5101 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
5104 return err;
/* S390X sigreturn: unwind the classic signal frame at r15, restore the
 * saved mask and registers; returns -TARGET_QEMU_ESIGRETURN so the
 * main loop preserves r2. */
5107 long do_sigreturn(CPUS390XState *env)
5109 sigframe *frame;
5110 abi_ulong frame_addr = env->regs[15];
5111 target_sigset_t target_set;
5112 sigset_t set;
5114 trace_user_do_sigreturn(env, frame_addr);
5115 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5116 goto badframe;
5118 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
5120 target_to_host_sigset_internal(&set, &target_set);
5121 set_sigmask(&set); /* ~_BLOCKABLE? */
5123 if (restore_sigregs(env, &frame->sregs)) {
5124 goto badframe;
5127 unlock_user_struct(frame, frame_addr, 0);
5128 return -TARGET_QEMU_ESIGRETURN;
5130 badframe:
5131 force_sig(TARGET_SIGSEGV);
5132 return -TARGET_QEMU_ESIGRETURN;
/* S390X rt_sigreturn: unwind the rt frame at r15, restore mask,
 * registers and the sigaltstack state saved in the ucontext. */
5135 long do_rt_sigreturn(CPUS390XState *env)
5137 rt_sigframe *frame;
5138 abi_ulong frame_addr = env->regs[15];
5139 sigset_t set;
5141 trace_user_do_rt_sigreturn(env, frame_addr);
5142 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5143 goto badframe;
5145 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5147 set_sigmask(&set); /* ~_BLOCKABLE? */
5149 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
5150 goto badframe;
5153 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
5154 get_sp_from_cpustate(env)) == -EFAULT) {
5155 goto badframe;
5157 unlock_user_struct(frame, frame_addr, 0);
5158 return -TARGET_QEMU_ESIGRETURN;
5160 badframe:
5161 unlock_user_struct(frame, frame_addr, 0);
5162 force_sig(TARGET_SIGSEGV);
5163 return -TARGET_QEMU_ESIGRETURN;
5166 #elif defined(TARGET_PPC)
/* PPC signal-frame layouts: dummy-frame size, target_mcontext (GPRs,
 * FPRs incl. fpscr, and a union holding either SPE or Altivec/VSX
 * vector state — the PPC64/PPC32 padding differences are deliberate,
 * see the in-line comments), and target_sigcontext matching
 * arch/powerpc/include/asm/sigcontext.h. */
5168 /* Size of dummy stack frame allocated when calling signal handler.
5169 See arch/powerpc/include/asm/ptrace.h. */
5170 #if defined(TARGET_PPC64)
5171 #define SIGNAL_FRAMESIZE 128
5172 #else
5173 #define SIGNAL_FRAMESIZE 64
5174 #endif
5176 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
5177 on 64-bit PPC, sigcontext and mcontext are one and the same. */
5178 struct target_mcontext {
5179 target_ulong mc_gregs[48];
5180 /* Includes fpscr. */
5181 uint64_t mc_fregs[33];
5182 #if defined(TARGET_PPC64)
5183 /* Pointer to the vector regs */
5184 target_ulong v_regs;
5185 #else
5186 target_ulong mc_pad[2];
5187 #endif
5188 /* We need to handle Altivec and SPE at the same time, which no
5189 kernel needs to do. Fortunately, the kernel defines this bit to
5190 be Altivec-register-large all the time, rather than trying to
5191 twiddle it based on the specific platform. */
5192 union {
5193 /* SPE vector registers. One extra for SPEFSCR. */
5194 uint32_t spe[33];
5195 /* Altivec vector registers. The packing of VSCR and VRSAVE
5196 varies depending on whether we're PPC64 or not: PPC64 splits
5197 them apart; PPC32 stuffs them together.
5198 We also need to account for the VSX registers on PPC64
5200 #if defined(TARGET_PPC64)
5201 #define QEMU_NVRREG (34 + 16)
5202 /* On ppc64, this mcontext structure is naturally *unaligned*,
5203 * or rather it is aligned on a 8 bytes boundary but not on
5204 * a 16 bytes one. This pad fixes it up. This is also why the
5205 * vector regs are referenced by the v_regs pointer above so
5206 * any amount of padding can be added here
5208 target_ulong pad;
5209 #else
5210 /* On ppc32, we are already aligned to 16 bytes */
5211 #define QEMU_NVRREG 33
5212 #endif
5213 /* We cannot use ppc_avr_t here as we do *not* want the implied
5214 * 16-bytes alignment that would result from it. This would have
5215 * the effect of making the whole struct target_mcontext aligned
5216 * which breaks the layout of struct target_ucontext on ppc64.
5218 uint64_t altivec[QEMU_NVRREG][2];
5219 #undef QEMU_NVRREG
5220 } mc_vregs;
5223 /* See arch/powerpc/include/asm/sigcontext.h. */
5224 struct target_sigcontext {
5225 target_ulong _unused[4];
5226 int32_t signal;
5227 #if defined(TARGET_PPC64)
5228 int32_t pad0;
5229 #endif
5230 target_ulong handler;
5231 target_ulong oldmask;
5232 target_ulong regs; /* struct pt_regs __user * */
5233 #if defined(TARGET_PPC64)
5234 struct target_mcontext mcontext;
5235 #endif
/* Index constants into target_mcontext.mc_gregs, matching the
 * kernel's pt_regs layout (GPR0-31 then special registers). */
5238 /* Indices for target_mcontext.mc_gregs, below.
5239 See arch/powerpc/include/asm/ptrace.h for details. */
5240 enum {
5241 TARGET_PT_R0 = 0,
5242 TARGET_PT_R1 = 1,
5243 TARGET_PT_R2 = 2,
5244 TARGET_PT_R3 = 3,
5245 TARGET_PT_R4 = 4,
5246 TARGET_PT_R5 = 5,
5247 TARGET_PT_R6 = 6,
5248 TARGET_PT_R7 = 7,
5249 TARGET_PT_R8 = 8,
5250 TARGET_PT_R9 = 9,
5251 TARGET_PT_R10 = 10,
5252 TARGET_PT_R11 = 11,
5253 TARGET_PT_R12 = 12,
5254 TARGET_PT_R13 = 13,
5255 TARGET_PT_R14 = 14,
5256 TARGET_PT_R15 = 15,
5257 TARGET_PT_R16 = 16,
5258 TARGET_PT_R17 = 17,
5259 TARGET_PT_R18 = 18,
5260 TARGET_PT_R19 = 19,
5261 TARGET_PT_R20 = 20,
5262 TARGET_PT_R21 = 21,
5263 TARGET_PT_R22 = 22,
5264 TARGET_PT_R23 = 23,
5265 TARGET_PT_R24 = 24,
5266 TARGET_PT_R25 = 25,
5267 TARGET_PT_R26 = 26,
5268 TARGET_PT_R27 = 27,
5269 TARGET_PT_R28 = 28,
5270 TARGET_PT_R29 = 29,
5271 TARGET_PT_R30 = 30,
5272 TARGET_PT_R31 = 31,
5273 TARGET_PT_NIP = 32,
5274 TARGET_PT_MSR = 33,
5275 TARGET_PT_ORIG_R3 = 34,
5276 TARGET_PT_CTR = 35,
5277 TARGET_PT_LNK = 36,
5278 TARGET_PT_XER = 37,
5279 TARGET_PT_CCR = 38,
5280 /* Yes, there are two registers with #39. One is 64-bit only. */
5281 TARGET_PT_MQ = 39,
5282 TARGET_PT_SOFTE = 39,
5283 TARGET_PT_TRAP = 40,
5284 TARGET_PT_DAR = 41,
5285 TARGET_PT_DSISR = 42,
5286 TARGET_PT_RESULT = 43,
5287 TARGET_PT_REGS_COUNT = 44
/* PPC ucontext and signal-frame layouts. PPC32 keeps the mcontext
 * inside the ucontext (via tuc_regs); PPC64 uses the sigcontext and a
 * 16-byte-aligned rt frame with a 288-byte red zone. target_func_ptr
 * models the PPC64 ELFv1 function descriptor (entry + TOC). */
5291 struct target_ucontext {
5292 target_ulong tuc_flags;
5293 target_ulong tuc_link; /* ucontext_t __user * */
5294 struct target_sigaltstack tuc_stack;
5295 #if !defined(TARGET_PPC64)
5296 int32_t tuc_pad[7];
5297 target_ulong tuc_regs; /* struct mcontext __user *
5298 points to uc_mcontext field */
5299 #endif
5300 target_sigset_t tuc_sigmask;
5301 #if defined(TARGET_PPC64)
5302 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
5303 struct target_sigcontext tuc_sigcontext;
5304 #else
5305 int32_t tuc_maskext[30];
5306 int32_t tuc_pad2[3];
5307 struct target_mcontext tuc_mcontext;
5308 #endif
5311 /* See arch/powerpc/kernel/signal_32.c. */
5312 struct target_sigframe {
5313 struct target_sigcontext sctx;
5314 struct target_mcontext mctx;
5315 int32_t abigap[56];
5318 #if defined(TARGET_PPC64)
5320 #define TARGET_TRAMP_SIZE 6
5322 struct target_rt_sigframe {
5323 /* sys_rt_sigreturn requires the ucontext be the first field */
5324 struct target_ucontext uc;
5325 target_ulong _unused[2];
5326 uint32_t trampoline[TARGET_TRAMP_SIZE];
5327 target_ulong pinfo; /* struct siginfo __user * */
5328 target_ulong puc; /* void __user * */
5329 struct target_siginfo info;
5330 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
5331 char abigap[288];
5332 } __attribute__((aligned(16)));
5334 #else
5336 struct target_rt_sigframe {
5337 struct target_siginfo info;
5338 struct target_ucontext uc;
5339 int32_t abigap[56];
5342 #endif
5344 #if defined(TARGET_PPC64)
5346 struct target_func_ptr {
5347 target_ulong entry;
5348 target_ulong toc;
5351 #endif
/* PPC get_sigframe: reserve a 16-byte-aligned frame below the current
 * r1 (or the alternate stack when SA_ONSTACK applies). The PPC_VEC_*
 * macros select which 64-bit half of a saved Altivec register is
 * "high" depending on host/target endianness agreement. */
5353 /* We use the mc_pad field for the signal return trampoline. */
5354 #define tramp mc_pad
5356 /* See arch/powerpc/kernel/signal.c. */
5357 static target_ulong get_sigframe(struct target_sigaction *ka,
5358 CPUPPCState *env,
5359 int frame_size)
5361 target_ulong oldsp;
5363 oldsp = env->gpr[1];
5365 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
5366 (sas_ss_flags(oldsp) == 0)) {
5367 oldsp = (target_sigaltstack_used.ss_sp
5368 + target_sigaltstack_used.ss_size);
5371 return (oldsp - frame_size) & ~0xFUL;
5374 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
5375 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
5376 #define PPC_VEC_HI 0
5377 #define PPC_VEC_LO 1
5378 #else
5379 #define PPC_VEC_HI 1
5380 #define PPC_VEC_LO 0
5381 #endif
/* Save complete PPC user state into a guest mcontext: GPRs, NIP/CTR/
 * LR/XER, assembled CR, then conditionally Altivec (+VRSAVE and, on
 * PPC64, VSX second halves), FP regs + fpscr, and SPE high halves.
 * MSR_VR / MSR_SPE are set in the saved MSR to mark which vector
 * state in the mc_vregs union is valid. */
5384 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
5386 target_ulong msr = env->msr;
5387 int i;
5388 target_ulong ccr = 0;
5390 /* In general, the kernel attempts to be intelligent about what it
5391 needs to save for Altivec/FP/SPE registers. We don't care that
5392 much, so we just go ahead and save everything. */
5394 /* Save general registers. */
5395 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5396 __put_user(env->gpr[i], &frame->mc_gregs[i]);
5398 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5399 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5400 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5401 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
/* Pack the eight 4-bit CR fields into one 32-bit CCR word. */
5403 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5404 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
5406 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
5408 /* Save Altivec registers if necessary. */
5409 if (env->insns_flags & PPC_ALTIVEC) {
5410 uint32_t *vrsave;
5411 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5412 ppc_avr_t *avr = &env->avr[i];
5413 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
5415 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5416 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5418 /* Set MSR_VR in the saved MSR value to indicate that
5419 frame->mc_vregs contains valid data. */
5420 msr |= MSR_VR;
5421 #if defined(TARGET_PPC64)
5422 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
5423 /* 64-bit needs to put a pointer to the vectors in the frame */
5424 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
5425 #else
5426 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
5427 #endif
5428 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
5431 /* Save VSX second halves */
5432 if (env->insns_flags2 & PPC2_VSX) {
5433 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5434 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5435 __put_user(env->vsr[i], &vsregs[i]);
5439 /* Save floating point registers. */
5440 if (env->insns_flags & PPC_FLOAT) {
5441 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5442 __put_user(env->fpr[i], &frame->mc_fregs[i]);
5444 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
5447 /* Save SPE registers. The kernel only saves the high half. */
5448 if (env->insns_flags & PPC_SPE) {
5449 #if defined(TARGET_PPC64)
5450 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5451 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
5453 #else
5454 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5455 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5457 #endif
5458 /* Set MSR_SPE in the saved MSR value to indicate that
5459 frame->mc_vregs contains valid data. */
5460 msr |= MSR_SPE;
5461 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5464 /* Store MSR. */
5465 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
/*
 * Write a two-instruction PowerPC sigreturn trampoline into guest
 * memory at 'tramp': "li r0,sigret; sc".  A sigret of 0 means the
 * caller provides its own restorer, so nothing is written.
 */
5468 static void encode_trampoline(int sigret, uint32_t *tramp)
5470 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
5471 if (sigret) {
5472 __put_user(0x38000000 | sigret, &tramp[0]);  /* li r0,sigret */
5473 __put_user(0x44000002, &tramp[1]);           /* sc */
/*
 * Reload CPU state from a saved PPC mcontext.
 *
 * sig != 0: returning from a signal handler — also restore the saved
 * little-endian (MSR_LE) bit.  sig == 0: a setcontext-style restore
 * that preserves the caller's current r2 (TOC) instead of the frame's.
 */
5477 static void restore_user_regs(CPUPPCState *env,
5478 struct target_mcontext *frame, int sig)
5480 target_ulong save_r2 = 0;
5481 target_ulong msr;
5482 target_ulong ccr;
5484 int i;
5486 if (!sig) {
5487 save_r2 = env->gpr[2];
5490 /* Restore general registers. */
5491 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5492 __get_user(env->gpr[i], &frame->mc_gregs[i]);
5494 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5495 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5496 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5497 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
5498 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
/* Unpack the 32-bit CCR image into the eight 4-bit CR fields. */
5500 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5501 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
5504 if (!sig) {
5505 env->gpr[2] = save_r2;
5507 /* Restore MSR. */
5508 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
5510 /* If doing signal return, restore the previous little-endian mode. */
5511 if (sig)
5512 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
5514 /* Restore Altivec registers if necessary. */
5515 if (env->insns_flags & PPC_ALTIVEC) {
5516 ppc_avr_t *v_regs;
5517 uint32_t *vrsave;
5518 #if defined(TARGET_PPC64)
5519 uint64_t v_addr;
5520 /* 64-bit needs to recover the pointer to the vectors from the frame */
5521 __get_user(v_addr, &frame->v_regs);
5522 v_regs = g2h(v_addr);
5523 #else
5524 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
5525 #endif
5526 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5527 ppc_avr_t *avr = &env->avr[i];
5528 ppc_avr_t *vreg = &v_regs[i];
5530 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5531 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5533 /* Set MSR_VEC in the saved MSR value to indicate that
5534 frame->mc_vregs contains valid data. */
/* VRSAVE lives after the 32 (PPC32) or 33 (PPC64) vector slots. */
5535 #if defined(TARGET_PPC64)
5536 vrsave = (uint32_t *)&v_regs[33];
5537 #else
5538 vrsave = (uint32_t *)&v_regs[32];
5539 #endif
5540 __get_user(env->spr[SPR_VRSAVE], vrsave);
5543 /* Restore VSX second halves */
5544 if (env->insns_flags2 & PPC2_VSX) {
5545 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5546 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5547 __get_user(env->vsr[i], &vsregs[i]);
5551 /* Restore floating point registers. */
5552 if (env->insns_flags & PPC_FLOAT) {
5553 uint64_t fpscr;
5554 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5555 __get_user(env->fpr[i], &frame->mc_fregs[i]);
5557 __get_user(fpscr, &frame->mc_fregs[32]);
5558 env->fpscr = (uint32_t) fpscr;
5561 /* Save SPE registers. The kernel only saves the high half. */
5562 if (env->insns_flags & PPC_SPE) {
5563 #if defined(TARGET_PPC64)
/* On 64-bit, the SPE high halves are merged back into gpr[i]'s top 32 bits. */
5564 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5565 uint32_t hi;
5567 __get_user(hi, &frame->mc_vregs.spe[i]);
5568 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
5570 #else
5571 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5572 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5574 #endif
5575 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5579 #if !defined(TARGET_PPC64)
/*
 * Build a PPC32 non-RT signal frame on the guest stack and point the
 * CPU at the handler.  Layout: target_sigframe at frame_addr, with a
 * back-chain word (caller's r1) stored SIGNAL_FRAMESIZE below it.
 * On any fault writing the frame, force SIGSEGV instead.
 */
5580 static void setup_frame(int sig, struct target_sigaction *ka,
5581 target_sigset_t *set, CPUPPCState *env)
5583 struct target_sigframe *frame;
5584 struct target_sigcontext *sc;
5585 target_ulong frame_addr, newsp;
5586 int err = 0;
5588 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5589 trace_user_setup_frame(env, frame_addr);
5590 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
5591 goto sigsegv;
5592 sc = &frame->sctx;
5594 __put_user(ka->_sa_handler, &sc->handler);
5595 __put_user(set->sig[0], &sc->oldmask);
/* Second sigset word is stashed in the _unused area, as Linux does. */
5596 __put_user(set->sig[1], &sc->_unused[3]);
5597 __put_user(h2g(&frame->mctx), &sc->regs);
5598 __put_user(sig, &sc->signal);
5600 /* Save user regs. */
5601 save_user_regs(env, &frame->mctx);
5603 /* Construct the trampoline code on the stack. */
5604 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
5606 /* The kernel checks for the presence of a VDSO here. We don't
5607 emulate a vdso, so use a sigreturn system call. */
5608 env->lr = (target_ulong) h2g(frame->mctx.tramp);
5610 /* Turn off all fp exceptions. */
5611 env->fpscr = 0;
5613 /* Create a stack frame for the caller of the handler. */
5614 newsp = frame_addr - SIGNAL_FRAMESIZE;
5615 err |= put_user(env->gpr[1], newsp, target_ulong);
5617 if (err)
5618 goto sigsegv;
5620 /* Set up registers for signal handler. */
5621 env->gpr[1] = newsp;
5622 env->gpr[3] = sig;
5623 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
5625 env->nip = (target_ulong) ka->_sa_handler;
5627 /* Signal handlers are entered in big-endian mode. */
5628 env->msr &= ~(1ull << MSR_LE);
5630 unlock_user_struct(frame, frame_addr, 1);
5631 return;
5633 sigsegv:
5634 unlock_user_struct(frame, frame_addr, 1);
5635 force_sigsegv(sig);
5637 #endif /* !defined(TARGET_PPC64) */
/*
 * Build a PPC RT signal frame (siginfo + ucontext) on the guest stack
 * and enter the handler.  On PPC64 the mcontext lives inside
 * uc.tuc_sigcontext and ELFv1/ELFv2 function-descriptor conventions
 * are honoured when loading nip/r2/r12.
 */
5639 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5640 target_siginfo_t *info,
5641 target_sigset_t *set, CPUPPCState *env)
5643 struct target_rt_sigframe *rt_sf;
5644 uint32_t *trampptr = 0;
5645 struct target_mcontext *mctx = 0;
5646 target_ulong rt_sf_addr, newsp = 0;
5647 int i, err = 0;
5648 #if defined(TARGET_PPC64)
5649 struct target_sigcontext *sc = 0;
5650 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
5651 #endif
5653 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
5654 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
5655 goto sigsegv;
5657 tswap_siginfo(&rt_sf->info, info);
5659 __put_user(0, &rt_sf->uc.tuc_flags);
5660 __put_user(0, &rt_sf->uc.tuc_link);
5661 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
5662 &rt_sf->uc.tuc_stack.ss_sp);
5663 __put_user(sas_ss_flags(env->gpr[1]),
5664 &rt_sf->uc.tuc_stack.ss_flags);
5665 __put_user(target_sigaltstack_used.ss_size,
5666 &rt_sf->uc.tuc_stack.ss_size);
5667 #if !defined(TARGET_PPC64)
5668 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
5669 &rt_sf->uc.tuc_regs);
5670 #endif
5671 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5672 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
5675 #if defined(TARGET_PPC64)
5676 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
/* PPC64 keeps the trampoline in the frame, not in the mcontext. */
5677 trampptr = &rt_sf->trampoline[0];
5679 sc = &rt_sf->uc.tuc_sigcontext;
5680 __put_user(h2g(mctx), &sc->regs);
5681 __put_user(sig, &sc->signal);
5682 #else
5683 mctx = &rt_sf->uc.tuc_mcontext;
5684 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
5685 #endif
5687 save_user_regs(env, mctx);
5688 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
5690 /* The kernel checks for the presence of a VDSO here. We don't
5691 emulate a vdso, so use a sigreturn system call. */
5692 env->lr = (target_ulong) h2g(trampptr);
5694 /* Turn off all fp exceptions. */
5695 env->fpscr = 0;
5697 /* Create a stack frame for the caller of the handler. */
5698 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
5699 err |= put_user(env->gpr[1], newsp, target_ulong);
5701 if (err)
5702 goto sigsegv;
5704 /* Set up registers for signal handler. */
5705 env->gpr[1] = newsp;
5706 env->gpr[3] = (target_ulong) sig;
5707 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
5708 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
5709 env->gpr[6] = (target_ulong) h2g(rt_sf);
5711 #if defined(TARGET_PPC64)
5712 if (get_ppc64_abi(image) < 2) {
5713 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
5714 struct target_func_ptr *handler =
5715 (struct target_func_ptr *)g2h(ka->_sa_handler);
5716 env->nip = tswapl(handler->entry);
5717 env->gpr[2] = tswapl(handler->toc);
5718 } else {
5719 /* ELFv2 PPC64 function pointers are entry points, but R12
5720 * must also be set */
5721 env->nip = tswapl((target_ulong) ka->_sa_handler);
5722 env->gpr[12] = env->nip;
5724 #else
5725 env->nip = (target_ulong) ka->_sa_handler;
5726 #endif
5728 /* Signal handlers are entered in big-endian mode. */
5729 env->msr &= ~(1ull << MSR_LE);
5731 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5732 return;
5734 sigsegv:
5735 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5736 force_sigsegv(sig);
5740 #if !defined(TARGET_PPC64)
/*
 * PPC32 sigreturn: pick up the sigcontext that setup_frame() left at
 * sp + SIGNAL_FRAMESIZE, restore the blocked-signal mask, then restore
 * user registers from the saved mcontext.
 * NOTE(review): the TARGET_PPC64 branch below is inside a
 * !defined(TARGET_PPC64) region and so is dead code — confirm.
 */
5741 long do_sigreturn(CPUPPCState *env)
5743 struct target_sigcontext *sc = NULL;
5744 struct target_mcontext *sr = NULL;
5745 target_ulong sr_addr = 0, sc_addr;
5746 sigset_t blocked;
5747 target_sigset_t set;
5749 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
5750 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
5751 goto sigsegv;
5753 #if defined(TARGET_PPC64)
5754 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
5755 #else
5756 __get_user(set.sig[0], &sc->oldmask);
5757 __get_user(set.sig[1], &sc->_unused[3]);
5758 #endif
5759 target_to_host_sigset_internal(&blocked, &set);
5760 set_sigmask(&blocked);
5762 __get_user(sr_addr, &sc->regs);
5763 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
5764 goto sigsegv;
5765 restore_user_regs(env, sr, 1);
5767 unlock_user_struct(sr, sr_addr, 1);
5768 unlock_user_struct(sc, sc_addr, 1);
5769 return -TARGET_QEMU_ESIGRETURN;
5771 sigsegv:
5772 unlock_user_struct(sr, sr_addr, 1);
5773 unlock_user_struct(sc, sc_addr, 1);
5774 force_sig(TARGET_SIGSEGV);
5775 return -TARGET_QEMU_ESIGRETURN;
5777 #endif /* !defined(TARGET_PPC64) */
5779 /* See arch/powerpc/kernel/signal_32.c. */
5780 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
5782 struct target_mcontext *mcp;
5783 target_ulong mcp_addr;
5784 sigset_t blocked;
5785 target_sigset_t set;
5787 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
5788 sizeof (set)))
5789 return 1;
5791 #if defined(TARGET_PPC64)
5792 mcp_addr = h2g(ucp) +
5793 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
5794 #else
5795 __get_user(mcp_addr, &ucp->tuc_regs);
5796 #endif
5798 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
5799 return 1;
5801 target_to_host_sigset_internal(&blocked, &set);
5802 set_sigmask(&blocked);
5803 restore_user_regs(env, mcp, sig);
5805 unlock_user_struct(mcp, mcp_addr, 1);
5806 return 0;
/*
 * PPC rt_sigreturn: locate the rt frame that setup_rt_frame() placed
 * at sp + SIGNAL_FRAMESIZE + 16, restore context and mask via
 * do_setcontext(), then re-arm the alternate signal stack state.
 */
5809 long do_rt_sigreturn(CPUPPCState *env)
5811 struct target_rt_sigframe *rt_sf = NULL;
5812 target_ulong rt_sf_addr;
5814 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
5815 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
5816 goto sigsegv;
5818 if (do_setcontext(&rt_sf->uc, env, 1))
5819 goto sigsegv;
5821 do_sigaltstack(rt_sf_addr
5822 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
5823 0, env->gpr[1]);
5825 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5826 return -TARGET_QEMU_ESIGRETURN;
5828 sigsegv:
5829 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5830 force_sig(TARGET_SIGSEGV);
5831 return -TARGET_QEMU_ESIGRETURN;
5834 #elif defined(TARGET_M68K)
/* m68k non-RT sigcontext: mirrors the kernel's struct sigcontext layout. */
5836 struct target_sigcontext {
5837 abi_ulong sc_mask;     /* first word of the blocked-signal mask */
5838 abi_ulong sc_usp;      /* user stack pointer (a7) */
5839 abi_ulong sc_d0;
5840 abi_ulong sc_d1;
5841 abi_ulong sc_a0;
5842 abi_ulong sc_a1;
5843 unsigned short sc_sr;  /* status register (CCR in low byte) */
5844 abi_ulong sc_pc;
/* m68k non-RT signal frame pushed on the guest stack by setup_frame(). */
5847 struct target_sigframe
5849 abi_ulong pretcode;    /* guest address of the return trampoline */
5850 int sig;
5851 int code;
5852 abi_ulong psc;         /* guest address of 'sc' below */
5853 char retcode[8];       /* trampoline: moveq #,d0; trap #0 */
5854 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5855 struct target_sigcontext sc;
/* m68k mcontext register-set types, matching the kernel's layout:
 * 16 GPRs + pc + sr = 18 gregs; FP control block + 8 x 96-bit FP regs. */
5858 typedef int target_greg_t;
5859 #define TARGET_NGREG 18
5860 typedef target_greg_t target_gregset_t[TARGET_NGREG];
5862 typedef struct target_fpregset {
5863 int f_fpcntl[3];       /* fpcr, fpsr, fpiar */
5864 int f_fpregs[8*3];     /* 8 registers, 96 bits each */
5865 } target_fpregset_t;
5867 struct target_mcontext {
5868 int version;
5869 target_gregset_t gregs;
5870 target_fpregset_t fpregs;
5873 #define TARGET_MCONTEXT_VERSION 2
5875 struct target_ucontext {
5876 abi_ulong tuc_flags;
5877 abi_ulong tuc_link;
5878 target_stack_t tuc_stack;
5879 struct target_mcontext tuc_mcontext;
5880 abi_long tuc_filler[80];
5881 target_sigset_t tuc_sigmask;
/* m68k RT signal frame pushed on the guest stack by setup_rt_frame(). */
5884 struct target_rt_sigframe
5886 abi_ulong pretcode;    /* guest address of the return trampoline */
5887 int sig;
5888 abi_ulong pinfo;       /* guest address of 'info' below */
5889 abi_ulong puc;         /* guest address of 'uc' below */
5890 char retcode[8];       /* trampoline: moveq #,d0; notb d0; trap #0 */
5891 struct target_siginfo info;
5892 struct target_ucontext uc;
/*
 * Fill a non-RT m68k sigcontext from CPU state.  The SR image is the
 * privileged upper byte of env->sr combined with the live CCR.
 */
5895 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5896 abi_ulong mask)
5898 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5899 __put_user(mask, &sc->sc_mask);
5900 __put_user(env->aregs[7], &sc->sc_usp);
5901 __put_user(env->dregs[0], &sc->sc_d0);
5902 __put_user(env->dregs[1], &sc->sc_d1);
5903 __put_user(env->aregs[0], &sc->sc_a0);
5904 __put_user(env->aregs[1], &sc->sc_a1);
5905 __put_user(sr, &sc->sc_sr);
5906 __put_user(env->pc, &sc->sc_pc);
/*
 * Restore CPU state from a non-RT m68k sigcontext: usp, d0/d1, a0/a1,
 * pc, and the condition codes (via cpu_m68k_set_ccr).  The saved mask
 * is handled by the caller, do_sigreturn().
 */
5909 static void
5910 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5912 int temp;
5914 __get_user(env->aregs[7], &sc->sc_usp);
5915 __get_user(env->dregs[0], &sc->sc_d0);
5916 __get_user(env->dregs[1], &sc->sc_d1);
5917 __get_user(env->aregs[0], &sc->sc_a0);
5918 __get_user(env->aregs[1], &sc->sc_a1);
5919 __get_user(env->pc, &sc->sc_pc);
5920 __get_user(temp, &sc->sc_sr);
5921 cpu_m68k_set_ccr(env, temp);
5925 * Determine which stack to use..
/*
 * Returns the guest address for a new signal frame: the alternate
 * signal stack if SA_ONSTACK is requested and we are not already on
 * it, otherwise the current a7; result is 8-byte aligned downward.
 */
5927 static inline abi_ulong
5928 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5929 size_t frame_size)
5931 unsigned long sp;
5933 sp = regs->aregs[7];
5935 /* This is the X/Open sanctioned signal stack switching. */
5936 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5937 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5940 return ((sp - frame_size) & -8UL);
/*
 * Build an m68k non-RT signal frame on the guest stack, including the
 * "moveq #NR_sigreturn,d0; trap #0" return trampoline, then point
 * a7/pc at the frame and handler.  Faults force SIGSEGV.
 */
5943 static void setup_frame(int sig, struct target_sigaction *ka,
5944 target_sigset_t *set, CPUM68KState *env)
5946 struct target_sigframe *frame;
5947 abi_ulong frame_addr;
5948 abi_ulong retcode_addr;
5949 abi_ulong sc_addr;
5950 int i;
5952 frame_addr = get_sigframe(ka, env, sizeof *frame);
5953 trace_user_setup_frame(env, frame_addr);
5954 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5955 goto give_sigsegv;
5958 __put_user(sig, &frame->sig);
5960 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5961 __put_user(sc_addr, &frame->psc);
5963 setup_sigcontext(&frame->sc, env, set->sig[0]);
/* Remaining mask words go into extramask[]; word 0 is in sc_mask. */
5965 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5966 __put_user(set->sig[i], &frame->extramask[i - 1]);
5969 /* Set up to return from userspace. */
5971 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5972 __put_user(retcode_addr, &frame->pretcode);
5974 /* moveq #,d0; trap #0 */
5976 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5977 (uint32_t *)(frame->retcode));
5979 /* Set up to return from userspace */
5981 env->aregs[7] = frame_addr;
5982 env->pc = ka->_sa_handler;
5984 unlock_user_struct(frame, frame_addr, 1);
5985 return;
5987 give_sigsegv:
5988 force_sigsegv(sig);
/*
 * Save the m68k FPU state into the ucontext's fpregs.  Each 96-bit FP
 * register is stored as a 16-bit-shifted exponent word followed by the
 * 64-bit mantissa.  fpiar is not emulated and left unwritten.
 */
5991 static inline void target_rt_save_fpu_state(struct target_ucontext *uc,
5992 CPUM68KState *env)
5994 int i;
5995 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
5997 __put_user(env->fpcr, &fpregs->f_fpcntl[0]);
5998 __put_user(env->fpsr, &fpregs->f_fpcntl[1]);
5999 /* fpiar is not emulated */
6001 for (i = 0; i < 8; i++) {
6002 uint32_t high = env->fregs[i].d.high << 16;
6003 __put_user(high, &fpregs->f_fpregs[i * 3]);
6004 __put_user(env->fregs[i].d.low,
6005 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
/*
 * Fill the mcontext inside a guest ucontext from CPU state:
 * d0-d7, a0-a7, pc, sr (privileged byte + live CCR), then FPU state.
 * Always returns 0; the int return matches the caller's err |= use.
 */
6009 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
6010 CPUM68KState *env)
6012 target_greg_t *gregs = uc->tuc_mcontext.gregs;
6013 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
6015 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
6016 __put_user(env->dregs[0], &gregs[0]);
6017 __put_user(env->dregs[1], &gregs[1]);
6018 __put_user(env->dregs[2], &gregs[2]);
6019 __put_user(env->dregs[3], &gregs[3]);
6020 __put_user(env->dregs[4], &gregs[4]);
6021 __put_user(env->dregs[5], &gregs[5]);
6022 __put_user(env->dregs[6], &gregs[6]);
6023 __put_user(env->dregs[7], &gregs[7]);
6024 __put_user(env->aregs[0], &gregs[8]);
6025 __put_user(env->aregs[1], &gregs[9]);
6026 __put_user(env->aregs[2], &gregs[10]);
6027 __put_user(env->aregs[3], &gregs[11]);
6028 __put_user(env->aregs[4], &gregs[12]);
6029 __put_user(env->aregs[5], &gregs[13]);
6030 __put_user(env->aregs[6], &gregs[14]);
6031 __put_user(env->aregs[7], &gregs[15]);
6032 __put_user(env->pc, &gregs[16]);
6033 __put_user(sr, &gregs[17]);
6035 target_rt_save_fpu_state(uc, env);
6037 return 0;
/*
 * Restore the m68k FPU state from the ucontext's fpregs — the inverse
 * of target_rt_save_fpu_state.  fpcr goes through cpu_m68k_set_fpcr so
 * rounding/precision modes take effect; fpiar is not emulated.
 */
6040 static inline void target_rt_restore_fpu_state(CPUM68KState *env,
6041 struct target_ucontext *uc)
6043 int i;
6044 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
6045 uint32_t fpcr;
6047 __get_user(fpcr, &fpregs->f_fpcntl[0]);
6048 cpu_m68k_set_fpcr(env, fpcr);
6049 __get_user(env->fpsr, &fpregs->f_fpcntl[1]);
6050 /* fpiar is not emulated */
6052 for (i = 0; i < 8; i++) {
6053 uint32_t high;
6054 __get_user(high, &fpregs->f_fpregs[i * 3]);
6055 env->fregs[i].d.high = high >> 16;
6056 __get_user(env->fregs[i].d.low,
6057 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
/*
 * Restore CPU state from a guest mcontext — the inverse of
 * target_rt_setup_ucontext.  Returns 0 on success, 1 if the frame's
 * version word does not match TARGET_MCONTEXT_VERSION.
 */
6061 static inline int target_rt_restore_ucontext(CPUM68KState *env,
6062 struct target_ucontext *uc)
6064 int temp;
6065 target_greg_t *gregs = uc->tuc_mcontext.gregs;
6067 __get_user(temp, &uc->tuc_mcontext.version);
6068 if (temp != TARGET_MCONTEXT_VERSION)
6069 goto badframe;
6071 /* restore passed registers */
6072 __get_user(env->dregs[0], &gregs[0]);
6073 __get_user(env->dregs[1], &gregs[1]);
6074 __get_user(env->dregs[2], &gregs[2]);
6075 __get_user(env->dregs[3], &gregs[3]);
6076 __get_user(env->dregs[4], &gregs[4]);
6077 __get_user(env->dregs[5], &gregs[5]);
6078 __get_user(env->dregs[6], &gregs[6]);
6079 __get_user(env->dregs[7], &gregs[7]);
6080 __get_user(env->aregs[0], &gregs[8]);
6081 __get_user(env->aregs[1], &gregs[9]);
6082 __get_user(env->aregs[2], &gregs[10]);
6083 __get_user(env->aregs[3], &gregs[11]);
6084 __get_user(env->aregs[4], &gregs[12]);
6085 __get_user(env->aregs[5], &gregs[13]);
6086 __get_user(env->aregs[6], &gregs[14]);
6087 __get_user(env->aregs[7], &gregs[15]);
6088 __get_user(env->pc, &gregs[16]);
6089 __get_user(temp, &gregs[17]);
6090 cpu_m68k_set_ccr(env, temp);
6092 target_rt_restore_fpu_state(env, uc);
6094 return 0;
6096 badframe:
6097 return 1;
6100 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6101 target_siginfo_t *info,
6102 target_sigset_t *set, CPUM68KState *env)
6104 struct target_rt_sigframe *frame;
6105 abi_ulong frame_addr;
6106 abi_ulong retcode_addr;
6107 abi_ulong info_addr;
6108 abi_ulong uc_addr;
6109 int err = 0;
6110 int i;
6112 frame_addr = get_sigframe(ka, env, sizeof *frame);
6113 trace_user_setup_rt_frame(env, frame_addr);
6114 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6115 goto give_sigsegv;
6118 __put_user(sig, &frame->sig);
6120 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
6121 __put_user(info_addr, &frame->pinfo);
6123 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
6124 __put_user(uc_addr, &frame->puc);
6126 tswap_siginfo(&frame->info, info);
6128 /* Create the ucontext */
6130 __put_user(0, &frame->uc.tuc_flags);
6131 __put_user(0, &frame->uc.tuc_link);
6132 __put_user(target_sigaltstack_used.ss_sp,
6133 &frame->uc.tuc_stack.ss_sp);
6134 __put_user(sas_ss_flags(env->aregs[7]),
6135 &frame->uc.tuc_stack.ss_flags);
6136 __put_user(target_sigaltstack_used.ss_size,
6137 &frame->uc.tuc_stack.ss_size);
6138 err |= target_rt_setup_ucontext(&frame->uc, env);
6140 if (err)
6141 goto give_sigsegv;
6143 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
6144 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6147 /* Set up to return from userspace. */
6149 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
6150 __put_user(retcode_addr, &frame->pretcode);
6152 /* moveq #,d0; notb d0; trap #0 */
6154 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
6155 (uint32_t *)(frame->retcode + 0));
6156 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
6158 if (err)
6159 goto give_sigsegv;
6161 /* Set up to return from userspace */
6163 env->aregs[7] = frame_addr;
6164 env->pc = ka->_sa_handler;
6166 unlock_user_struct(frame, frame_addr, 1);
6167 return;
6169 give_sigsegv:
6170 unlock_user_struct(frame, frame_addr, 1);
6171 force_sigsegv(sig);
/*
 * m68k sigreturn: the handler's rts popped pretcode, so the frame
 * starts 4 bytes below the current a7.  Rebuild and install the
 * blocked mask from sc_mask + extramask[], then restore registers.
 */
6174 long do_sigreturn(CPUM68KState *env)
6176 struct target_sigframe *frame;
6177 abi_ulong frame_addr = env->aregs[7] - 4;
6178 target_sigset_t target_set;
6179 sigset_t set;
6180 int i;
6182 trace_user_do_sigreturn(env, frame_addr);
6183 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
6184 goto badframe;
6186 /* set blocked signals */
6188 __get_user(target_set.sig[0], &frame->sc.sc_mask);
6190 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
6191 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
6194 target_to_host_sigset_internal(&set, &target_set);
6195 set_sigmask(&set);
6197 /* restore registers */
6199 restore_sigcontext(env, &frame->sc);
6201 unlock_user_struct(frame, frame_addr, 0);
6202 return -TARGET_QEMU_ESIGRETURN;
6204 badframe:
6205 force_sig(TARGET_SIGSEGV);
6206 return -TARGET_QEMU_ESIGRETURN;
/*
 * m68k rt_sigreturn: frame starts 4 bytes below a7 (pretcode already
 * popped).  Restore the mask from uc.tuc_sigmask, the registers from
 * the mcontext, and the alternate-signal-stack state.
 */
6209 long do_rt_sigreturn(CPUM68KState *env)
6211 struct target_rt_sigframe *frame;
6212 abi_ulong frame_addr = env->aregs[7] - 4;
6213 sigset_t set;
6215 trace_user_do_rt_sigreturn(env, frame_addr);
6216 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
6217 goto badframe;
6219 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6220 set_sigmask(&set);
6222 /* restore registers */
6224 if (target_rt_restore_ucontext(env, &frame->uc))
6225 goto badframe;
6227 if (do_sigaltstack(frame_addr +
6228 offsetof(struct target_rt_sigframe, uc.tuc_stack),
6229 0, get_sp_from_cpustate(env)) == -EFAULT)
6230 goto badframe;
6232 unlock_user_struct(frame, frame_addr, 0);
6233 return -TARGET_QEMU_ESIGRETURN;
6235 badframe:
6236 unlock_user_struct(frame, frame_addr, 0);
6237 force_sig(TARGET_SIGSEGV);
6238 return -TARGET_QEMU_ESIGRETURN;
6241 #elif defined(TARGET_ALPHA)
/* Alpha sigcontext: mirrors the kernel's OSF/1-compatible layout. */
6243 struct target_sigcontext {
6244 abi_long sc_onstack;
6245 abi_long sc_mask;
6246 abi_long sc_pc;
6247 abi_long sc_ps;
6248 abi_long sc_regs[32];     /* r31 is always stored as 0 */
6249 abi_long sc_ownedfp;
6250 abi_long sc_fpregs[32];   /* f31 is always stored as 0 */
6251 abi_ulong sc_fpcr;
6252 abi_ulong sc_fp_control;
6253 abi_ulong sc_reserved1;
6254 abi_ulong sc_reserved2;
6255 abi_ulong sc_ssize;
6256 abi_ulong sc_sbase;
6257 abi_ulong sc_traparg_a0;
6258 abi_ulong sc_traparg_a1;
6259 abi_ulong sc_traparg_a2;
6260 abi_ulong sc_fp_trap_pc;
6261 abi_ulong sc_fp_trigger_sum;
6262 abi_ulong sc_fp_trigger_inst;
/* Alpha ucontext, signal frames, and the three-instruction
 * sigreturn trampoline opcodes (mov $sp,$a0; ldi $v0,NR; callsys). */
6265 struct target_ucontext {
6266 abi_ulong tuc_flags;
6267 abi_ulong tuc_link;
6268 abi_ulong tuc_osf_sigmask;
6269 target_stack_t tuc_stack;
6270 struct target_sigcontext tuc_mcontext;
6271 target_sigset_t tuc_sigmask;
6274 struct target_sigframe {
6275 struct target_sigcontext sc;
6276 unsigned int retcode[3];
6279 struct target_rt_sigframe {
6280 target_siginfo_t info;
6281 struct target_ucontext uc;
6282 unsigned int retcode[3];
6285 #define INSN_MOV_R30_R16 0x47fe0410
6286 #define INSN_LDI_R0 0x201f0000
6287 #define INSN_CALLSYS 0x00000083
/*
 * Fill an Alpha sigcontext from CPU state.  r31/f31 are hardwired
 * zero registers and are stored as 0; the trap arguments are not
 * tracked by the emulation and are zeroed (see FIXMEs).
 */
6289 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
6290 abi_ulong frame_addr, target_sigset_t *set)
6292 int i;
6294 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
6295 __put_user(set->sig[0], &sc->sc_mask);
6296 __put_user(env->pc, &sc->sc_pc);
6297 __put_user(8, &sc->sc_ps);
6299 for (i = 0; i < 31; ++i) {
6300 __put_user(env->ir[i], &sc->sc_regs[i]);
6302 __put_user(0, &sc->sc_regs[31]);
6304 for (i = 0; i < 31; ++i) {
6305 __put_user(env->fir[i], &sc->sc_fpregs[i]);
6307 __put_user(0, &sc->sc_fpregs[31]);
6308 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
6310 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
6311 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
6312 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
/*
 * Restore Alpha CPU state (pc, integer and FP registers, FPCR) from a
 * sigcontext.  The signal mask is handled by the callers.
 */
6315 static void restore_sigcontext(CPUAlphaState *env,
6316 struct target_sigcontext *sc)
6318 uint64_t fpcr;
6319 int i;
6321 __get_user(env->pc, &sc->sc_pc);
6323 for (i = 0; i < 31; ++i) {
6324 __get_user(env->ir[i], &sc->sc_regs[i]);
6326 for (i = 0; i < 31; ++i) {
6327 __get_user(env->fir[i], &sc->sc_fpregs[i]);
6330 __get_user(fpcr, &sc->sc_fpcr);
6331 cpu_alpha_store_fpcr(env, fpcr);
/*
 * Pick the stack for a new Alpha signal frame: switch to the alternate
 * stack when requested and not already on it; result is 32-byte
 * aligned downward.
 */
6334 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
6335 CPUAlphaState *env,
6336 unsigned long framesize)
6338 abi_ulong sp = env->ir[IR_SP];
6340 /* This is the X/Open sanctioned signal stack switching. */
6341 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
6342 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6344 return (sp - framesize) & -32;
/*
 * Build an Alpha non-RT signal frame.  The return address (r26) is
 * either the user-supplied sa_restorer or the in-frame three-insn
 * sigreturn trampoline.  Handler args: a0 = sig, a1 = 0, a2 = &sc.
 */
6347 static void setup_frame(int sig, struct target_sigaction *ka,
6348 target_sigset_t *set, CPUAlphaState *env)
6350 abi_ulong frame_addr, r26;
6351 struct target_sigframe *frame;
6352 int err = 0;
6354 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6355 trace_user_setup_frame(env, frame_addr);
6356 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6357 goto give_sigsegv;
6360 setup_sigcontext(&frame->sc, env, frame_addr, set);
6362 if (ka->sa_restorer) {
6363 r26 = ka->sa_restorer;
6364 } else {
6365 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6366 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
6367 &frame->retcode[1]);
6368 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6369 /* imb() */
/* NOTE(review): retcode is at offset 0 of the frame only if sc
   precedes it — r26 = frame_addr presumably relies on retcode's
   offset being applied by the handler; confirm against layout. */
6370 r26 = frame_addr;
6373 unlock_user_struct(frame, frame_addr, 1);
6375 if (err) {
6376 give_sigsegv:
6377 force_sigsegv(sig);
6378 return;
6381 env->ir[IR_RA] = r26;
6382 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6383 env->ir[IR_A0] = sig;
6384 env->ir[IR_A1] = 0;
6385 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
6386 env->ir[IR_SP] = frame_addr;
/*
 * Build an Alpha RT signal frame (siginfo + ucontext).  r26 is the
 * restorer or the in-frame trampoline.  Handler args: a0 = sig,
 * a1 = &info, a2 = &uc.
 */
6389 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6390 target_siginfo_t *info,
6391 target_sigset_t *set, CPUAlphaState *env)
6393 abi_ulong frame_addr, r26;
6394 struct target_rt_sigframe *frame;
6395 int i, err = 0;
6397 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6398 trace_user_setup_rt_frame(env, frame_addr);
6399 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6400 goto give_sigsegv;
6403 tswap_siginfo(&frame->info, info);
6405 __put_user(0, &frame->uc.tuc_flags);
6406 __put_user(0, &frame->uc.tuc_link);
6407 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
6408 __put_user(target_sigaltstack_used.ss_sp,
6409 &frame->uc.tuc_stack.ss_sp);
6410 __put_user(sas_ss_flags(env->ir[IR_SP]),
6411 &frame->uc.tuc_stack.ss_flags);
6412 __put_user(target_sigaltstack_used.ss_size,
6413 &frame->uc.tuc_stack.ss_size);
6414 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
6415 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
6416 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6419 if (ka->sa_restorer) {
6420 r26 = ka->sa_restorer;
6421 } else {
6422 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6423 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
6424 &frame->retcode[1]);
6425 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6426 /* imb(); */
6427 r26 = frame_addr;
6430 if (err) {
6431 give_sigsegv:
6432 force_sigsegv(sig);
6433 return;
6436 env->ir[IR_RA] = r26;
6437 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6438 env->ir[IR_A0] = sig;
6439 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6440 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6441 env->ir[IR_SP] = frame_addr;
/*
 * Alpha sigreturn: the sigcontext address arrives in a0 (placed there
 * by the trampoline's "mov $sp,$a0").  Restore the first mask word
 * and the saved registers.
 */
6444 long do_sigreturn(CPUAlphaState *env)
6446 struct target_sigcontext *sc;
6447 abi_ulong sc_addr = env->ir[IR_A0];
6448 target_sigset_t target_set;
6449 sigset_t set;
6451 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
6452 goto badframe;
6455 target_sigemptyset(&target_set);
6456 __get_user(target_set.sig[0], &sc->sc_mask);
6458 target_to_host_sigset_internal(&set, &target_set);
6459 set_sigmask(&set);
6461 restore_sigcontext(env, sc);
6462 unlock_user_struct(sc, sc_addr, 0);
6463 return -TARGET_QEMU_ESIGRETURN;
6465 badframe:
6466 force_sig(TARGET_SIGSEGV);
6467 return -TARGET_QEMU_ESIGRETURN;
/*
 * Alpha rt_sigreturn: the rt frame address arrives in a0.  Restore the
 * full mask from uc.tuc_sigmask, the registers from the mcontext, and
 * the alternate-signal-stack state.
 */
6470 long do_rt_sigreturn(CPUAlphaState *env)
6472 abi_ulong frame_addr = env->ir[IR_A0];
6473 struct target_rt_sigframe *frame;
6474 sigset_t set;
6476 trace_user_do_rt_sigreturn(env, frame_addr);
6477 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6478 goto badframe;
6480 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6481 set_sigmask(&set);
6483 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6484 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6485 uc.tuc_stack),
6486 0, env->ir[IR_SP]) == -EFAULT) {
6487 goto badframe;
6490 unlock_user_struct(frame, frame_addr, 0);
6491 return -TARGET_QEMU_ESIGRETURN;
6494 badframe:
6495 unlock_user_struct(frame, frame_addr, 0);
6496 force_sig(TARGET_SIGSEGV);
6497 return -TARGET_QEMU_ESIGRETURN;
6500 #elif defined(TARGET_TILEGX)
/* TILE-Gx sigcontext/ucontext/rt-frame layouts, plus the two bundles
 * of the rt_sigreturn trampoline (moveli r10,139; swint1). */
6502 struct target_sigcontext {
6503 union {
6504 /* General-purpose registers. */
6505 abi_ulong gregs[56];
6506 struct {
6507 abi_ulong __gregs[53];
6508 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
6509 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
6510 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
6513 abi_ulong pc; /* Program counter. */
6514 abi_ulong ics; /* In Interrupt Critical Section? */
6515 abi_ulong faultnum; /* Fault number. */
6516 abi_ulong pad[5];
6519 struct target_ucontext {
6520 abi_ulong tuc_flags;
6521 abi_ulong tuc_link;
6522 target_stack_t tuc_stack;
6523 struct target_sigcontext tuc_mcontext;
6524 target_sigset_t tuc_sigmask; /* mask last for extensibility */
6527 struct target_rt_sigframe {
6528 unsigned char save_area[16]; /* caller save area */
6529 struct target_siginfo info;
6530 struct target_ucontext uc;
6531 abi_ulong retcode[2];
6534 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
6535 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
/*
 * Fill a TILE-Gx sigcontext from CPU state: all general registers,
 * pc, ics = 0, and the signal number as the fault number.
 */
6538 static void setup_sigcontext(struct target_sigcontext *sc,
6539 CPUArchState *env, int signo)
6541 int i;
6543 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6544 __put_user(env->regs[i], &sc->gregs[i]);
6547 __put_user(env->pc, &sc->pc);
6548 __put_user(0, &sc->ics);
6549 __put_user(signo, &sc->faultnum);
/* Restore TILE-Gx general registers and pc from a sigcontext. */
6552 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
6554 int i;
6556 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6557 __get_user(env->regs[i], &sc->gregs[i]);
6560 __get_user(env->pc, &sc->pc);
6563 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
6564 size_t frame_size)
6566 unsigned long sp = env->regs[TILEGX_R_SP];
6568 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
6569 return -1UL;
6572 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
6573 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6576 sp -= frame_size;
6577 sp &= -16UL;
6578 return sp;
6581 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6582 target_siginfo_t *info,
6583 target_sigset_t *set, CPUArchState *env)
6585 abi_ulong frame_addr;
6586 struct target_rt_sigframe *frame;
6587 unsigned long restorer;
6589 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6590 trace_user_setup_rt_frame(env, frame_addr);
6591 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6592 goto give_sigsegv;
6595 /* Always write at least the signal number for the stack backtracer. */
6596 if (ka->sa_flags & TARGET_SA_SIGINFO) {
6597 /* At sigreturn time, restore the callee-save registers too. */
6598 tswap_siginfo(&frame->info, info);
6599 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
6600 } else {
6601 __put_user(info->si_signo, &frame->info.si_signo);
6604 /* Create the ucontext. */
6605 __put_user(0, &frame->uc.tuc_flags);
6606 __put_user(0, &frame->uc.tuc_link);
6607 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6608 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
6609 &frame->uc.tuc_stack.ss_flags);
6610 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
6611 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
6613 if (ka->sa_flags & TARGET_SA_RESTORER) {
6614 restorer = (unsigned long) ka->sa_restorer;
6615 } else {
6616 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
6617 __put_user(INSN_SWINT1, &frame->retcode[1]);
6618 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
6620 env->pc = (unsigned long) ka->_sa_handler;
6621 env->regs[TILEGX_R_SP] = (unsigned long) frame;
6622 env->regs[TILEGX_R_LR] = restorer;
6623 env->regs[0] = (unsigned long) sig;
6624 env->regs[1] = (unsigned long) &frame->info;
6625 env->regs[2] = (unsigned long) &frame->uc;
6626 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
6628 unlock_user_struct(frame, frame_addr, 1);
6629 return;
6631 give_sigsegv:
6632 force_sigsegv(sig);
6635 long do_rt_sigreturn(CPUTLGState *env)
6637 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
6638 struct target_rt_sigframe *frame;
6639 sigset_t set;
6641 trace_user_do_rt_sigreturn(env, frame_addr);
6642 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6643 goto badframe;
6645 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6646 set_sigmask(&set);
6648 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6649 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6650 uc.tuc_stack),
6651 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
6652 goto badframe;
6655 unlock_user_struct(frame, frame_addr, 0);
6656 return -TARGET_QEMU_ESIGRETURN;
6659 badframe:
6660 unlock_user_struct(frame, frame_addr, 0);
6661 force_sig(TARGET_SIGSEGV);
6662 return -TARGET_QEMU_ESIGRETURN;
6665 #elif defined(TARGET_RISCV)
6667 /* Signal handler invocation must be transparent for the code being
6668 interrupted. Complete CPU (hart) state is saved on entry and restored
6669 before returning from the handler. Process sigmask is also saved to block
6670 signals while the handler is running. The handler gets its own stack,
6671 which also doubles as storage for the CPU state and sigmask.
6673 The code below is qemu re-implementation of arch/riscv/kernel/signal.c */
6675 struct target_sigcontext {
6676 abi_long pc;
6677 abi_long gpr[31]; /* x0 is not present, so all offsets must be -1 */
6678 uint64_t fpr[32];
6679 uint32_t fcsr;
6680 }; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
6682 struct target_ucontext {
6683 unsigned long uc_flags;
6684 struct target_ucontext *uc_link;
6685 target_stack_t uc_stack;
6686 struct target_sigcontext uc_mcontext;
6687 target_sigset_t uc_sigmask;
6690 struct target_rt_sigframe {
6691 uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */
6692 struct target_siginfo info;
6693 struct target_ucontext uc;
6696 static abi_ulong get_sigframe(struct target_sigaction *ka,
6697 CPURISCVState *regs, size_t framesize)
6699 abi_ulong sp = regs->gpr[xSP];
6700 int onsigstack = on_sig_stack(sp);
6702 /* redzone */
6703 /* This is the X/Open sanctioned signal stack switching. */
6704 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
6705 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6708 sp -= framesize;
6709 sp &= ~3UL; /* align sp on 4-byte boundary */
6711 /* If we are on the alternate signal stack and would overflow it, don't.
6712 Return an always-bogus address instead so we will die with SIGSEGV. */
6713 if (onsigstack && !likely(on_sig_stack(sp))) {
6714 return -1L;
6717 return sp;
6720 static void setup_sigcontext(struct target_sigcontext *sc, CPURISCVState *env)
6722 int i;
6724 __put_user(env->pc, &sc->pc);
6726 for (i = 1; i < 32; i++) {
6727 __put_user(env->gpr[i], &sc->gpr[i - 1]);
6729 for (i = 0; i < 32; i++) {
6730 __put_user(env->fpr[i], &sc->fpr[i]);
6733 uint32_t fcsr = csr_read_helper(env, CSR_FCSR); /*riscv_get_fcsr(env);*/
6734 __put_user(fcsr, &sc->fcsr);
6737 static void setup_ucontext(struct target_ucontext *uc,
6738 CPURISCVState *env, target_sigset_t *set)
6740 abi_ulong ss_sp = (target_ulong)target_sigaltstack_used.ss_sp;
6741 abi_ulong ss_flags = sas_ss_flags(env->gpr[xSP]);
6742 abi_ulong ss_size = target_sigaltstack_used.ss_size;
6744 __put_user(0, &(uc->uc_flags));
6745 __put_user(0, &(uc->uc_link));
6747 __put_user(ss_sp, &(uc->uc_stack.ss_sp));
6748 __put_user(ss_flags, &(uc->uc_stack.ss_flags));
6749 __put_user(ss_size, &(uc->uc_stack.ss_size));
6751 int i;
6752 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6753 __put_user(set->sig[i], &(uc->uc_sigmask.sig[i]));
6756 setup_sigcontext(&uc->uc_mcontext, env);
/* Write the two-instruction sigreturn trampoline onto the frame:
 * "li a7, 139" (__NR_rt_sigreturn) followed by "ecall". */
static inline void install_sigtramp(uint32_t *tramp)
{
    __put_user(0x08b00893, &tramp[0]); /* li a7, 139 = __NR_rt_sigreturn */
    __put_user(0x00000073, &tramp[1]); /* ecall */
}
6765 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6766 target_siginfo_t *info,
6767 target_sigset_t *set, CPURISCVState *env)
6769 abi_ulong frame_addr;
6770 struct target_rt_sigframe *frame;
6772 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6773 trace_user_setup_rt_frame(env, frame_addr);
6775 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6776 goto badframe;
6779 setup_ucontext(&frame->uc, env, set);
6780 tswap_siginfo(&frame->info, info);
6781 install_sigtramp(frame->tramp);
6783 env->pc = ka->_sa_handler;
6784 env->gpr[xSP] = frame_addr;
6785 env->gpr[xA0] = sig;
6786 env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6787 env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6788 env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
6790 return;
6792 badframe:
6793 unlock_user_struct(frame, frame_addr, 1);
6794 if (sig == TARGET_SIGSEGV) {
6795 ka->_sa_handler = TARGET_SIG_DFL;
6797 force_sig(TARGET_SIGSEGV);
6800 static void restore_sigcontext(CPURISCVState *env, struct target_sigcontext *sc)
6802 int i;
6804 __get_user(env->pc, &sc->pc);
6806 for (i = 1; i < 32; ++i) {
6807 __get_user(env->gpr[i], &sc->gpr[i - 1]);
6809 for (i = 0; i < 32; ++i) {
6810 __get_user(env->fpr[i], &sc->fpr[i]);
6813 uint32_t fcsr;
6814 __get_user(fcsr, &sc->fcsr);
6815 csr_write_helper(env, fcsr, CSR_FCSR);
6818 static void restore_ucontext(CPURISCVState *env, struct target_ucontext *uc)
6820 sigset_t blocked;
6821 target_sigset_t target_set;
6822 int i;
6824 target_sigemptyset(&target_set);
6825 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6826 __get_user(target_set.sig[i], &(uc->uc_sigmask.sig[i]));
6829 target_to_host_sigset_internal(&blocked, &target_set);
6830 set_sigmask(&blocked);
6832 restore_sigcontext(env, &uc->uc_mcontext);
6835 long do_rt_sigreturn(CPURISCVState *env)
6837 struct target_rt_sigframe *frame;
6838 abi_ulong frame_addr;
6840 frame_addr = env->gpr[xSP];
6841 trace_user_do_sigreturn(env, frame_addr);
6842 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6843 goto badframe;
6846 restore_ucontext(env, &frame->uc);
6848 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6849 uc.uc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) {
6850 goto badframe;
6853 unlock_user_struct(frame, frame_addr, 0);
6854 return -TARGET_QEMU_ESIGRETURN;
6856 badframe:
6857 unlock_user_struct(frame, frame_addr, 0);
6858 force_sig(TARGET_SIGSEGV);
6859 return 0;
6862 #elif defined(TARGET_HPPA)
6864 struct target_sigcontext {
6865 abi_ulong sc_flags;
6866 abi_ulong sc_gr[32];
6867 uint64_t sc_fr[32];
6868 abi_ulong sc_iasq[2];
6869 abi_ulong sc_iaoq[2];
6870 abi_ulong sc_sar;
6873 struct target_ucontext {
6874 abi_uint tuc_flags;
6875 abi_ulong tuc_link;
6876 target_stack_t tuc_stack;
6877 abi_uint pad[1];
6878 struct target_sigcontext tuc_mcontext;
6879 target_sigset_t tuc_sigmask;
6882 struct target_rt_sigframe {
6883 abi_uint tramp[9];
6884 target_siginfo_t info;
6885 struct target_ucontext uc;
6886 /* hidden location of upper halves of pa2.0 64-bit gregs */
6889 static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
6891 int flags = 0;
6892 int i;
6894 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
6896 if (env->iaoq_f < TARGET_PAGE_SIZE) {
6897 /* In the gateway page, executing a syscall. */
6898 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
6899 __put_user(env->gr[31], &sc->sc_iaoq[0]);
6900 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
6901 } else {
6902 __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
6903 __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
6905 __put_user(0, &sc->sc_iasq[0]);
6906 __put_user(0, &sc->sc_iasq[1]);
6907 __put_user(flags, &sc->sc_flags);
6909 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
6910 for (i = 1; i < 32; ++i) {
6911 __put_user(env->gr[i], &sc->sc_gr[i]);
6914 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
6915 for (i = 1; i < 32; ++i) {
6916 __put_user(env->fr[i], &sc->sc_fr[i]);
6919 __put_user(env->cr[CR_SAR], &sc->sc_sar);
6922 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
6924 target_ulong psw;
6925 int i;
6927 __get_user(psw, &sc->sc_gr[0]);
6928 cpu_hppa_put_psw(env, psw);
6930 for (i = 1; i < 32; ++i) {
6931 __get_user(env->gr[i], &sc->sc_gr[i]);
6933 for (i = 0; i < 32; ++i) {
6934 __get_user(env->fr[i], &sc->sc_fr[i]);
6936 cpu_hppa_loaded_fr0(env);
6938 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
6939 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
6940 __get_user(env->cr[CR_SAR], &sc->sc_sar);
6943 /* No, this doesn't look right, but it's copied straight from the kernel. */
6944 #define PARISC_RT_SIGFRAME_SIZE32 \
6945 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
6947 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6948 target_siginfo_t *info,
6949 target_sigset_t *set, CPUArchState *env)
6951 abi_ulong frame_addr, sp, haddr;
6952 struct target_rt_sigframe *frame;
6953 int i;
6955 sp = env->gr[30];
6956 if (ka->sa_flags & TARGET_SA_ONSTACK) {
6957 if (sas_ss_flags(sp) == 0) {
6958 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
6961 frame_addr = QEMU_ALIGN_UP(sp, 64);
6962 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
6964 trace_user_setup_rt_frame(env, frame_addr);
6966 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6967 goto give_sigsegv;
6970 tswap_siginfo(&frame->info, info);
6971 frame->uc.tuc_flags = 0;
6972 frame->uc.tuc_link = 0;
6974 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6975 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
6976 &frame->uc.tuc_stack.ss_flags);
6977 __put_user(target_sigaltstack_used.ss_size,
6978 &frame->uc.tuc_stack.ss_size);
6980 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6981 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6984 setup_sigcontext(&frame->uc.tuc_mcontext, env);
6986 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
6987 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
6988 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
6989 __put_user(0x08000240, frame->tramp + 3); /* nop */
6991 unlock_user_struct(frame, frame_addr, 1);
6993 env->gr[2] = h2g(frame->tramp);
6994 env->gr[30] = sp;
6995 env->gr[26] = sig;
6996 env->gr[25] = h2g(&frame->info);
6997 env->gr[24] = h2g(&frame->uc);
6999 haddr = ka->_sa_handler;
7000 if (haddr & 2) {
7001 /* Function descriptor. */
7002 target_ulong *fdesc, dest;
7004 haddr &= -4;
7005 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
7006 goto give_sigsegv;
7008 __get_user(dest, fdesc);
7009 __get_user(env->gr[19], fdesc + 1);
7010 unlock_user_struct(fdesc, haddr, 1);
7011 haddr = dest;
7013 env->iaoq_f = haddr;
7014 env->iaoq_b = haddr + 4;
7015 return;
7017 give_sigsegv:
7018 force_sigsegv(sig);
7021 long do_rt_sigreturn(CPUArchState *env)
7023 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
7024 struct target_rt_sigframe *frame;
7025 sigset_t set;
7027 trace_user_do_rt_sigreturn(env, frame_addr);
7028 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
7029 goto badframe;
7031 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
7032 set_sigmask(&set);
7034 restore_sigcontext(env, &frame->uc.tuc_mcontext);
7035 unlock_user_struct(frame, frame_addr, 0);
7037 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
7038 uc.tuc_stack),
7039 0, env->gr[30]) == -EFAULT) {
7040 goto badframe;
7043 unlock_user_struct(frame, frame_addr, 0);
7044 return -TARGET_QEMU_ESIGRETURN;
7046 badframe:
7047 force_sig(TARGET_SIGSEGV);
7048 return -TARGET_QEMU_ESIGRETURN;
7051 #elif defined(TARGET_XTENSA)
7053 struct target_sigcontext {
7054 abi_ulong sc_pc;
7055 abi_ulong sc_ps;
7056 abi_ulong sc_lbeg;
7057 abi_ulong sc_lend;
7058 abi_ulong sc_lcount;
7059 abi_ulong sc_sar;
7060 abi_ulong sc_acclo;
7061 abi_ulong sc_acchi;
7062 abi_ulong sc_a[16];
7063 abi_ulong sc_xtregs;
7066 struct target_ucontext {
7067 abi_ulong tuc_flags;
7068 abi_ulong tuc_link;
7069 target_stack_t tuc_stack;
7070 struct target_sigcontext tuc_mcontext;
7071 target_sigset_t tuc_sigmask;
7074 struct target_rt_sigframe {
7075 target_siginfo_t info;
7076 struct target_ucontext uc;
7077 /* TODO: xtregs */
7078 uint8_t retcode[6];
7079 abi_ulong window[4];
7082 static abi_ulong get_sigframe(struct target_sigaction *sa,
7083 CPUXtensaState *env,
7084 unsigned long framesize)
7086 abi_ulong sp = env->regs[1];
7088 /* This is the X/Open sanctioned signal stack switching. */
7089 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
7090 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
7092 return (sp - framesize) & -16;
7095 static int flush_window_regs(CPUXtensaState *env)
7097 const uint32_t nareg_mask = env->config->nareg - 1;
7098 uint32_t wb = env->sregs[WINDOW_BASE];
7099 uint32_t ws = (xtensa_replicate_windowstart(env) >> (wb + 1)) &
7100 ((1 << env->config->nareg / 4) - 1);
7101 uint32_t d = ctz32(ws) + 1;
7102 uint32_t sp;
7103 abi_long ret = 0;
7105 wb += d;
7106 ws >>= d;
7108 xtensa_sync_phys_from_window(env);
7109 sp = env->phys_regs[(wb * 4 + 1) & nareg_mask];
7111 while (ws && ret == 0) {
7112 int d;
7113 int i;
7114 int idx;
7116 if (ws & 0x1) {
7117 ws >>= 1;
7118 d = 1;
7119 } else if (ws & 0x2) {
7120 ws >>= 2;
7121 d = 2;
7122 for (i = 0; i < 4; ++i) {
7123 idx = (wb * 4 + 4 + i) & nareg_mask;
7124 ret |= put_user_ual(env->phys_regs[idx], sp + (i - 12) * 4);
7126 } else if (ws & 0x4) {
7127 ws >>= 3;
7128 d = 3;
7129 for (i = 0; i < 8; ++i) {
7130 idx = (wb * 4 + 4 + i) & nareg_mask;
7131 ret |= put_user_ual(env->phys_regs[idx], sp + (i - 16) * 4);
7133 } else {
7134 g_assert_not_reached();
7136 sp = env->phys_regs[((wb + d) * 4 + 1) & nareg_mask];
7137 for (i = 0; i < 4; ++i) {
7138 idx = (wb * 4 + i) & nareg_mask;
7139 ret |= put_user_ual(env->phys_regs[idx], sp + (i - 4) * 4);
7141 wb += d;
7143 return ret == 0;
7146 static int setup_sigcontext(struct target_rt_sigframe *frame,
7147 CPUXtensaState *env)
7149 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
7150 int i;
7152 __put_user(env->pc, &sc->sc_pc);
7153 __put_user(env->sregs[PS], &sc->sc_ps);
7154 __put_user(env->sregs[LBEG], &sc->sc_lbeg);
7155 __put_user(env->sregs[LEND], &sc->sc_lend);
7156 __put_user(env->sregs[LCOUNT], &sc->sc_lcount);
7157 if (!flush_window_regs(env)) {
7158 return 0;
7160 for (i = 0; i < 16; ++i) {
7161 __put_user(env->regs[i], sc->sc_a + i);
7163 __put_user(0, &sc->sc_xtregs);
7164 /* TODO: xtregs */
7165 return 1;
7168 static void setup_rt_frame(int sig, struct target_sigaction *ka,
7169 target_siginfo_t *info,
7170 target_sigset_t *set, CPUXtensaState *env)
7172 abi_ulong frame_addr;
7173 struct target_rt_sigframe *frame;
7174 uint32_t ra;
7175 int i;
7177 frame_addr = get_sigframe(ka, env, sizeof(*frame));
7178 trace_user_setup_rt_frame(env, frame_addr);
7180 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
7181 goto give_sigsegv;
7184 if (ka->sa_flags & SA_SIGINFO) {
7185 tswap_siginfo(&frame->info, info);
7188 __put_user(0, &frame->uc.tuc_flags);
7189 __put_user(0, &frame->uc.tuc_link);
7190 __put_user(target_sigaltstack_used.ss_sp,
7191 &frame->uc.tuc_stack.ss_sp);
7192 __put_user(sas_ss_flags(env->regs[1]),
7193 &frame->uc.tuc_stack.ss_flags);
7194 __put_user(target_sigaltstack_used.ss_size,
7195 &frame->uc.tuc_stack.ss_size);
7196 if (!setup_sigcontext(frame, env)) {
7197 unlock_user_struct(frame, frame_addr, 0);
7198 goto give_sigsegv;
7200 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
7201 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
7204 if (ka->sa_flags & TARGET_SA_RESTORER) {
7205 ra = ka->sa_restorer;
7206 } else {
7207 ra = frame_addr + offsetof(struct target_rt_sigframe, retcode);
7208 #ifdef TARGET_WORDS_BIGENDIAN
7209 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
7210 __put_user(0x22, &frame->retcode[0]);
7211 __put_user(0x0a, &frame->retcode[1]);
7212 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
7213 /* Generate instruction: SYSCALL */
7214 __put_user(0x00, &frame->retcode[3]);
7215 __put_user(0x05, &frame->retcode[4]);
7216 __put_user(0x00, &frame->retcode[5]);
7217 #else
7218 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */
7219 __put_user(0x22, &frame->retcode[0]);
7220 __put_user(0xa0, &frame->retcode[1]);
7221 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]);
7222 /* Generate instruction: SYSCALL */
7223 __put_user(0x00, &frame->retcode[3]);
7224 __put_user(0x50, &frame->retcode[4]);
7225 __put_user(0x00, &frame->retcode[5]);
7226 #endif
7228 env->sregs[PS] = PS_UM | (3 << PS_RING_SHIFT);
7229 if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER)) {
7230 env->sregs[PS] |= PS_WOE | (1 << PS_CALLINC_SHIFT);
7232 memset(env->regs, 0, sizeof(env->regs));
7233 env->pc = ka->_sa_handler;
7234 env->regs[1] = frame_addr;
7235 env->sregs[WINDOW_BASE] = 0;
7236 env->sregs[WINDOW_START] = 1;
7238 env->regs[4] = (ra & 0x3fffffff) | 0x40000000;
7239 env->regs[6] = sig;
7240 env->regs[7] = frame_addr + offsetof(struct target_rt_sigframe, info);
7241 env->regs[8] = frame_addr + offsetof(struct target_rt_sigframe, uc);
7242 unlock_user_struct(frame, frame_addr, 1);
7243 return;
7245 give_sigsegv:
7246 force_sigsegv(sig);
7247 return;
7250 static void restore_sigcontext(CPUXtensaState *env,
7251 struct target_rt_sigframe *frame)
7253 struct target_sigcontext *sc = &frame->uc.tuc_mcontext;
7254 uint32_t ps;
7255 int i;
7257 __get_user(env->pc, &sc->sc_pc);
7258 __get_user(ps, &sc->sc_ps);
7259 __get_user(env->sregs[LBEG], &sc->sc_lbeg);
7260 __get_user(env->sregs[LEND], &sc->sc_lend);
7261 __get_user(env->sregs[LCOUNT], &sc->sc_lcount);
7263 env->sregs[WINDOW_BASE] = 0;
7264 env->sregs[WINDOW_START] = 1;
7265 env->sregs[PS] = deposit32(env->sregs[PS],
7266 PS_CALLINC_SHIFT,
7267 PS_CALLINC_LEN,
7268 extract32(ps, PS_CALLINC_SHIFT,
7269 PS_CALLINC_LEN));
7270 for (i = 0; i < 16; ++i) {
7271 __get_user(env->regs[i], sc->sc_a + i);
7273 /* TODO: xtregs */
7276 long do_rt_sigreturn(CPUXtensaState *env)
7278 abi_ulong frame_addr = env->regs[1];
7279 struct target_rt_sigframe *frame;
7280 sigset_t set;
7282 trace_user_do_rt_sigreturn(env, frame_addr);
7283 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
7284 goto badframe;
7286 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
7287 set_sigmask(&set);
7289 restore_sigcontext(env, frame);
7291 if (do_sigaltstack(frame_addr +
7292 offsetof(struct target_rt_sigframe, uc.tuc_stack),
7293 0, get_sp_from_cpustate(env)) == -TARGET_EFAULT) {
7294 goto badframe;
7296 unlock_user_struct(frame, frame_addr, 0);
7297 return -TARGET_QEMU_ESIGRETURN;
7299 badframe:
7300 unlock_user_struct(frame, frame_addr, 0);
7301 force_sig(TARGET_SIGSEGV);
7302 return -TARGET_QEMU_ESIGRETURN;
7305 #else
7306 #error Target needs to add support for signal handling
7307 #endif
7309 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
7310 struct emulated_sigtable *k)
7312 CPUState *cpu = ENV_GET_CPU(cpu_env);
7313 abi_ulong handler;
7314 sigset_t set;
7315 target_sigset_t target_old_set;
7316 struct target_sigaction *sa;
7317 TaskState *ts = cpu->opaque;
7319 trace_user_handle_signal(cpu_env, sig);
7320 /* dequeue signal */
7321 k->pending = 0;
7323 sig = gdb_handlesig(cpu, sig);
7324 if (!sig) {
7325 sa = NULL;
7326 handler = TARGET_SIG_IGN;
7327 } else {
7328 sa = &sigact_table[sig - 1];
7329 handler = sa->_sa_handler;
7332 if (do_strace) {
7333 print_taken_signal(sig, &k->info);
7336 if (handler == TARGET_SIG_DFL) {
7337 /* default handler : ignore some signal. The other are job control or fatal */
7338 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
7339 kill(getpid(),SIGSTOP);
7340 } else if (sig != TARGET_SIGCHLD &&
7341 sig != TARGET_SIGURG &&
7342 sig != TARGET_SIGWINCH &&
7343 sig != TARGET_SIGCONT) {
7344 dump_core_and_abort(sig);
7346 } else if (handler == TARGET_SIG_IGN) {
7347 /* ignore sig */
7348 } else if (handler == TARGET_SIG_ERR) {
7349 dump_core_and_abort(sig);
7350 } else {
7351 /* compute the blocked signals during the handler execution */
7352 sigset_t *blocked_set;
7354 target_to_host_sigset(&set, &sa->sa_mask);
7355 /* SA_NODEFER indicates that the current signal should not be
7356 blocked during the handler */
7357 if (!(sa->sa_flags & TARGET_SA_NODEFER))
7358 sigaddset(&set, target_to_host_signal(sig));
7360 /* save the previous blocked signal state to restore it at the
7361 end of the signal execution (see do_sigreturn) */
7362 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
7364 /* block signals in the handler */
7365 blocked_set = ts->in_sigsuspend ?
7366 &ts->sigsuspend_mask : &ts->signal_mask;
7367 sigorset(&ts->signal_mask, blocked_set, &set);
7368 ts->in_sigsuspend = 0;
7370 /* if the CPU is in VM86 mode, we restore the 32 bit values */
7371 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
7373 CPUX86State *env = cpu_env;
7374 if (env->eflags & VM_MASK)
7375 save_v86_state(env);
7377 #endif
7378 /* prepare the stack frame of the virtual CPU */
7379 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
7380 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
7381 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
7382 || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
7383 || defined(TARGET_RISCV) || defined(TARGET_XTENSA)
7384 /* These targets do not have traditional signals. */
7385 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
7386 #else
7387 if (sa->sa_flags & TARGET_SA_SIGINFO)
7388 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
7389 else
7390 setup_frame(sig, sa, &target_old_set, cpu_env);
7391 #endif
7392 if (sa->sa_flags & TARGET_SA_RESETHAND) {
7393 sa->_sa_handler = TARGET_SIG_DFL;
7398 void process_pending_signals(CPUArchState *cpu_env)
7400 CPUState *cpu = ENV_GET_CPU(cpu_env);
7401 int sig;
7402 TaskState *ts = cpu->opaque;
7403 sigset_t set;
7404 sigset_t *blocked_set;
7406 while (atomic_read(&ts->signal_pending)) {
7407 /* FIXME: This is not threadsafe. */
7408 sigfillset(&set);
7409 sigprocmask(SIG_SETMASK, &set, 0);
7411 restart_scan:
7412 sig = ts->sync_signal.pending;
7413 if (sig) {
7414 /* Synchronous signals are forced,
7415 * see force_sig_info() and callers in Linux
7416 * Note that not all of our queue_signal() calls in QEMU correspond
7417 * to force_sig_info() calls in Linux (some are send_sig_info()).
7418 * However it seems like a kernel bug to me to allow the process
7419 * to block a synchronous signal since it could then just end up
7420 * looping round and round indefinitely.
7422 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
7423 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
7424 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
7425 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
7428 handle_pending_signal(cpu_env, sig, &ts->sync_signal);
7431 for (sig = 1; sig <= TARGET_NSIG; sig++) {
7432 blocked_set = ts->in_sigsuspend ?
7433 &ts->sigsuspend_mask : &ts->signal_mask;
7435 if (ts->sigtab[sig - 1].pending &&
7436 (!sigismember(blocked_set,
7437 target_to_host_signal_table[sig]))) {
7438 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
7439 /* Restart scan from the beginning, as handle_pending_signal
7440 * might have resulted in a new synchronous signal (eg SIGSEGV).
7442 goto restart_scan;
7446 /* if no signal is pending, unblock signals and recheck (the act
7447 * of unblocking might cause us to take another host signal which
7448 * will set signal_pending again).
7450 atomic_set(&ts->signal_pending, 0);
7451 ts->in_sigsuspend = 0;
7452 set = ts->signal_mask;
7453 sigdelset(&set, SIGSEGV);
7454 sigdelset(&set, SIGBUS);
7455 sigprocmask(SIG_SETMASK, &set, 0);
7457 ts->in_sigsuspend = 0;